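/*
 * Core of the SPI NOR framework: register accessors, erase/read/write
 * paths, flash detection and parameter initialization shared by the
 * manufacturer-specific drivers.
 */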
0010 #include <linux/err.h>
0011 #include <linux/errno.h>
0012 #include <linux/module.h>
0013 #include <linux/device.h>
0014 #include <linux/mutex.h>
0015 #include <linux/math64.h>
0016 #include <linux/sizes.h>
0017 #include <linux/slab.h>
0018
0019 #include <linux/mtd/mtd.h>
0020 #include <linux/of_platform.h>
0021 #include <linux/sched/task_stack.h>
0022 #include <linux/spi/flash.h>
0023 #include <linux/mtd/spi-nor.h>
0024
0025 #include "core.h"
0026
0027
0028
0029
0030
0031
0032
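/*
 * Default timeout for flash operations other than full-chip erase; generous,
 * but kept large for safety.
 */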
0033 #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
0034
0035
0036
0037
0038
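/*
 * Full-chip erase timeout, calibrated for a 2 MiB flash; spi_nor_erase()
 * scales it linearly with the flash size.
 */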
0039 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
0040
0041 #define SPI_NOR_MAX_ADDR_NBYTES 4
0042
0043 #define SPI_NOR_SRST_SLEEP_MIN 200
0044 #define SPI_NOR_SRST_SLEEP_MAX 400
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
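/**
 * spi_nor_get_cmd_ext() - Get the command opcode extension based on the
 *			   extension type.
 * @nor:	pointer to a 'struct spi_nor'
 * @op:		pointer to the 'struct spi_mem_op' whose command extension
 *		is derived.
 *
 * Right now, only "repeat" and "invert" are supported.
 *
 * Return: The opcode extension.
 */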
0057 static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
0058 const struct spi_mem_op *op)
0059 {
0060 switch (nor->cmd_ext_type) {
0061 case SPI_NOR_EXT_INVERT:
0062 return ~op->cmd.opcode;
0063
0064 case SPI_NOR_EXT_REPEAT:
0065 return op->cmd.opcode;
0066
0067 default:
0068 dev_err(nor->dev, "Unknown command extension type\n");
0069 return 0;
0070 }
0071 }
0072
0073
0074
0075
0076
0077
0078
0079
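/**
 * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
 * @nor:	pointer to a 'struct spi_nor'
 * @op:		pointer to the 'struct spi_mem_op' whose properties
 *		need to be initialized.
 * @proto:	the protocol from which the properties need to be set.
 */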
0080 void spi_nor_spimem_setup_op(const struct spi_nor *nor,
0081 struct spi_mem_op *op,
0082 const enum spi_nor_protocol proto)
0083 {
0084 u8 ext;
0085
0086 op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);
0087
0088 if (op->addr.nbytes)
0089 op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);
0090
0091 if (op->dummy.nbytes)
0092 op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);
0093
0094 if (op->data.nbytes)
0095 op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);
0096
0097 if (spi_nor_protocol_is_dtr(proto)) {
0098
0099
0100
0101
0102
0103
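/*
 * spi-mem can describe each phase separately, but here every phase is
 * forced to DTR together: only protocols where all phases are either DTR
 * or STR are supported, not a mix.
 */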
0104 op->cmd.dtr = true;
0105 op->addr.dtr = true;
0106 op->dummy.dtr = true;
0107 op->data.dtr = true;
0108
0109
0110 op->dummy.nbytes *= 2;
0111
0112 ext = spi_nor_get_cmd_ext(nor, op);
0113 op->cmd.opcode = (op->cmd.opcode << 8) | ext;
0114 op->cmd.nbytes = 2;
0115 }
0116 }
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
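/**
 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
 *                           transfer
 * @nor:	pointer to 'struct spi_nor'
 * @op:		pointer to 'struct spi_mem_op' template for transfer
 *
 * If the bounce buffer must be used, the data field in @op is updated.
 *
 * Return: true if the bounce buffer is needed, false if not
 */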
0128 static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
0129 {
0130
0131 if (object_is_on_stack(op->data.buf.in) ||
0132 !virt_addr_valid(op->data.buf.in)) {
0133 if (op->data.nbytes > nor->bouncebuf_size)
0134 op->data.nbytes = nor->bouncebuf_size;
0135 op->data.buf.in = nor->bouncebuf;
0136 return true;
0137 }
0138
0139 return false;
0140 }
0141
0142
0143
0144
0145
0146
0147
0148
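/**
 * spi_nor_spimem_exec_op() - execute a memory operation
 * @nor:	pointer to 'struct spi_nor'
 * @op:		pointer to 'struct spi_mem_op' template for transfer
 *
 * Return: 0 on success, -error otherwise.
 */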
0149 static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
0150 {
0151 int error;
0152
0153 error = spi_mem_adjust_op_size(nor->spimem, op);
0154 if (error)
0155 return error;
0156
0157 return spi_mem_exec_op(nor->spimem, op);
0158 }
0159
0160 int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
0161 u8 *buf, size_t len)
0162 {
0163 if (spi_nor_protocol_is_dtr(nor->reg_proto))
0164 return -EOPNOTSUPP;
0165
0166 return nor->controller_ops->read_reg(nor, opcode, buf, len);
0167 }
0168
0169 int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
0170 const u8 *buf, size_t len)
0171 {
0172 if (spi_nor_protocol_is_dtr(nor->reg_proto))
0173 return -EOPNOTSUPP;
0174
0175 return nor->controller_ops->write_reg(nor, opcode, buf, len);
0176 }
0177
0178 static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
0179 {
0180 if (spi_nor_protocol_is_dtr(nor->reg_proto))
0181 return -EOPNOTSUPP;
0182
0183 return nor->controller_ops->erase(nor, offs);
0184 }
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
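/**
 * spi_nor_spimem_read_data() - read data from flash's memory region via
 *                              spi-mem
 * @nor:	pointer to 'struct spi_nor'
 * @from:	offset to read from
 * @len:	number of bytes to read
 * @buf:	pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */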
0196 static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
0197 size_t len, u8 *buf)
0198 {
0199 struct spi_mem_op op =
0200 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
0201 SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
0202 SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
0203 SPI_MEM_OP_DATA_IN(len, buf, 0));
0204 bool usebouncebuf;
0205 ssize_t nbytes;
0206 int error;
0207
0208 spi_nor_spimem_setup_op(nor, &op, nor->read_proto);
0209
0210
0211 op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
0212 if (spi_nor_protocol_is_dtr(nor->read_proto))
0213 op.dummy.nbytes *= 2;
0214
0215 usebouncebuf = spi_nor_spimem_bounce(nor, &op);
0216
0217 if (nor->dirmap.rdesc) {
0218 nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
0219 op.data.nbytes, op.data.buf.in);
0220 } else {
0221 error = spi_nor_spimem_exec_op(nor, &op);
0222 if (error)
0223 return error;
0224 nbytes = op.data.nbytes;
0225 }
0226
0227 if (usebouncebuf && nbytes > 0)
0228 memcpy(buf, op.data.buf.in, nbytes);
0229
0230 return nbytes;
0231 }
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
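/**
 * spi_nor_read_data() - read data from flash memory
 * @nor:	pointer to 'struct spi_nor'
 * @from:	offset to read from
 * @len:	number of bytes to read
 * @buf:	pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */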
0242 ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
0243 {
0244 if (nor->spimem)
0245 return spi_nor_spimem_read_data(nor, from, len, buf);
0246
0247 return nor->controller_ops->read(nor, from, len, buf);
0248 }
0249
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
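/**
 * spi_nor_spimem_write_data() - write data to flash memory via spi-mem
 * @nor:	pointer to 'struct spi_nor'
 * @to:		offset to write to
 * @len:	number of bytes to write
 * @buf:	pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */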
0260 static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
0261 size_t len, const u8 *buf)
0262 {
0263 struct spi_mem_op op =
0264 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
0265 SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
0266 SPI_MEM_OP_NO_DUMMY,
0267 SPI_MEM_OP_DATA_OUT(len, buf, 0));
0268 ssize_t nbytes;
0269 int error;
0270
0271 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
0272 op.addr.nbytes = 0;
0273
0274 spi_nor_spimem_setup_op(nor, &op, nor->write_proto);
0275
0276 if (spi_nor_spimem_bounce(nor, &op))
0277 memcpy(nor->bouncebuf, buf, op.data.nbytes);
0278
0279 if (nor->dirmap.wdesc) {
0280 nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
0281 op.data.nbytes, op.data.buf.out);
0282 } else {
0283 error = spi_nor_spimem_exec_op(nor, &op);
0284 if (error)
0285 return error;
0286 nbytes = op.data.nbytes;
0287 }
0288
0289 return nbytes;
0290 }
0291
0292
0293
0294
0295
0296
0297
0298
0299
0300
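/**
 * spi_nor_write_data() - write data to flash memory
 * @nor:	pointer to 'struct spi_nor'
 * @to:		offset to write to
 * @len:	number of bytes to write
 * @buf:	pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */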
0301 ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
0302 const u8 *buf)
0303 {
0304 if (nor->spimem)
0305 return spi_nor_spimem_write_data(nor, to, len, buf);
0306
0307 return nor->controller_ops->write(nor, to, len, buf);
0308 }
0309
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319 int spi_nor_read_any_reg(struct spi_nor *nor, struct spi_mem_op *op,
0320 enum spi_nor_protocol proto)
0321 {
0322 if (!nor->spimem)
0323 return -EOPNOTSUPP;
0324
0325 spi_nor_spimem_setup_op(nor, op, proto);
0326 return spi_nor_spimem_exec_op(nor, op);
0327 }
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337
0338
0339
0340
0341 int spi_nor_write_any_volatile_reg(struct spi_nor *nor, struct spi_mem_op *op,
0342 enum spi_nor_protocol proto)
0343 {
0344 int ret;
0345
0346 if (!nor->spimem)
0347 return -EOPNOTSUPP;
0348
0349 ret = spi_nor_write_enable(nor);
0350 if (ret)
0351 return ret;
0352 spi_nor_spimem_setup_op(nor, op, proto);
0353 return spi_nor_spimem_exec_op(nor, op);
0354 }
0355
0356
0357
0358
0359
0360
0361
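/**
 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */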
0362 int spi_nor_write_enable(struct spi_nor *nor)
0363 {
0364 int ret;
0365
0366 if (nor->spimem) {
0367 struct spi_mem_op op = SPI_NOR_WREN_OP;
0368
0369 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
0370
0371 ret = spi_mem_exec_op(nor->spimem, &op);
0372 } else {
0373 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
0374 NULL, 0);
0375 }
0376
0377 if (ret)
0378 dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
0379
0380 return ret;
0381 }
0382
0383
0384
0385
0386
0387
0388
0389 int spi_nor_write_disable(struct spi_nor *nor)
0390 {
0391 int ret;
0392
0393 if (nor->spimem) {
0394 struct spi_mem_op op = SPI_NOR_WRDI_OP;
0395
0396 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
0397
0398 ret = spi_mem_exec_op(nor->spimem, &op);
0399 } else {
0400 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
0401 NULL, 0);
0402 }
0403
0404 if (ret)
0405 dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
0406
0407 return ret;
0408 }
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422
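/**
 * spi_nor_read_id() - Read the JEDEC ID.
 * @nor:	pointer to 'struct spi_nor'.
 * @naddr:	number of address bytes to send. Can be zero if the operation
 *		does not need to send an address.
 * @ndummy:	number of dummy bytes to send after an opcode or address. Can
 *		be zero if the operation does not require dummy bytes.
 * @id:		pointer to a DMA-able buffer where the JEDEC ID will be
 *		written.
 * @proto:	the SPI protocol to use for the register operation.
 *
 * Return: 0 on success, -errno otherwise.
 */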
0423 int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id,
0424 enum spi_nor_protocol proto)
0425 {
0426 int ret;
0427
0428 if (nor->spimem) {
0429 struct spi_mem_op op =
0430 SPI_NOR_READID_OP(naddr, ndummy, id, SPI_NOR_MAX_ID_LEN);
0431
0432 spi_nor_spimem_setup_op(nor, &op, proto);
0433 ret = spi_mem_exec_op(nor->spimem, &op);
0434 } else {
0435 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
0436 SPI_NOR_MAX_ID_LEN);
0437 }
0438 return ret;
0439 }
0440
0441
0442
0443
0444
0445
0446
0447
0448
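/**
 * spi_nor_read_sr() - Read the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to a DMA-able buffer where the value of the
 *		Status Register will be written. Should be at least 2 bytes.
 *
 * Return: 0 on success, -errno otherwise.
 */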
0449 int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
0450 {
0451 int ret;
0452
0453 if (nor->spimem) {
0454 struct spi_mem_op op = SPI_NOR_RDSR_OP(sr);
0455
0456 if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
0457 op.addr.nbytes = nor->params->rdsr_addr_nbytes;
0458 op.dummy.nbytes = nor->params->rdsr_dummy;
0459
0460
0461
0462
0463 op.data.nbytes = 2;
0464 }
0465
0466 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
0467
0468 ret = spi_mem_exec_op(nor->spimem, &op);
0469 } else {
0470 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
0471 1);
0472 }
0473
0474 if (ret)
0475 dev_dbg(nor->dev, "error %d reading SR\n", ret);
0476
0477 return ret;
0478 }
0479
0480
0481
0482
0483
0484
0485
0486
0487
0488
0489 int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
0490 {
0491 int ret;
0492
0493 if (nor->spimem) {
0494 struct spi_mem_op op = SPI_NOR_RDCR_OP(cr);
0495
0496 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
0497
0498 ret = spi_mem_exec_op(nor->spimem, &op);
0499 } else {
0500 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
0501 1);
0502 }
0503
0504 if (ret)
0505 dev_dbg(nor->dev, "error %d reading CR\n", ret);
0506
0507 return ret;
0508 }
0509
0510
0511
0512
0513
0514
0515
0516
0517
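/**
 * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */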
0518 int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
0519 {
0520 int ret;
0521
0522 if (nor->spimem) {
0523 struct spi_mem_op op = SPI_NOR_EN4B_EX4B_OP(enable);
0524
0525 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
0526
0527 ret = spi_mem_exec_op(nor->spimem, &op);
0528 } else {
0529 ret = spi_nor_controller_ops_write_reg(nor,
0530 enable ? SPINOR_OP_EN4B :
0531 SPINOR_OP_EX4B,
0532 NULL, 0);
0533 }
0534
0535 if (ret)
0536 dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
0537
0538 return ret;
0539 }
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550 static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
0551 {
0552 int ret;
0553
0554 nor->bouncebuf[0] = enable << 7;
0555
0556 if (nor->spimem) {
0557 struct spi_mem_op op = SPI_NOR_BRWR_OP(nor->bouncebuf);
0558
0559 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
0560
0561 ret = spi_mem_exec_op(nor->spimem, &op);
0562 } else {
0563 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
0564 nor->bouncebuf, 1);
0565 }
0566
0567 if (ret)
0568 dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
0569
0570 return ret;
0571 }
0572
0573
0574
0575
0576
0577
0578
0579
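/**
 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
 * for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */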
0580 int spi_nor_sr_ready(struct spi_nor *nor)
0581 {
0582 int ret;
0583
0584 ret = spi_nor_read_sr(nor, nor->bouncebuf);
0585 if (ret)
0586 return ret;
0587
0588 return !(nor->bouncebuf[0] & SR_WIP);
0589 }
0590
0591
0592
0593
0594
0595
0596
0597 static int spi_nor_ready(struct spi_nor *nor)
0598 {
0599
0600 if (nor->params->ready)
0601 return nor->params->ready(nor);
0602
0603 return spi_nor_sr_ready(nor);
0604 }
0605
0606
0607
0608
0609
0610
0611
0612
0613
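/**
 * spi_nor_wait_till_ready_with_timeout() - Poll the Status Register until the
 * flash is ready, or timeout occurs.
 * @nor:		pointer to 'struct spi_nor'.
 * @timeout_jiffies:	jiffies to wait until timeout.
 *
 * Return: 0 on success, -errno otherwise.
 */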
0614 static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
0615 unsigned long timeout_jiffies)
0616 {
0617 unsigned long deadline;
0618 int timeout = 0, ret;
0619
0620 deadline = jiffies + timeout_jiffies;
0621
0622 while (!timeout) {
0623 if (time_after_eq(jiffies, deadline))
0624 timeout = 1;
0625
0626 ret = spi_nor_ready(nor);
0627 if (ret < 0)
0628 return ret;
0629 if (ret)
0630 return 0;
0631
0632 cond_resched();
0633 }
0634
0635 dev_dbg(nor->dev, "flash operation timed out\n");
0636
0637 return -ETIMEDOUT;
0638 }
0639
0640
0641
0642
0643
0644
0645
0646
0647 int spi_nor_wait_till_ready(struct spi_nor *nor)
0648 {
0649 return spi_nor_wait_till_ready_with_timeout(nor,
0650 DEFAULT_READY_WAIT_JIFFIES);
0651 }
0652
0653
0654
0655
0656
0657
0658
0659 int spi_nor_global_block_unlock(struct spi_nor *nor)
0660 {
0661 int ret;
0662
0663 ret = spi_nor_write_enable(nor);
0664 if (ret)
0665 return ret;
0666
0667 if (nor->spimem) {
0668 struct spi_mem_op op = SPI_NOR_GBULK_OP;
0669
0670 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
0671
0672 ret = spi_mem_exec_op(nor->spimem, &op);
0673 } else {
0674 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
0675 NULL, 0);
0676 }
0677
0678 if (ret) {
0679 dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
0680 return ret;
0681 }
0682
0683 return spi_nor_wait_till_ready(nor);
0684 }
0685
0686
0687
0688
0689
0690
0691
0692
0693
0694 int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
0695 {
0696 int ret;
0697
0698 ret = spi_nor_write_enable(nor);
0699 if (ret)
0700 return ret;
0701
0702 if (nor->spimem) {
0703 struct spi_mem_op op = SPI_NOR_WRSR_OP(sr, len);
0704
0705 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
0706
0707 ret = spi_mem_exec_op(nor->spimem, &op);
0708 } else {
0709 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
0710 len);
0711 }
0712
0713 if (ret) {
0714 dev_dbg(nor->dev, "error %d writing SR\n", ret);
0715 return ret;
0716 }
0717
0718 return spi_nor_wait_till_ready(nor);
0719 }
0720
0721
0722
0723
0724
0725
0726
0727
0728
0729 static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
0730 {
0731 int ret;
0732
0733 nor->bouncebuf[0] = sr1;
0734
0735 ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
0736 if (ret)
0737 return ret;
0738
0739 ret = spi_nor_read_sr(nor, nor->bouncebuf);
0740 if (ret)
0741 return ret;
0742
0743 if (nor->bouncebuf[0] != sr1) {
0744 dev_dbg(nor->dev, "SR1: read back test failed\n");
0745 return -EIO;
0746 }
0747
0748 return 0;
0749 }
0750
0751
0752
0753
0754
0755
0756
0757
0758
0759
0760
0761 static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
0762 {
0763 int ret;
0764 u8 *sr_cr = nor->bouncebuf;
0765 u8 cr_written;
0766
0767
0768 if (!(nor->flags & SNOR_F_NO_READ_CR)) {
0769 ret = spi_nor_read_cr(nor, &sr_cr[1]);
0770 if (ret)
0771 return ret;
0772 } else if (nor->params->quad_enable) {
0773
0774
0775
0776
0777
0778
0779
0780
0781
0782
0783
0784
0785
0786
0787
0788 sr_cr[1] = SR2_QUAD_EN_BIT1;
0789 } else {
0790 sr_cr[1] = 0;
0791 }
0792
0793 sr_cr[0] = sr1;
0794
0795 ret = spi_nor_write_sr(nor, sr_cr, 2);
0796 if (ret)
0797 return ret;
0798
0799 ret = spi_nor_read_sr(nor, sr_cr);
0800 if (ret)
0801 return ret;
0802
0803 if (sr1 != sr_cr[0]) {
0804 dev_dbg(nor->dev, "SR: Read back test failed\n");
0805 return -EIO;
0806 }
0807
0808 if (nor->flags & SNOR_F_NO_READ_CR)
0809 return 0;
0810
0811 cr_written = sr_cr[1];
0812
0813 ret = spi_nor_read_cr(nor, &sr_cr[1]);
0814 if (ret)
0815 return ret;
0816
0817 if (cr_written != sr_cr[1]) {
0818 dev_dbg(nor->dev, "CR: read back test failed\n");
0819 return -EIO;
0820 }
0821
0822 return 0;
0823 }
0824
0825
0826
0827
0828
0829
0830
0831
0832
0833
0834
0835 int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
0836 {
0837 int ret;
0838 u8 *sr_cr = nor->bouncebuf;
0839 u8 sr_written;
0840
0841
0842 ret = spi_nor_read_sr(nor, sr_cr);
0843 if (ret)
0844 return ret;
0845
0846 sr_cr[1] = cr;
0847
0848 ret = spi_nor_write_sr(nor, sr_cr, 2);
0849 if (ret)
0850 return ret;
0851
0852 sr_written = sr_cr[0];
0853
0854 ret = spi_nor_read_sr(nor, sr_cr);
0855 if (ret)
0856 return ret;
0857
0858 if (sr_written != sr_cr[0]) {
0859 dev_dbg(nor->dev, "SR: Read back test failed\n");
0860 return -EIO;
0861 }
0862
0863 if (nor->flags & SNOR_F_NO_READ_CR)
0864 return 0;
0865
0866 ret = spi_nor_read_cr(nor, &sr_cr[1]);
0867 if (ret)
0868 return ret;
0869
0870 if (cr != sr_cr[1]) {
0871 dev_dbg(nor->dev, "CR: read back test failed\n");
0872 return -EIO;
0873 }
0874
0875 return 0;
0876 }
0877
0878
0879
0880
0881
0882
0883
0884
0885
0886
0887 int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
0888 {
0889 if (nor->flags & SNOR_F_HAS_16BIT_SR)
0890 return spi_nor_write_16bit_sr_and_check(nor, sr1);
0891
0892 return spi_nor_write_sr1_and_check(nor, sr1);
0893 }
0894
0895
0896
0897
0898
0899
0900
0901
0902
0903 static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
0904 {
0905 int ret;
0906
0907 ret = spi_nor_write_enable(nor);
0908 if (ret)
0909 return ret;
0910
0911 if (nor->spimem) {
0912 struct spi_mem_op op = SPI_NOR_WRSR2_OP(sr2);
0913
0914 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
0915
0916 ret = spi_mem_exec_op(nor->spimem, &op);
0917 } else {
0918 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
0919 sr2, 1);
0920 }
0921
0922 if (ret) {
0923 dev_dbg(nor->dev, "error %d writing SR2\n", ret);
0924 return ret;
0925 }
0926
0927 return spi_nor_wait_till_ready(nor);
0928 }
0929
0930
0931
0932
0933
0934
0935
0936
0937
0938
0939 static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
0940 {
0941 int ret;
0942
0943 if (nor->spimem) {
0944 struct spi_mem_op op = SPI_NOR_RDSR2_OP(sr2);
0945
0946 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
0947
0948 ret = spi_mem_exec_op(nor->spimem, &op);
0949 } else {
0950 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
0951 1);
0952 }
0953
0954 if (ret)
0955 dev_dbg(nor->dev, "error %d reading SR2\n", ret);
0956
0957 return ret;
0958 }
0959
0960
0961
0962
0963
0964
0965
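/**
 * spi_nor_erase_chip() - Erase the entire flash memory.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */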
0966 static int spi_nor_erase_chip(struct spi_nor *nor)
0967 {
0968 int ret;
0969
0970 dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
0971
0972 if (nor->spimem) {
0973 struct spi_mem_op op = SPI_NOR_CHIP_ERASE_OP;
0974
0975 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
0976
0977 ret = spi_mem_exec_op(nor->spimem, &op);
0978 } else {
0979 ret = spi_nor_controller_ops_write_reg(nor,
0980 SPINOR_OP_CHIP_ERASE,
0981 NULL, 0);
0982 }
0983
0984 if (ret)
0985 dev_dbg(nor->dev, "error %d erasing chip\n", ret);
0986
0987 return ret;
0988 }
0989
0990 static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
0991 {
0992 size_t i;
0993
0994 for (i = 0; i < size; i++)
0995 if (table[i][0] == opcode)
0996 return table[i][1];
0997
0998
0999 return opcode;
1000 }
1001
1002 u8 spi_nor_convert_3to4_read(u8 opcode)
1003 {
1004 static const u8 spi_nor_3to4_read[][2] = {
1005 { SPINOR_OP_READ, SPINOR_OP_READ_4B },
1006 { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
1007 { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
1008 { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
1009 { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
1010 { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
1011 { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
1012 { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
1013
1014 { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
1015 { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
1016 { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
1017 };
1018
1019 return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
1020 ARRAY_SIZE(spi_nor_3to4_read));
1021 }
1022
1023 static u8 spi_nor_convert_3to4_program(u8 opcode)
1024 {
1025 static const u8 spi_nor_3to4_program[][2] = {
1026 { SPINOR_OP_PP, SPINOR_OP_PP_4B },
1027 { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
1028 { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
1029 { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
1030 { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
1031 };
1032
1033 return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
1034 ARRAY_SIZE(spi_nor_3to4_program));
1035 }
1036
1037 static u8 spi_nor_convert_3to4_erase(u8 opcode)
1038 {
1039 static const u8 spi_nor_3to4_erase[][2] = {
1040 { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
1041 { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
1042 { SPINOR_OP_SE, SPINOR_OP_SE_4B },
1043 };
1044
1045 return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
1046 ARRAY_SIZE(spi_nor_3to4_erase));
1047 }
1048
1049 static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
1050 {
1051 return !!nor->params->erase_map.uniform_erase_type;
1052 }
1053
1054 static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
1055 {
1056 nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
1057 nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
1058 nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
1059
1060 if (!spi_nor_has_uniform_erase(nor)) {
1061 struct spi_nor_erase_map *map = &nor->params->erase_map;
1062 struct spi_nor_erase_type *erase;
1063 int i;
1064
1065 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
1066 erase = &map->erase_type[i];
1067 erase->opcode =
1068 spi_nor_convert_3to4_erase(erase->opcode);
1069 }
1070 }
1071 }
1072
1073 int spi_nor_lock_and_prep(struct spi_nor *nor)
1074 {
1075 int ret = 0;
1076
1077 mutex_lock(&nor->lock);
1078
1079 if (nor->controller_ops && nor->controller_ops->prepare) {
1080 ret = nor->controller_ops->prepare(nor);
1081 if (ret) {
1082 mutex_unlock(&nor->lock);
1083 return ret;
1084 }
1085 }
1086 return ret;
1087 }
1088
1089 void spi_nor_unlock_and_unprep(struct spi_nor *nor)
1090 {
1091 if (nor->controller_ops && nor->controller_ops->unprepare)
1092 nor->controller_ops->unprepare(nor);
1093 mutex_unlock(&nor->lock);
1094 }
1095
1096 static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
1097 {
1098 if (!nor->params->convert_addr)
1099 return addr;
1100
1101 return nor->params->convert_addr(nor, addr);
1102 }
1103
1104
1105
1106
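/*
 * Initiate the erasure of a single sector. Returns 0 on success, -errno
 * otherwise.
 */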
1107 int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
1108 {
1109 int i;
1110
1111 addr = spi_nor_convert_addr(nor, addr);
1112
1113 if (nor->spimem) {
1114 struct spi_mem_op op =
1115 SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
1116 nor->addr_nbytes, addr);
1117
1118 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
1119
1120 return spi_mem_exec_op(nor->spimem, &op);
1121 } else if (nor->controller_ops->erase) {
1122 return spi_nor_controller_ops_erase(nor, addr);
1123 }
1124
1125
1126
1127
1128
1129 for (i = nor->addr_nbytes - 1; i >= 0; i--) {
1130 nor->bouncebuf[i] = addr & 0xff;
1131 addr >>= 8;
1132 }
1133
1134 return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
1135 nor->bouncebuf, nor->addr_nbytes);
1136 }
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146 static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
1147 u64 dividend, u32 *remainder)
1148 {
1149
1150 *remainder = (u32)dividend & erase->size_mask;
1151 return dividend >> erase->size_shift;
1152 }
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167 static const struct spi_nor_erase_type *
1168 spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
1169 const struct spi_nor_erase_region *region,
1170 u64 addr, u32 len)
1171 {
1172 const struct spi_nor_erase_type *erase;
1173 u32 rem;
1174 int i;
1175 u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
1176
1177
1178
1179
1180
1181 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
1182
1183 if (!(erase_mask & BIT(i)))
1184 continue;
1185
1186 erase = &map->erase_type[i];
1187
1188
1189 if (region->offset & SNOR_OVERLAID_REGION &&
1190 region->size <= len)
1191 return erase;
1192
1193
1194 if (erase->size > len)
1195 continue;
1196
1197 spi_nor_div_by_erase_size(erase, addr, &rem);
1198 if (!rem)
1199 return erase;
1200 }
1201
1202 return NULL;
1203 }
1204
1205 static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
1206 {
1207 return region->offset & SNOR_LAST_REGION;
1208 }
1209
1210 static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
1211 {
1212 return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
1213 }
1214
1215
1216
1217
1218
1219
1220
1221 struct spi_nor_erase_region *
1222 spi_nor_region_next(struct spi_nor_erase_region *region)
1223 {
1224 if (spi_nor_region_is_last(region))
1225 return NULL;
1226 region++;
1227 return region;
1228 }
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239 static struct spi_nor_erase_region *
1240 spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
1241 {
1242 struct spi_nor_erase_region *region = map->regions;
1243 u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1244 u64 region_end = region_start + region->size;
1245
1246 while (addr < region_start || addr >= region_end) {
1247 region = spi_nor_region_next(region);
1248 if (!region)
1249 return ERR_PTR(-EINVAL);
1250
1251 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1252 region_end = region_start + region->size;
1253 }
1254
1255 return region;
1256 }
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266 static struct spi_nor_erase_command *
1267 spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
1268 const struct spi_nor_erase_type *erase)
1269 {
1270 struct spi_nor_erase_command *cmd;
1271
1272 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1273 if (!cmd)
1274 return ERR_PTR(-ENOMEM);
1275
1276 INIT_LIST_HEAD(&cmd->list);
1277 cmd->opcode = erase->opcode;
1278 cmd->count = 1;
1279
1280 if (region->offset & SNOR_OVERLAID_REGION)
1281 cmd->size = region->size;
1282 else
1283 cmd->size = erase->size;
1284
1285 return cmd;
1286 }
1287
1288
1289
1290
1291
1292 static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
1293 {
1294 struct spi_nor_erase_command *cmd, *next;
1295
1296 list_for_each_entry_safe(cmd, next, erase_list, list) {
1297 list_del(&cmd->list);
1298 kfree(cmd);
1299 }
1300 }
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315 static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
1316 struct list_head *erase_list,
1317 u64 addr, u32 len)
1318 {
1319 const struct spi_nor_erase_map *map = &nor->params->erase_map;
1320 const struct spi_nor_erase_type *erase, *prev_erase = NULL;
1321 struct spi_nor_erase_region *region;
1322 struct spi_nor_erase_command *cmd = NULL;
1323 u64 region_end;
1324 int ret = -EINVAL;
1325
1326 region = spi_nor_find_erase_region(map, addr);
1327 if (IS_ERR(region))
1328 return PTR_ERR(region);
1329
1330 region_end = spi_nor_region_end(region);
1331
1332 while (len) {
1333 erase = spi_nor_find_best_erase_type(map, region, addr, len);
1334 if (!erase)
1335 goto destroy_erase_cmd_list;
1336
1337 if (prev_erase != erase ||
1338 erase->size != cmd->size ||
1339 region->offset & SNOR_OVERLAID_REGION) {
1340 cmd = spi_nor_init_erase_cmd(region, erase);
1341 if (IS_ERR(cmd)) {
1342 ret = PTR_ERR(cmd);
1343 goto destroy_erase_cmd_list;
1344 }
1345
1346 list_add_tail(&cmd->list, erase_list);
1347 } else {
1348 cmd->count++;
1349 }
1350
1351 addr += cmd->size;
1352 len -= cmd->size;
1353
1354 if (len && addr >= region_end) {
1355 region = spi_nor_region_next(region);
1356 if (!region)
1357 goto destroy_erase_cmd_list;
1358 region_end = spi_nor_region_end(region);
1359 }
1360
1361 prev_erase = erase;
1362 }
1363
1364 return 0;
1365
1366 destroy_erase_cmd_list:
1367 spi_nor_destroy_erase_cmd_list(erase_list);
1368 return ret;
1369 }
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382 static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
1383 {
1384 LIST_HEAD(erase_list);
1385 struct spi_nor_erase_command *cmd, *next;
1386 int ret;
1387
1388 ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
1389 if (ret)
1390 return ret;
1391
1392 list_for_each_entry_safe(cmd, next, &erase_list, list) {
1393 nor->erase_opcode = cmd->opcode;
1394 while (cmd->count) {
1395 dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
1396 cmd->size, cmd->opcode, cmd->count);
1397
1398 ret = spi_nor_write_enable(nor);
1399 if (ret)
1400 goto destroy_erase_cmd_list;
1401
1402 ret = spi_nor_erase_sector(nor, addr);
1403 if (ret)
1404 goto destroy_erase_cmd_list;
1405
1406 ret = spi_nor_wait_till_ready(nor);
1407 if (ret)
1408 goto destroy_erase_cmd_list;
1409
1410 addr += cmd->size;
1411 cmd->count--;
1412 }
1413 list_del(&cmd->list);
1414 kfree(cmd);
1415 }
1416
1417 return 0;
1418
1419 destroy_erase_cmd_list:
1420 spi_nor_destroy_erase_cmd_list(&erase_list);
1421 return ret;
1422 }
1423
1424
1425
1426
1427
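/*
 * Erase an address range on the nor chip. The address range may extend over
 * one or more erase sectors. Return an error if there is a problem erasing.
 */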
1428 static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
1429 {
1430 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1431 u32 addr, len;
1432 uint32_t rem;
1433 int ret;
1434
1435 dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
1436 (long long)instr->len);
1437
1438 if (spi_nor_has_uniform_erase(nor)) {
1439 div_u64_rem(instr->len, mtd->erasesize, &rem);
1440 if (rem)
1441 return -EINVAL;
1442 }
1443
1444 addr = instr->addr;
1445 len = instr->len;
1446
1447 ret = spi_nor_lock_and_prep(nor);
1448 if (ret)
1449 return ret;
1450
1451
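/* whole-chip erase? */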
1452 if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
1453 unsigned long timeout;
1454
1455 ret = spi_nor_write_enable(nor);
1456 if (ret)
1457 goto erase_err;
1458
1459 ret = spi_nor_erase_chip(nor);
1460 if (ret)
1461 goto erase_err;
1462
1463
1464
1465
1466
1467
1468
1469 timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
1470 CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
1471 (unsigned long)(mtd->size / SZ_2M));
1472 ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
1473 if (ret)
1474 goto erase_err;
1475
1476
1477
1478
1479
1480
1481
1482 } else if (spi_nor_has_uniform_erase(nor)) {
1483 while (len) {
1484 ret = spi_nor_write_enable(nor);
1485 if (ret)
1486 goto erase_err;
1487
1488 ret = spi_nor_erase_sector(nor, addr);
1489 if (ret)
1490 goto erase_err;
1491
1492 ret = spi_nor_wait_till_ready(nor);
1493 if (ret)
1494 goto erase_err;
1495
1496 addr += mtd->erasesize;
1497 len -= mtd->erasesize;
1498 }
1499
1500
1501 } else {
1502 ret = spi_nor_erase_multi_sectors(nor, addr, len);
1503 if (ret)
1504 goto erase_err;
1505 }
1506
1507 ret = spi_nor_write_disable(nor);
1508
1509 erase_err:
1510 spi_nor_unlock_and_unprep(nor);
1511
1512 return ret;
1513 }
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524 int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
1525 {
1526 int ret;
1527
1528 ret = spi_nor_read_sr(nor, nor->bouncebuf);
1529 if (ret)
1530 return ret;
1531
1532 if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
1533 return 0;
1534
1535 nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
1536
1537 return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
1538 }
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549 int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
1550 {
1551 int ret;
1552
1553 if (nor->flags & SNOR_F_NO_READ_CR)
1554 return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
1555
1556 ret = spi_nor_read_cr(nor, nor->bouncebuf);
1557 if (ret)
1558 return ret;
1559
1560 if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
1561 return 0;
1562
1563 nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
1564
1565 return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
1566 }
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580 int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
1581 {
1582 u8 *sr2 = nor->bouncebuf;
1583 int ret;
1584 u8 sr2_written;
1585
1586
1587 ret = spi_nor_read_sr2(nor, sr2);
1588 if (ret)
1589 return ret;
1590 if (*sr2 & SR2_QUAD_EN_BIT7)
1591 return 0;
1592
1593
1594 *sr2 |= SR2_QUAD_EN_BIT7;
1595
1596 ret = spi_nor_write_sr2(nor, sr2);
1597 if (ret)
1598 return ret;
1599
1600 sr2_written = *sr2;
1601
1602
1603 ret = spi_nor_read_sr2(nor, sr2);
1604 if (ret)
1605 return ret;
1606
1607 if (*sr2 != sr2_written) {
1608 dev_dbg(nor->dev, "SR2: Read back test failed\n");
1609 return -EIO;
1610 }
1611
1612 return 0;
1613 }
1614
1615 static const struct spi_nor_manufacturer *manufacturers[] = {
1616 &spi_nor_atmel,
1617 &spi_nor_catalyst,
1618 &spi_nor_eon,
1619 &spi_nor_esmt,
1620 &spi_nor_everspin,
1621 &spi_nor_fujitsu,
1622 &spi_nor_gigadevice,
1623 &spi_nor_intel,
1624 &spi_nor_issi,
1625 &spi_nor_macronix,
1626 &spi_nor_micron,
1627 &spi_nor_st,
1628 &spi_nor_spansion,
1629 &spi_nor_sst,
1630 &spi_nor_winbond,
1631 &spi_nor_xilinx,
1632 &spi_nor_xmc,
1633 };
1634
1635 static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
1636 const u8 *id)
1637 {
1638 const struct flash_info *part;
1639 unsigned int i, j;
1640
1641 for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
1642 for (j = 0; j < manufacturers[i]->nparts; j++) {
1643 part = &manufacturers[i]->parts[j];
1644 if (part->id_len &&
1645 !memcmp(part->id, id, part->id_len)) {
1646 nor->manufacturer = manufacturers[i];
1647 return part;
1648 }
1649 }
1650 }
1651
1652 return NULL;
1653 }
1654
1655 static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
1656 {
1657 const struct flash_info *info;
1658 u8 *id = nor->bouncebuf;
1659 int ret;
1660
1661 ret = spi_nor_read_id(nor, 0, 0, id, nor->reg_proto);
1662 if (ret) {
1663 dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
1664 return ERR_PTR(ret);
1665 }
1666
1667 info = spi_nor_match_id(nor, id);
1668 if (!info) {
1669 dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
1670 SPI_NOR_MAX_ID_LEN, id);
1671 return ERR_PTR(-ENODEV);
1672 }
1673 return info;
1674 }
1675
1676 static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
1677 size_t *retlen, u_char *buf)
1678 {
1679 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1680 ssize_t ret;
1681
1682 dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
1683
1684 ret = spi_nor_lock_and_prep(nor);
1685 if (ret)
1686 return ret;
1687
1688 while (len) {
1689 loff_t addr = from;
1690
1691 addr = spi_nor_convert_addr(nor, addr);
1692
1693 ret = spi_nor_read_data(nor, addr, len, buf);
1694 if (ret == 0) {
1695
1696 ret = -EIO;
1697 goto read_err;
1698 }
1699 if (ret < 0)
1700 goto read_err;
1701
1702 WARN_ON(ret > len);
1703 *retlen += ret;
1704 buf += ret;
1705 from += ret;
1706 len -= ret;
1707 }
1708 ret = 0;
1709
1710 read_err:
1711 spi_nor_unlock_and_unprep(nor);
1712 return ret;
1713 }
1714
1715
1716
1717
1718
1719
1720 static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
1721 size_t *retlen, const u_char *buf)
1722 {
1723 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1724 size_t page_offset, page_remain, i;
1725 ssize_t ret;
1726 u32 page_size = nor->params->page_size;
1727
1728 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
1729
1730 ret = spi_nor_lock_and_prep(nor);
1731 if (ret)
1732 return ret;
1733
1734 for (i = 0; i < len; ) {
1735 ssize_t written;
1736 loff_t addr = to + i;
1737
1738
1739
1740
1741
1742
1743 if (is_power_of_2(page_size)) {
1744 page_offset = addr & (page_size - 1);
1745 } else {
1746 uint64_t aux = addr;
1747
1748 page_offset = do_div(aux, page_size);
1749 }
1750
1751 page_remain = min_t(size_t, page_size - page_offset, len - i);
1752
1753 addr = spi_nor_convert_addr(nor, addr);
1754
1755 ret = spi_nor_write_enable(nor);
1756 if (ret)
1757 goto write_err;
1758
1759 ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
1760 if (ret < 0)
1761 goto write_err;
1762 written = ret;
1763
1764 ret = spi_nor_wait_till_ready(nor);
1765 if (ret)
1766 goto write_err;
1767 *retlen += written;
1768 i += written;
1769 }
1770
1771 write_err:
1772 spi_nor_unlock_and_unprep(nor);
1773 return ret;
1774 }
1775
1776 static int spi_nor_check(struct spi_nor *nor)
1777 {
1778 if (!nor->dev ||
1779 (!nor->spimem && !nor->controller_ops) ||
1780 (!nor->spimem && nor->controller_ops &&
1781 (!nor->controller_ops->read ||
1782 !nor->controller_ops->write ||
1783 !nor->controller_ops->read_reg ||
1784 !nor->controller_ops->write_reg))) {
1785 pr_err("spi-nor: please fill all the necessary fields!\n");
1786 return -EINVAL;
1787 }
1788
1789 if (nor->spimem && nor->controller_ops) {
1790 dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
1791 return -EINVAL;
1792 }
1793
1794 return 0;
1795 }
1796
1797 void
1798 spi_nor_set_read_settings(struct spi_nor_read_command *read,
1799 u8 num_mode_clocks,
1800 u8 num_wait_states,
1801 u8 opcode,
1802 enum spi_nor_protocol proto)
1803 {
1804 read->num_mode_clocks = num_mode_clocks;
1805 read->num_wait_states = num_wait_states;
1806 read->opcode = opcode;
1807 read->proto = proto;
1808 }
1809
1810 void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
1811 enum spi_nor_protocol proto)
1812 {
1813 pp->opcode = opcode;
1814 pp->proto = proto;
1815 }
1816
1817 static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
1818 {
1819 size_t i;
1820
1821 for (i = 0; i < size; i++)
1822 if (table[i][0] == (int)hwcaps)
1823 return table[i][1];
1824
1825 return -EINVAL;
1826 }
1827
1828 int spi_nor_hwcaps_read2cmd(u32 hwcaps)
1829 {
1830 static const int hwcaps_read2cmd[][2] = {
1831 { SNOR_HWCAPS_READ, SNOR_CMD_READ },
1832 { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
1833 { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
1834 { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
1835 { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
1836 { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
1837 { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
1838 { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
1839 { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
1840 { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
1841 { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
1842 { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
1843 { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
1844 { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
1845 { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
1846 { SNOR_HWCAPS_READ_8_8_8_DTR, SNOR_CMD_READ_8_8_8_DTR },
1847 };
1848
1849 return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
1850 ARRAY_SIZE(hwcaps_read2cmd));
1851 }
1852
1853 int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
1854 {
1855 static const int hwcaps_pp2cmd[][2] = {
1856 { SNOR_HWCAPS_PP, SNOR_CMD_PP },
1857 { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
1858 { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
1859 { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
1860 { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
1861 { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
1862 { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
1863 { SNOR_HWCAPS_PP_8_8_8_DTR, SNOR_CMD_PP_8_8_8_DTR },
1864 };
1865
1866 return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
1867 ARRAY_SIZE(hwcaps_pp2cmd));
1868 }
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878 static int spi_nor_spimem_check_op(struct spi_nor *nor,
1879 struct spi_mem_op *op)
1880 {
1881
1882
1883
1884
1885
1886
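/*
 * First test with 4 address bytes. The opcode itself might be a 3-byte
 * addressing opcode, but the controller is expected to check the sequence,
 * not the opcode; fall back to 3 address bytes below for flashes that fit
 * in 16M.
 */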
1887 op->addr.nbytes = 4;
1888 if (!spi_mem_supports_op(nor->spimem, op)) {
1889 if (nor->params->size > SZ_16M)
1890 return -EOPNOTSUPP;
1891
1892
1893 op->addr.nbytes = 3;
1894 if (!spi_mem_supports_op(nor->spimem, op))
1895 return -EOPNOTSUPP;
1896 }
1897
1898 return 0;
1899 }
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909 static int spi_nor_spimem_check_readop(struct spi_nor *nor,
1910 const struct spi_nor_read_command *read)
1911 {
1912 struct spi_mem_op op = SPI_NOR_READ_OP(read->opcode);
1913
1914 spi_nor_spimem_setup_op(nor, &op, read->proto);
1915
1916
1917 op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
1918 if (spi_nor_protocol_is_dtr(nor->read_proto))
1919 op.dummy.nbytes *= 2;
1920
1921 return spi_nor_spimem_check_op(nor, &op);
1922 }
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932 static int spi_nor_spimem_check_pp(struct spi_nor *nor,
1933 const struct spi_nor_pp_command *pp)
1934 {
1935 struct spi_mem_op op = SPI_NOR_PP_OP(pp->opcode);
1936
1937 spi_nor_spimem_setup_op(nor, &op, pp->proto);
1938
1939 return spi_nor_spimem_check_op(nor, &op);
1940 }
1941
1942
1943
1944
1945
1946
1947
1948
1949 static void
1950 spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
1951 {
1952 struct spi_nor_flash_parameter *params = nor->params;
1953 unsigned int cap;
1954
1955
1956 *hwcaps &= ~SNOR_HWCAPS_X_X_X;
1957
1958
1959
1960
1961
1962 if (nor->flags & SNOR_F_BROKEN_RESET)
1963 *hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);
1964
1965 for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
1966 int rdidx, ppidx;
1967
1968 if (!(*hwcaps & BIT(cap)))
1969 continue;
1970
1971 rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
1972 if (rdidx >= 0 &&
1973 spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
1974 *hwcaps &= ~BIT(cap);
1975
1976 ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
1977 if (ppidx < 0)
1978 continue;
1979
1980 if (spi_nor_spimem_check_pp(nor,
1981 &params->page_programs[ppidx]))
1982 *hwcaps &= ~BIT(cap);
1983 }
1984 }
1985
1986
1987
1988
1989
1990
1991
1992 void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
1993 u8 opcode)
1994 {
1995 erase->size = size;
1996 erase->opcode = opcode;
1997
1998 erase->size_shift = ffs(erase->size) - 1;
1999 erase->size_mask = (1 << erase->size_shift) - 1;
2000 }
2001
2002
2003
2004
2005
2006
2007
2008
2009 void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
2010 u8 erase_mask, u64 flash_size)
2011 {
2012
2013 map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
2014 SNOR_LAST_REGION;
2015 map->uniform_region.size = flash_size;
2016 map->regions = &map->uniform_region;
2017 map->uniform_erase_type = erase_mask;
2018 }
2019
2020 int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2021 const struct sfdp_parameter_header *bfpt_header,
2022 const struct sfdp_bfpt *bfpt)
2023 {
2024 int ret;
2025
2026 if (nor->manufacturer && nor->manufacturer->fixups &&
2027 nor->manufacturer->fixups->post_bfpt) {
2028 ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
2029 bfpt);
2030 if (ret)
2031 return ret;
2032 }
2033
2034 if (nor->info->fixups && nor->info->fixups->post_bfpt)
2035 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt);
2036
2037 return 0;
2038 }
2039
2040 static int spi_nor_select_read(struct spi_nor *nor,
2041 u32 shared_hwcaps)
2042 {
2043 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
2044 const struct spi_nor_read_command *read;
2045
2046 if (best_match < 0)
2047 return -EINVAL;
2048
2049 cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
2050 if (cmd < 0)
2051 return -EINVAL;
2052
2053 read = &nor->params->reads[cmd];
2054 nor->read_opcode = read->opcode;
2055 nor->read_proto = read->proto;
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067 nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
2068 return 0;
2069 }
2070
2071 static int spi_nor_select_pp(struct spi_nor *nor,
2072 u32 shared_hwcaps)
2073 {
2074 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
2075 const struct spi_nor_pp_command *pp;
2076
2077 if (best_match < 0)
2078 return -EINVAL;
2079
2080 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
2081 if (cmd < 0)
2082 return -EINVAL;
2083
2084 pp = &nor->params->page_programs[cmd];
2085 nor->program_opcode = pp->opcode;
2086 nor->write_proto = pp->proto;
2087 return 0;
2088 }
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102 static const struct spi_nor_erase_type *
2103 spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
2104 const u32 wanted_size)
2105 {
2106 const struct spi_nor_erase_type *tested_erase, *erase = NULL;
2107 int i;
2108 u8 uniform_erase_type = map->uniform_erase_type;
2109
2110 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2111 if (!(uniform_erase_type & BIT(i)))
2112 continue;
2113
2114 tested_erase = &map->erase_type[i];
2115
2116
2117
2118
2119
2120 if (tested_erase->size == wanted_size) {
2121 erase = tested_erase;
2122 break;
2123 }
2124
2125
2126
2127
2128
2129 if (!erase && tested_erase->size)
2130 erase = tested_erase;
2131
2132 }
2133
2134 if (!erase)
2135 return NULL;
2136
2137
2138 map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
2139 map->uniform_erase_type |= BIT(erase - map->erase_type);
2140 return erase;
2141 }
2142
2143 static int spi_nor_select_erase(struct spi_nor *nor)
2144 {
2145 struct spi_nor_erase_map *map = &nor->params->erase_map;
2146 const struct spi_nor_erase_type *erase = NULL;
2147 struct mtd_info *mtd = &nor->mtd;
2148 u32 wanted_size = nor->info->sector_size;
2149 int i;
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159 #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
2160
2161 wanted_size = 4096u;
2162 #endif
2163
2164 if (spi_nor_has_uniform_erase(nor)) {
2165 erase = spi_nor_select_uniform_erase(map, wanted_size);
2166 if (!erase)
2167 return -EINVAL;
2168 nor->erase_opcode = erase->opcode;
2169 mtd->erasesize = erase->size;
2170 return 0;
2171 }
2172
2173
2174
2175
2176
2177 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2178 if (map->erase_type[i].size) {
2179 erase = &map->erase_type[i];
2180 break;
2181 }
2182 }
2183
2184 if (!erase)
2185 return -EINVAL;
2186
2187 mtd->erasesize = erase->size;
2188 return 0;
2189 }
2190
2191 static int spi_nor_default_setup(struct spi_nor *nor,
2192 const struct spi_nor_hwcaps *hwcaps)
2193 {
2194 struct spi_nor_flash_parameter *params = nor->params;
2195 u32 ignored_mask, shared_mask;
2196 int err;
2197
2198
2199
2200
2201
2202 shared_mask = hwcaps->mask & params->hwcaps.mask;
2203
2204 if (nor->spimem) {
2205
2206
2207
2208
2209
2210 spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
2211 } else {
2212
2213
2214
2215
2216
2217 ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
2218 if (shared_mask & ignored_mask) {
2219 dev_dbg(nor->dev,
2220 "SPI n-n-n protocols are not supported.\n");
2221 shared_mask &= ~ignored_mask;
2222 }
2223 }
2224
2225
2226 err = spi_nor_select_read(nor, shared_mask);
2227 if (err) {
2228 dev_dbg(nor->dev,
2229 "can't select read settings supported by both the SPI controller and memory.\n");
2230 return err;
2231 }
2232
2233
2234 err = spi_nor_select_pp(nor, shared_mask);
2235 if (err) {
2236 dev_dbg(nor->dev,
2237 "can't select write settings supported by both the SPI controller and memory.\n");
2238 return err;
2239 }
2240
2241
2242 err = spi_nor_select_erase(nor);
2243 if (err) {
2244 dev_dbg(nor->dev,
2245 "can't select erase settings supported by both the SPI controller and memory.\n");
2246 return err;
2247 }
2248
2249 return 0;
2250 }
2251
2252 static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
2253 {
2254 if (nor->params->addr_nbytes) {
2255 nor->addr_nbytes = nor->params->addr_nbytes;
2256 } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269 nor->addr_nbytes = 4;
2270 } else if (nor->info->addr_nbytes) {
2271 nor->addr_nbytes = nor->info->addr_nbytes;
2272 } else {
2273 nor->addr_nbytes = 3;
2274 }
2275
2276 if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
2277
2278 nor->addr_nbytes = 4;
2279 }
2280
2281 if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
2282 dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
2283 nor->addr_nbytes);
2284 return -EINVAL;
2285 }
2286
2287
2288 if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
2289 !(nor->flags & SNOR_F_HAS_4BAIT))
2290 spi_nor_set_4byte_opcodes(nor);
2291
2292 return 0;
2293 }
2294
2295 static int spi_nor_setup(struct spi_nor *nor,
2296 const struct spi_nor_hwcaps *hwcaps)
2297 {
2298 int ret;
2299
2300 if (nor->params->setup)
2301 ret = nor->params->setup(nor, hwcaps);
2302 else
2303 ret = spi_nor_default_setup(nor, hwcaps);
2304 if (ret)
2305 return ret;
2306
2307 return spi_nor_set_addr_nbytes(nor);
2308 }
2309
2310
2311
2312
2313
2314
2315 static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
2316 {
2317 if (nor->manufacturer && nor->manufacturer->fixups &&
2318 nor->manufacturer->fixups->default_init)
2319 nor->manufacturer->fixups->default_init(nor);
2320
2321 if (nor->info->fixups && nor->info->fixups->default_init)
2322 nor->info->fixups->default_init(nor);
2323 }
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334 static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
2335 {
2336 struct spi_nor_flash_parameter *params = nor->params;
2337 struct spi_nor_erase_map *map = &params->erase_map;
2338 const u8 no_sfdp_flags = nor->info->no_sfdp_flags;
2339 u8 i, erase_mask;
2340
2341 if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
2342 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
2343 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
2344 0, 8, SPINOR_OP_READ_1_1_2,
2345 SNOR_PROTO_1_1_2);
2346 }
2347
2348 if (no_sfdp_flags & SPI_NOR_QUAD_READ) {
2349 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
2350 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
2351 0, 8, SPINOR_OP_READ_1_1_4,
2352 SNOR_PROTO_1_1_4);
2353 }
2354
2355 if (no_sfdp_flags & SPI_NOR_OCTAL_READ) {
2356 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
2357 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
2358 0, 8, SPINOR_OP_READ_1_1_8,
2359 SNOR_PROTO_1_1_8);
2360 }
2361
2362 if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) {
2363 params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
2364 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
2365 0, 20, SPINOR_OP_READ_FAST,
2366 SNOR_PROTO_8_8_8_DTR);
2367 }
2368
2369 if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) {
2370 params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
2371
2372
2373
2374
2375 spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
2376 SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
2377 }
2378
2379
2380
2381
2382
2383 erase_mask = 0;
2384 i = 0;
2385 if (no_sfdp_flags & SECT_4K) {
2386 erase_mask |= BIT(i);
2387 spi_nor_set_erase_type(&map->erase_type[i], 4096u,
2388 SPINOR_OP_BE_4K);
2389 i++;
2390 }
2391 erase_mask |= BIT(i);
2392 spi_nor_set_erase_type(&map->erase_type[i], nor->info->sector_size,
2393 SPINOR_OP_SE);
2394 spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
2395 }
2396
2397
2398
2399
2400
2401
2402 static void spi_nor_init_flags(struct spi_nor *nor)
2403 {
2404 struct device_node *np = spi_nor_get_flash_node(nor);
2405 const u16 flags = nor->info->flags;
2406
2407 if (of_property_read_bool(np, "broken-flash-reset"))
2408 nor->flags |= SNOR_F_BROKEN_RESET;
2409
2410 if (flags & SPI_NOR_SWP_IS_VOLATILE)
2411 nor->flags |= SNOR_F_SWP_IS_VOLATILE;
2412
2413 if (flags & SPI_NOR_HAS_LOCK)
2414 nor->flags |= SNOR_F_HAS_LOCK;
2415
2416 if (flags & SPI_NOR_HAS_TB) {
2417 nor->flags |= SNOR_F_HAS_SR_TB;
2418 if (flags & SPI_NOR_TB_SR_BIT6)
2419 nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
2420 }
2421
2422 if (flags & SPI_NOR_4BIT_BP) {
2423 nor->flags |= SNOR_F_HAS_4BIT_BP;
2424 if (flags & SPI_NOR_BP3_SR_BIT6)
2425 nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
2426 }
2427
2428 if (flags & NO_CHIP_ERASE)
2429 nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
2430 }
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440 static void spi_nor_init_fixup_flags(struct spi_nor *nor)
2441 {
2442 const u8 fixup_flags = nor->info->fixup_flags;
2443
2444 if (fixup_flags & SPI_NOR_4B_OPCODES)
2445 nor->flags |= SNOR_F_4B_OPCODES;
2446
2447 if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE)
2448 nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
2449 }
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459 static void spi_nor_late_init_params(struct spi_nor *nor)
2460 {
2461 if (nor->manufacturer && nor->manufacturer->fixups &&
2462 nor->manufacturer->fixups->late_init)
2463 nor->manufacturer->fixups->late_init(nor);
2464
2465 if (nor->info->fixups && nor->info->fixups->late_init)
2466 nor->info->fixups->late_init(nor);
2467
2468 spi_nor_init_flags(nor);
2469 spi_nor_init_fixup_flags(nor);
2470
2471
2472
2473
2474
2475 if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
2476 spi_nor_init_default_locking_ops(nor);
2477 }
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487 static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
2488 {
2489 struct spi_nor_flash_parameter sfdp_params;
2490
2491 memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
2492
2493 if (spi_nor_parse_sfdp(nor)) {
2494 memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
2495 nor->flags &= ~SNOR_F_4B_OPCODES;
2496 }
2497 }
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508 static void spi_nor_init_params_deprecated(struct spi_nor *nor)
2509 {
2510 spi_nor_no_sfdp_init_params(nor);
2511
2512 spi_nor_manufacturer_init_params(nor);
2513
2514 if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ |
2515 SPI_NOR_QUAD_READ |
2516 SPI_NOR_OCTAL_READ |
2517 SPI_NOR_OCTAL_DTR_READ))
2518 spi_nor_sfdp_init_params_deprecated(nor);
2519 }
2520
2521
2522
2523
2524
2525
2526
2527 static void spi_nor_init_default_params(struct spi_nor *nor)
2528 {
2529 struct spi_nor_flash_parameter *params = nor->params;
2530 const struct flash_info *info = nor->info;
2531 struct device_node *np = spi_nor_get_flash_node(nor);
2532
2533 params->quad_enable = spi_nor_sr2_bit1_quad_enable;
2534 params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
2535 params->otp.org = &info->otp_org;
2536
2537
2538 nor->flags |= SNOR_F_HAS_16BIT_SR;
2539
2540
2541 params->writesize = 1;
2542 params->size = (u64)info->sector_size * info->n_sectors;
2543 params->page_size = info->page_size;
2544
2545 if (!(info->flags & SPI_NOR_NO_FR)) {
2546
2547 params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
2548
2549
2550 if (np && !of_property_read_bool(np, "m25p,fast-read"))
2551 params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
2552 }
2553
2554
2555 params->hwcaps.mask |= SNOR_HWCAPS_READ;
2556 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
2557 0, 0, SPINOR_OP_READ,
2558 SNOR_PROTO_1_1_1);
2559
2560 if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
2561 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
2562 0, 8, SPINOR_OP_READ_FAST,
2563 SNOR_PROTO_1_1_1);
2564
2565 params->hwcaps.mask |= SNOR_HWCAPS_PP;
2566 spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
2567 SPINOR_OP_PP, SNOR_PROTO_1_1_1);
2568 }
2606
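/**
 * spi_nor_init_params() - Initialize the flash's parameters and settings.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The flash parameters and settings are initialized in three steps: default
 * parameters first (spi_nor_init_default_params()), then either SFDP parsing,
 * the no-SFDP path or the deprecated path depending on the flash_info flags,
 * and finally the late fixups (spi_nor_late_init_params()), which may
 * overwrite what was set before.
 *
 * Return: 0 on success, -errno otherwise.
 */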
2607 static int spi_nor_init_params(struct spi_nor *nor)
2608 {
2609 int ret;
2610
2611 nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
2612 if (!nor->params)
2613 return -ENOMEM;
2614
2615 spi_nor_init_default_params(nor);
2616
2617 if (nor->info->parse_sfdp) {
2618 ret = spi_nor_parse_sfdp(nor);
2619 if (ret) {
2620 dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
2621 return ret;
2622 }
2623 } else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) {
2624 spi_nor_no_sfdp_init_params(nor);
2625 } else {
2626 spi_nor_init_params_deprecated(nor);
2627 }
2628
2629 spi_nor_late_init_params(nor);
2630
2631 return 0;
2632 }
2633
2634
2635
2636
2637
2638
2639
2640 static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
2641 {
2642 int ret;
2643
2644 if (!nor->params->octal_dtr_enable)
2645 return 0;
2646
2647 if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
2648 nor->write_proto == SNOR_PROTO_8_8_8_DTR))
2649 return 0;
2650
2651 if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
2652 return 0;
2653
2654 ret = nor->params->octal_dtr_enable(nor, enable);
2655 if (ret)
2656 return ret;
2657
2658 if (enable)
2659 nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
2660 else
2661 nor->reg_proto = SNOR_PROTO_1_1_1;
2662
2663 return 0;
2664 }
2665
2666
2667
2668
2669
2670
2671
2672 static int spi_nor_quad_enable(struct spi_nor *nor)
2673 {
2674 if (!nor->params->quad_enable)
2675 return 0;
2676
2677 if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
2678 spi_nor_get_protocol_width(nor->write_proto) == 4))
2679 return 0;
2680
2681 return nor->params->quad_enable(nor);
2682 }
2683
2684 static int spi_nor_init(struct spi_nor *nor)
2685 {
2686 int err;
2687
2688 err = spi_nor_octal_dtr_enable(nor, true);
2689 if (err) {
2690 dev_dbg(nor->dev, "octal mode not supported\n");
2691 return err;
2692 }
2693
2694 err = spi_nor_quad_enable(nor);
2695 if (err) {
2696 dev_dbg(nor->dev, "quad mode not supported\n");
2697 return err;
2698 }
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710 if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
2711 (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
2712 nor->flags & SNOR_F_SWP_IS_VOLATILE))
2713 spi_nor_try_unlock_all(nor);
2714
2715 if (nor->addr_nbytes == 4 &&
2716 nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
2717 !(nor->flags & SNOR_F_4B_OPCODES)) {
2718
2719
2720
2721
2722
2723
2724
2725 WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
2726 "enabling reset hack; may not recover from unexpected reboots\n");
2727 return nor->params->set_4byte_addr_mode(nor, true);
2728 }
2729
2730 return 0;
2731 }
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
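/**
 * spi_nor_soft_reset() - Perform a software reset
 * @nor:	pointer to 'struct spi_nor'
 *
 * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which
 * resets the device to its power-on-reset state. Not every flash supports
 * this sequence; the same opcodes might be used by another flash for a
 * different purpose.
 */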
2748 static void spi_nor_soft_reset(struct spi_nor *nor)
2749 {
2750 struct spi_mem_op op;
2751 int ret;
2752
2753 op = (struct spi_mem_op)SPINOR_SRSTEN_OP;
2754
2755 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
2756
2757 ret = spi_mem_exec_op(nor->spimem, &op);
2758 if (ret) {
2759 dev_warn(nor->dev, "Software reset failed: %d\n", ret);
2760 return;
2761 }
2762
2763 op = (struct spi_mem_op)SPINOR_SRST_OP;
2764
2765 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
2766
2767 ret = spi_mem_exec_op(nor->spimem, &op);
2768 if (ret) {
2769 dev_warn(nor->dev, "Software reset failed: %d\n", ret);
2770 return;
2771 }
2772
2773
2774
2775
2776
2777
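/*
 * The reset is not instantaneous; give the flash some time to complete it
 * before the next command is issued.
 */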
2778 usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
2779 }
2780
2781
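/* mtd suspend handler: switch out of octal DTR mode, if it was enabled. */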
2782 static int spi_nor_suspend(struct mtd_info *mtd)
2783 {
2784 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2785 int ret;
2786
2787
2788 ret = spi_nor_octal_dtr_enable(nor, false);
2789 if (ret)
2790 dev_err(nor->dev, "suspend() failed\n");
2791
2792 return ret;
2793 }
2794
2795
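/* mtd resume handler: re-run spi_nor_init() to restore the flash configuration. */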
2796 static void spi_nor_resume(struct mtd_info *mtd)
2797 {
2798 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2799 struct device *dev = nor->dev;
2800 int ret;
2801
2802
2803 ret = spi_nor_init(nor);
2804 if (ret)
2805 dev_err(dev, "resume() failed\n");
2806 }
2807
2808 static int spi_nor_get_device(struct mtd_info *mtd)
2809 {
2810 struct mtd_info *master = mtd_get_master(mtd);
2811 struct spi_nor *nor = mtd_to_spi_nor(master);
2812 struct device *dev;
2813
2814 if (nor->spimem)
2815 dev = nor->spimem->spi->controller->dev.parent;
2816 else
2817 dev = nor->dev;
2818
2819 if (!try_module_get(dev->driver->owner))
2820 return -ENODEV;
2821
2822 return 0;
2823 }
2824
2825 static void spi_nor_put_device(struct mtd_info *mtd)
2826 {
2827 struct mtd_info *master = mtd_get_master(mtd);
2828 struct spi_nor *nor = mtd_to_spi_nor(master);
2829 struct device *dev;
2830
2831 if (nor->spimem)
2832 dev = nor->spimem->spi->controller->dev.parent;
2833 else
2834 dev = nor->dev;
2835
2836 module_put(dev->driver->owner);
2837 }
2838
2839 void spi_nor_restore(struct spi_nor *nor)
2840 {
2841
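/* restore the addressing mode and, if requested, soft-reset the flash */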
2842 if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
2843 nor->flags & SNOR_F_BROKEN_RESET)
2844 nor->params->set_4byte_addr_mode(nor, false);
2845
2846 if (nor->flags & SNOR_F_SOFT_RESET)
2847 spi_nor_soft_reset(nor);
2848 }
2849 EXPORT_SYMBOL_GPL(spi_nor_restore);
2850
2851 static const struct flash_info *spi_nor_match_name(struct spi_nor *nor,
2852 const char *name)
2853 {
2854 unsigned int i, j;
2855
2856 for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
2857 for (j = 0; j < manufacturers[i]->nparts; j++) {
2858 if (!strcmp(name, manufacturers[i]->parts[j].name)) {
2859 nor->manufacturer = manufacturers[i];
2860 return &manufacturers[i]->parts[j];
2861 }
2862 }
2863 }
2864
2865 return NULL;
2866 }
2867
2868 static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
2869 const char *name)
2870 {
2871 const struct flash_info *info = NULL;
2872
2873 if (name)
2874 info = spi_nor_match_name(nor, name);
2875
2876 if (!info)
2877 return spi_nor_detect(nor);
2878
2879
2880
2881
2882
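/*
 * If the caller passed the name of a flash that can also be identified by
 * its JEDEC ID, read the ID and check that it matches.
 */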
2883 if (name && info->id_len) {
2884 const struct flash_info *jinfo;
2885
2886 jinfo = spi_nor_detect(nor);
2887 if (IS_ERR(jinfo)) {
2888 return jinfo;
2889 } else if (jinfo != info) {
2890
2891
2892
2893
2894
2895
2896
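/*
 * The JEDEC ID takes precedence over the name supplied by the platform,
 * so warn and use the detected entry instead.
 */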
2897 dev_warn(nor->dev, "found %s, expected %s\n",
2898 jinfo->name, info->name);
2899 info = jinfo;
2900 }
2901 }
2902
2903 return info;
2904 }
2905
2906 static void spi_nor_set_mtd_info(struct spi_nor *nor)
2907 {
2908 struct mtd_info *mtd = &nor->mtd;
2909 struct device *dev = nor->dev;
2910
2911 spi_nor_set_mtd_locking_ops(nor);
2912 spi_nor_set_mtd_otp_ops(nor);
2913
2914 mtd->dev.parent = dev;
2915 if (!mtd->name)
2916 mtd->name = dev_name(dev);
2917 mtd->type = MTD_NORFLASH;
2918 mtd->flags = MTD_CAP_NORFLASH;
2919 if (nor->info->flags & SPI_NOR_NO_ERASE)
2920 mtd->flags |= MTD_NO_ERASE;
2921 else
2922 mtd->_erase = spi_nor_erase;
2923 mtd->writesize = nor->params->writesize;
2924 mtd->writebufsize = nor->params->page_size;
2925 mtd->size = nor->params->size;
2926 mtd->_read = spi_nor_read;
2927
2928 if (!mtd->_write)
2929 mtd->_write = spi_nor_write;
2930 mtd->_suspend = spi_nor_suspend;
2931 mtd->_resume = spi_nor_resume;
2932 mtd->_get_device = spi_nor_get_device;
2933 mtd->_put_device = spi_nor_put_device;
2934 }
2935
2936 int spi_nor_scan(struct spi_nor *nor, const char *name,
2937 const struct spi_nor_hwcaps *hwcaps)
2938 {
2939 const struct flash_info *info;
2940 struct device *dev = nor->dev;
2941 struct mtd_info *mtd = &nor->mtd;
2942 int ret;
2943 int i;
2944
2945 ret = spi_nor_check(nor);
2946 if (ret)
2947 return ret;
2948
2949
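/* Assume Single SPI (1S-1S-1S) for all command types until the capabilities are evaluated. */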
2950 nor->reg_proto = SNOR_PROTO_1_1_1;
2951 nor->read_proto = SNOR_PROTO_1_1_1;
2952 nor->write_proto = SNOR_PROTO_1_1_1;
2953
2954
2955
2956
2957
2958
2959
2960
2961
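/*
 * The bounce buffer is needed early: register accesses that go through the
 * spi-mem layer must use DMA-able memory, not the stack. It may be
 * reallocated later if the page size turns out to be larger than PAGE_SIZE.
 */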
2962 nor->bouncebuf_size = PAGE_SIZE;
2963 nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
2964 GFP_KERNEL);
2965 if (!nor->bouncebuf)
2966 return -ENOMEM;
2967
2968 info = spi_nor_get_flash_info(nor, name);
2969 if (IS_ERR(info))
2970 return PTR_ERR(info);
2971
2972 nor->info = info;
2973
2974 mutex_init(&nor->lock);
2975
2976
2977 ret = spi_nor_init_params(nor);
2978 if (ret)
2979 return ret;
2980
2981
2982
2983
2984
2985
2986
2987
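/*
 * Select the opcodes, dummy cycles and protocols for (Fast) Read, Page
 * Program and Sector Erase, based on the flash parameters and the
 * controller capabilities.
 */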
2988 ret = spi_nor_setup(nor, hwcaps);
2989 if (ret)
2990 return ret;
2991
2992
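/* Send the commands needed to bring the flash into its working state. */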
2993 ret = spi_nor_init(nor);
2994 if (ret)
2995 return ret;
2996
2997
2998 spi_nor_set_mtd_info(nor);
2999
3000 dev_info(dev, "%s (%lld Kbytes)\n", info->name,
3001 (long long)mtd->size >> 10);
3002
3003 dev_dbg(dev,
3004 "mtd .name = %s, .size = 0x%llx (%lldMiB), "
3005 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
3006 mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
3007 mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
3008
3009 if (mtd->numeraseregions)
3010 for (i = 0; i < mtd->numeraseregions; i++)
3011 dev_dbg(dev,
3012 "mtd.eraseregions[%d] = { .offset = 0x%llx, "
3013 ".erasesize = 0x%.8x (%uKiB), "
3014 ".numblocks = %d }\n",
3015 i, (long long)mtd->eraseregions[i].offset,
3016 mtd->eraseregions[i].erasesize,
3017 mtd->eraseregions[i].erasesize / 1024,
3018 mtd->eraseregions[i].numblocks);
3019 return 0;
3020 }
3021 EXPORT_SYMBOL_GPL(spi_nor_scan);
3022
3023 static int spi_nor_create_read_dirmap(struct spi_nor *nor)
3024 {
3025 struct spi_mem_dirmap_info info = {
3026 .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
3027 SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
3028 SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
3029 SPI_MEM_OP_DATA_IN(0, NULL, 0)),
3030 .offset = 0,
3031 .length = nor->params->size,
3032 };
3033 struct spi_mem_op *op = &info.op_tmpl;
3034
3035 spi_nor_spimem_setup_op(nor, op, nor->read_proto);
3036
3037
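/* The template's dummy field is expressed in bytes; convert the configured dummy clock cycles. */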
3038 op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
3039 if (spi_nor_protocol_is_dtr(nor->read_proto))
3040 op->dummy.nbytes *= 2;
3041
3042
3043
3044
3045
3046
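/*
 * spi_nor_spimem_setup_op() only sets the data buswidth when the data
 * length is non-zero; the template carries no data yet, so set it
 * explicitly here.
 */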
3047 op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
3048
3049 nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
3050 &info);
3051 return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
3052 }
3053
3054 static int spi_nor_create_write_dirmap(struct spi_nor *nor)
3055 {
3056 struct spi_mem_dirmap_info info = {
3057 .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
3058 SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
3059 SPI_MEM_OP_NO_DUMMY,
3060 SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
3061 .offset = 0,
3062 .length = nor->params->size,
3063 };
3064 struct spi_mem_op *op = &info.op_tmpl;
3065
3066 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
3067 op->addr.nbytes = 0;
3068
3069 spi_nor_spimem_setup_op(nor, op, nor->write_proto);
3070
3071
3072
3073
3074
3075
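/*
 * Same as for the read direct mapping: the template carries no data, so
 * the data buswidth must be set explicitly.
 */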
3076 op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
3077
3078 nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
3079 &info);
3080 return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
3081 }
3082
3083 static int spi_nor_probe(struct spi_mem *spimem)
3084 {
3085 struct spi_device *spi = spimem->spi;
3086 struct flash_platform_data *data = dev_get_platdata(&spi->dev);
3087 struct spi_nor *nor;
3088
3089
3090
3091
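/*
 * Advertise all hardware capabilities; spi_nor_scan() masks out whatever
 * the flash and the controller do not actually support.
 */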
3092 const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
3093 char *flash_name;
3094 int ret;
3095
3096 nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
3097 if (!nor)
3098 return -ENOMEM;
3099
3100 nor->spimem = spimem;
3101 nor->dev = &spi->dev;
3102 spi_nor_set_flash_node(nor, spi->dev.of_node);
3103
3104 spi_mem_set_drvdata(spimem, nor);
3105
3106 if (data && data->name)
3107 nor->mtd.name = data->name;
3108
3109 if (!nor->mtd.name)
3110 nor->mtd.name = spi_mem_get_name(spimem);
3111
3112
3113
3114
3115
3116
3117
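/*
 * Pick the name to match against the flash_info tables: prefer the
 * platform data "type", fall back to the SPI modalias, and let the generic
 * "spi-nor" modalias trigger pure JEDEC auto-detection.
 */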
3118 if (data && data->type)
3119 flash_name = data->type;
3120 else if (!strcmp(spi->modalias, "spi-nor"))
3121 flash_name = NULL;
3122 else
3123 flash_name = spi->modalias;
3124
3125 ret = spi_nor_scan(nor, flash_name, &hwcaps);
3126 if (ret)
3127 return ret;
3128
3129 spi_nor_debugfs_register(nor);
3130
3131
3132
3133
3134
3135
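/*
 * The default bounce buffer is PAGE_SIZE bytes; if the flash page is
 * larger, replace it so that a whole page still fits.
 */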
3136 if (nor->params->page_size > PAGE_SIZE) {
3137 nor->bouncebuf_size = nor->params->page_size;
3138 devm_kfree(nor->dev, nor->bouncebuf);
3139 nor->bouncebuf = devm_kmalloc(nor->dev,
3140 nor->bouncebuf_size,
3141 GFP_KERNEL);
3142 if (!nor->bouncebuf)
3143 return -ENOMEM;
3144 }
3145
3146 ret = spi_nor_create_read_dirmap(nor);
3147 if (ret)
3148 return ret;
3149
3150 ret = spi_nor_create_write_dirmap(nor);
3151 if (ret)
3152 return ret;
3153
3154 return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
3155 data ? data->nr_parts : 0);
3156 }
3157
3158 static int spi_nor_remove(struct spi_mem *spimem)
3159 {
3160 struct spi_nor *nor = spi_mem_get_drvdata(spimem);
3161
3162 spi_nor_restore(nor);
3163
3164
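/* Clean up the MTD device. */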
3165 return mtd_device_unregister(&nor->mtd);
3166 }
3167
3168 static void spi_nor_shutdown(struct spi_mem *spimem)
3169 {
3170 struct spi_nor *nor = spi_mem_get_drvdata(spimem);
3171
3172 spi_nor_restore(nor);
3173 }
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
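/*
 * Legacy spi_device_id table, kept so that existing platform devices and
 * device trees that name a specific part (or "spi-nor") keep binding to
 * this driver; the part itself is still identified at probe time.
 */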
3187 static const struct spi_device_id spi_nor_dev_ids[] = {
3188
3189
3190
3191
3192
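/* Generic name: the exact part is auto-detected from its JEDEC ID. */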
3193 {"spi-nor"},
3194
3195
3196
3197
3198
3199 {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
3200
3201
3202
3203
3204
3205 {"at25df321a"}, {"at25df641"}, {"at26df081a"},
3206 {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
3207 {"mx25l25635e"},{"mx66l51235l"},
3208 {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
3209 {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"},
3210 {"s25fl064k"},
3211 {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
3212 {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"},
3213 {"m25p64"}, {"m25p128"},
3214 {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
3215 {"w25q80bl"}, {"w25q128"}, {"w25q256"},
3216
3217
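/* Parts without a JEDEC ID; they must be named explicitly. */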
3218 {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
3219 {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
3220 {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
3221
3222
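/* Everspin MRAM parts (no JEDEC ID). */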
3223 { "mr25h128" },
3224 { "mr25h256" },
3225 { "mr25h10" },
3226 { "mr25h40" },
3227
3228 { },
3229 };
3230 MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
3231
3232 static const struct of_device_id spi_nor_of_table[] = {
3233
3234
3235
3236
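/*
 * Generic compatible for flashes that can be identified by their JEDEC ID;
 * preferred over naming a specific part.
 */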
3237 { .compatible = "jedec,spi-nor" },
3238 { },
3239 };
3240 MODULE_DEVICE_TABLE(of, spi_nor_of_table);
3241
3242
3243
3244
3245
3246
3247 static struct spi_mem_driver spi_nor_driver = {
3248 .spidrv = {
3249 .driver = {
3250 .name = "spi-nor",
3251 .of_match_table = spi_nor_of_table,
3252 .dev_groups = spi_nor_sysfs_groups,
3253 },
3254 .id_table = spi_nor_dev_ids,
3255 },
3256 .probe = spi_nor_probe,
3257 .remove = spi_nor_remove,
3258 .shutdown = spi_nor_shutdown,
3259 };
3260 module_spi_mem_driver(spi_nor_driver);
3261
3262 MODULE_LICENSE("GPL v2");
3263 MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
3264 MODULE_AUTHOR("Mike Lavender");
3265 MODULE_DESCRIPTION("framework for SPI NOR");