0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0027
0028 #include <linux/module.h>
0029 #include <linux/delay.h>
0030 #include <linux/errno.h>
0031 #include <linux/err.h>
0032 #include <linux/sched.h>
0033 #include <linux/slab.h>
0034 #include <linux/mm.h>
0035 #include <linux/types.h>
0036 #include <linux/mtd/mtd.h>
0037 #include <linux/mtd/nand.h>
0038 #include <linux/mtd/nand-ecc-sw-hamming.h>
0039 #include <linux/mtd/nand-ecc-sw-bch.h>
0040 #include <linux/interrupt.h>
0041 #include <linux/bitops.h>
0042 #include <linux/io.h>
0043 #include <linux/mtd/partitions.h>
0044 #include <linux/of.h>
0045 #include <linux/of_gpio.h>
0046 #include <linux/gpio/consumer.h>
0047
0048 #include "internals.h"
0049
0050 static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
0051 struct mtd_pairing_info *info)
0052 {
0053 int lastpage = (mtd->erasesize / mtd->writesize) - 1;
0054 int dist = 3;
0055
0056 if (page == lastpage)
0057 dist = 2;
0058
0059 if (!page || (page & 1)) {
0060 info->group = 0;
0061 info->pair = (page + 1) / 2;
0062 } else {
0063 info->group = 1;
0064 info->pair = (page + 1 - dist) / 2;
0065 }
0066
0067 return 0;
0068 }
0069
/*
 * Inverse of nand_pairing_dist3_get_info(): map a (group, pair) tuple back
 * to the corresponding write unit (page) within the erase block.
 */
static int nand_pairing_dist3_get_wunit(struct nand_chip *chip_unused_placeholder,
					const struct mtd_pairing_info *info);
0093
/* Paired-pages scheme with a pairing distance of 3 (2 for the last pair) */
const struct mtd_pairing_scheme dist3_pairing_scheme = {
	.ngroups = 2,
	.get_info = nand_pairing_dist3_get_info,
	.get_wunit = nand_pairing_dist3_get_wunit,
};
0099
0100 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
0101 {
0102 int ret = 0;
0103
0104
0105 if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
0106 pr_debug("%s: unaligned address\n", __func__);
0107 ret = -EINVAL;
0108 }
0109
0110
0111 if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
0112 pr_debug("%s: length not block aligned\n", __func__);
0113 ret = -EINVAL;
0114 }
0115
0116 return ret;
0117 }
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
/**
 * nand_extract_bits - Copy an arbitrarily bit-aligned run of bits
 * @dst: destination buffer
 * @dst_off: bit offset into @dst at which writing starts
 * @src: source buffer
 * @src_off: bit offset into @src at which reading starts
 * @nbits: number of bits to copy
 *
 * Copies @nbits bits from @src into @dst; neither offset needs to be byte
 * aligned. Bits outside the destination range are preserved.
 */
void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
		       unsigned int src_off, unsigned int nbits)
{
	unsigned int tmp, n;

	/* Normalize: advance to the containing byte, keep the bit remainder */
	dst += dst_off / 8;
	dst_off %= 8;
	src += src_off / 8;
	src_off %= 8;

	while (nbits) {
		/* Largest run staying within one src byte AND one dst byte */
		n = min3(8 - dst_off, 8 - src_off, nbits);

		tmp = (*src >> src_off) & GENMASK(n - 1, 0);
		/* Clear the destination bit range before ORing new bits in */
		*dst &= ~GENMASK(n - 1 + dst_off, dst_off);
		*dst |= tmp << dst_off;

		dst_off += n;
		if (dst_off >= 8) {
			dst++;
			dst_off -= 8;
		}

		src_off += n;
		if (src_off >= 8) {
			src++;
			src_off -= 8;
		}

		nbits -= n;
	}
}
EXPORT_SYMBOL_GPL(nand_extract_bits);
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
/**
 * nand_select_target() - Select a NAND target (A.K.A. die)
 * @chip: NAND chip object
 * @cs: the CS line to select. This CS id is always from the chip PoV, not
 *	the controller one
 *
 * Select a NAND target so that further operations executed on @chip go to
 * the selected NAND target.
 */
void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
	/*
	 * NOTE(review): the guard uses '>' so cs == nanddev_ntargets() slips
	 * through; it looks like it should be '>='. Confirm intent before
	 * changing, since WARN_ON is user visible.
	 */
	if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
		return;

	chip->cur_cs = cs;

	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, cs);
}
EXPORT_SYMBOL_GPL(nand_select_target);
0187
0188
0189
0190
0191
0192
0193
0194
/**
 * nand_deselect_target() - Deselect the currently selected target
 * @chip: NAND chip object
 *
 * Deselect the currently selected NAND target. The result of operations
 * executed on @chip after deselection is undefined.
 */
void nand_deselect_target(struct nand_chip *chip)
{
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, -1);

	/* Record "nothing selected" only after the controller hook ran */
	chip->cur_cs = -1;
}
EXPORT_SYMBOL_GPL(nand_deselect_target);
0203
0204
0205
0206
0207
0208
0209
/**
 * nand_release_device - [GENERIC] release chip
 * @chip: NAND chip object
 *
 * Release the controller and the chip for other operations. Must mirror
 * nand_get_device(), which takes chip->lock first, then controller->lock.
 */
static void nand_release_device(struct nand_chip *chip)
{
	/* Unlock in reverse acquisition order: controller first, then chip */
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
/**
 * nand_bbm_get_next_page - Get the next page for bad block markers
 * @chip: NAND chip object
 * @page: First page to start checking for bad block marker usage
 *
 * Returns an integer that corresponds to the page offset within a block, for
 * a page that is used to store bad block markers. If no more pages are
 * available, -EINVAL is returned.
 */
int nand_bbm_get_next_page(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int last_page = ((mtd->erasesize - mtd->writesize) >>
			 chip->page_shift) & chip->pagemask;
	unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
		| NAND_BBM_LASTPAGE;

	/* No BBM placement flag set: the marker lives in page 0 by default */
	if (page == 0 && !(chip->options & bbm_flags))
		return 0;
	if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
		return 0;
	if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
		return 1;
	if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
		return last_page;

	return -EINVAL;
}
0245
0246
0247
0248
0249
0250
0251
0252
/**
 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * Check if the block at @ofs is bad by reading the bad block marker byte
 * from the OOB area of every page that may carry one.
 */
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
	int first_page, page_offset;
	int res;
	u8 bad;

	first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	page_offset = nand_bbm_get_next_page(chip, 0);

	while (page_offset >= 0) {
		res = chip->ecc.read_oob(chip, first_page + page_offset);
		if (res < 0)
			return res;

		bad = chip->oob_poi[chip->badblockpos];

		/*
		 * With the default 8 badblockbits any value != 0xFF marks the
		 * block bad; otherwise require enough zero bits in the marker
		 * to tolerate bitflips in the OOB area.
		 */
		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return 0;
}
0281
0282
0283
0284
0285
0286
0287
0288
0289
0290
0291
0292 static bool nand_region_is_secured(struct nand_chip *chip, loff_t offset, u64 size)
0293 {
0294 int i;
0295
0296
0297 for (i = 0; i < chip->nr_secure_regions; i++) {
0298 const struct nand_secure_region *region = &chip->secure_regions[i];
0299
0300 if (offset + size <= region->offset ||
0301 offset >= region->offset + region->size)
0302 continue;
0303
0304 pr_debug("%s: Region 0x%llx - 0x%llx is secured!",
0305 __func__, offset, offset + size);
0306
0307 return true;
0308 }
0309
0310 return false;
0311 }
0312
/* Check if the block at @ofs is bad using the on-flash marker */
static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Some chips have no usable BBM at all (quirk) */
	if (chip->options & NAND_NO_BBM_QUIRK)
		return 0;

	/* Secure regions may not be read at all */
	if (nand_region_is_secured(chip, ofs, mtd->erasesize))
		return -EIO;

	/* Expert analysis mode: report every block as good */
	if (mtd_check_expert_analysis_mode())
		return 0;

	if (chip->legacy.block_bad)
		return chip->legacy.block_bad(chip, ofs);

	return nand_block_bad(chip, ofs);
}
0332
0333
0334
0335
0336
0337
0338
0339
0340
/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @chip: NAND chip structure
 *
 * Lock the device and its controller for exclusive access. If the chip is
 * suspended, drop the chip lock, sleep until resume, then retry the whole
 * sequence so both locks are always taken in the same order.
 */
static void nand_get_device(struct nand_chip *chip)
{
	/* Lock order: chip->lock first, then chip->controller->lock */
	while (1) {
		mutex_lock(&chip->lock);
		if (!chip->suspended) {
			mutex_lock(&chip->controller->lock);
			return;
		}
		mutex_unlock(&chip->lock);

		/* Chip is suspended: wait (unlocked) for it to come back */
		wait_event(chip->resume_wq, !chip->suspended);
	}
}
0355
0356
0357
0358
0359
0360
0361
0362
0363 static int nand_check_wp(struct nand_chip *chip)
0364 {
0365 u8 status;
0366 int ret;
0367
0368
0369 if (chip->options & NAND_BROKEN_XD)
0370 return 0;
0371
0372
0373 ret = nand_status_op(chip, &status);
0374 if (ret)
0375 return ret;
0376
0377 return status & NAND_STATUS_WP ? 0 : 1;
0378 }
0379
0380
0381
0382
0383
0384
0385
0386
/**
 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 * @chip: NAND chip object
 * @oob: oob data buffer
 * @len: oob data write length
 * @ops: oob ops structure
 *
 * Copy @len client OOB bytes into chip->oob_poi according to @ops->mode.
 * Return: pointer just past the consumed part of @oob.
 */
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
			      struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/*
	 * Initialise to all 0xFF so any OOB byte we do not fill is left
	 * unprogrammed, and no stale data from a previous read leaks in.
	 */
	memset(chip->oob_poi, 0xff, mtd->oobsize);

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Straight copy at the caller-chosen OOB offset */
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Scatter the client bytes into the free OOB regions */
		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		/* Invalid mode is a caller bug, not a runtime condition */
		BUG();
	}
	return NULL;
}
0417
0418
0419
0420
0421
0422
0423
0424
0425
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band.
 *
 * Return: 0 on success, a negative errno on failure.
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
		 __func__, (unsigned int)to, (int)ops->ooblen);

	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
			 __func__);
		return -EINVAL;
	}

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, to, ops->ooblen))
		return -EIO;

	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip before the OOB write. Some chips clear the whole
	 * data page if this is skipped (historical DiskOnChip behavior).
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
0491
0492
0493
0494
0495
0496
0497
0498
0499
0500
/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via the BBM
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * Default bad block marking: write the marker into the OOB area of every
 * page that may carry one. The first error encountered is retained, but the
 * remaining marker pages are still attempted.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, page_offset;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit devices: marker is 2 bytes at an even column */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write to first/second/last page(s), as the chip requires */
	page_offset = nand_bbm_get_next_page(chip, 0);

	while (page_offset >= 0) {
		res = nand_do_write_oob(chip,
					ofs + (page_offset * mtd->writesize),
					&ops);

		/* Keep the first failure but keep marking the other pages */
		if (!ret)
			ret = res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return ret;
}
0534
0535
0536
0537
0538
0539
0540 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
0541 {
0542 if (chip->legacy.block_markbad)
0543 return chip->legacy.block_markbad(chip, ofs);
0544
0545 return nand_default_block_markbad(chip, ofs);
0546 }
0547
0548
0549
0550
0551
0552
0553
0554
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * Generic bad block marking, in order:
 *  (1) erase the affected block, so the OOB marker can be written cleanly
 *  (2) write the bad block marker to the OOB area of the block (unless
 *      NAND_BBT_NO_OOB_BBM is set)
 *  (3) update the BBT
 *
 * The first error from (2) or (3) is retained; the remaining steps are
 * still executed and the error is returned at the end.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB; the result is ignored */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB, under the device lock */
		nand_get_device(chip);

		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
0600
0601
0602
0603
0604
0605
0606
0607
0608 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
0609 {
0610 struct nand_chip *chip = mtd_to_nand(mtd);
0611
0612 if (!chip->bbt)
0613 return 0;
0614
0615 return nand_isreserved_bbt(chip, ofs);
0616 }
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
0628 {
0629
0630 if (chip->bbt)
0631 return nand_isbad_bbt(chip, ofs, allowbbt);
0632
0633 return nand_isbad_bbm(chip, ofs);
0634 }
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
/**
 * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
 * @chip: NAND chip structure
 * @timeout_ms: Timeout in ms
 *
 * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
 * Requires ->exec_op() support.
 *
 * Return: 0 if the NAND chip is ready, a negative error otherwise.
 */
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
	const struct nand_interface_config *conf;
	u8 status = 0;
	int ret;

	if (!nand_has_exec_op(chip))
		return -ENOTSUPP;

	/* Wait tWB before polling the STATUS reg. */
	conf = nand_get_interface_config(chip);
	ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max));

	ret = nand_status_op(chip, NULL);
	if (ret)
		return ret;

	/*
	 * +1 below is necessary: if we are in the last fraction of the
	 * current jiffy and msecs_to_jiffies() is 1, we would otherwise
	 * only wait that small fraction, possibly causing a false timeout.
	 */
	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		ret = nand_read_data_op(chip, &status, sizeof(status), true,
					false);
		if (ret)
			break;

		if (status & NAND_STATUS_READY)
			break;

		/*
		 * Typical lowest execution time for a tR on most NANDs is
		 * 10-15us; polling faster than this only burns CPU.
		 */
		udelay(10);
	} while (time_before(jiffies, timeout_ms));

	/*
	 * Exit READ_STATUS mode so real data can be read from the bus in
	 * case a DATA_IN instruction follows this wait.
	 */
	nand_exit_status_op(chip);

	if (ret)
		return ret;

	return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
};
EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
0706
0707
0708
0709
0710
0711
0712
0713
0714
0715
0716
0717
0718
0719
0720
0721 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
0722 unsigned long timeout_ms)
0723 {
0724
0725
0726
0727
0728
0729
0730
0731 timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
0732 do {
0733 if (gpiod_get_value_cansleep(gpiod))
0734 return 0;
0735
0736 cond_resched();
0737 } while (time_before(jiffies, timeout_ms));
0738
0739 return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
0740 };
0741 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
0742
0743
0744
0745
0746
0747
0748
0749
0750
0751
/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @chip: NAND chip structure
 * @timeo: timeout in ms
 *
 * Wait for command done, polling without sleeping. Used instead of the
 * regular wait path when we are in interrupt/panic context (e.g. writing
 * an oops through mtdoops).
 */
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
	int i;
	for (i = 0; i < timeo; i++) {
		if (chip->legacy.dev_ready) {
			if (chip->legacy.dev_ready(chip))
				break;
		} else {
			int ret;
			u8 status;

			/* Read back the 8-bit status byte and check RDY */
			ret = nand_read_data_op(chip, &status, sizeof(status),
						true, false);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}
0774
0775 static bool nand_supports_get_features(struct nand_chip *chip, int addr)
0776 {
0777 return (chip->parameters.supports_set_get_features &&
0778 test_bit(addr, chip->parameters.get_feature_list));
0779 }
0780
0781 static bool nand_supports_set_features(struct nand_chip *chip, int addr)
0782 {
0783 return (chip->parameters.supports_set_get_features &&
0784 test_bit(addr, chip->parameters.set_feature_list));
0785 }
0786
0787
0788
0789
0790
0791
0792
0793
0794
0795
/**
 * nand_reset_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the data interface and timings to ONFI SDR timing mode 0.
 *
 * Return: 0 for success or negative error code otherwise.
 */
static int nand_reset_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * Per the ONFI specification, a Reset (FFh) issued in SDR timing
	 * mode 0 is recognized by a device in any timing mode, so mode 0 is
	 * the only interface configuration guaranteed to work after reset.
	 * Configure the controller accordingly before anything else.
	 */
	chip->current_interface_config = nand_get_reset_interface_config();
	ret = ops->setup_interface(chip, chipnr,
				   chip->current_interface_config);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}
0826
0827
0828
0829
0830
0831
0832
0833
0834
0835
0836
/**
 * nand_setup_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Configure what has been reported to be the best data interface and NAND
 * timings supported by the chip and the driver. On failure to get the
 * chip's acknowledgment, fall back to mode 0 and reset the chip.
 *
 * Return: 0 for success or negative error code otherwise.
 */
static int nand_setup_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * No best config selected means both chip and controller are still
	 * in (reset) timing mode 0: nothing to change. Also, at probe time
	 * the set/get_features calls below could not work anyway since the
	 * parameter page is not available yet.
	 */
	if (!chip->best_interface_config)
		return 0;

	/* Encode both the timing mode and the data interface type */
	request = chip->best_interface_config->timings.mode;
	if (nand_interface_is_sdr(chip->best_interface_config))
		request |= ONFI_DATA_INTERFACE_SDR;
	else
		request |= ONFI_DATA_INTERFACE_NVDDR;
	tmode_param[0] = request;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
	if (ret)
		return ret;

	/* Check the mode was accepted by the chip, when it can report it */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		goto update_interface_config;

	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	if (request != tmode_param[0]) {
		pr_warn("%s timing mode %d not acknowledged by the NAND chip\n",
			nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR",
			chip->best_interface_config->timings.mode);
		pr_debug("NAND chip would work in %s timing mode %d\n",
			 tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR",
			 (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0]));
		goto err_reset_chip;
	}

update_interface_config:
	chip->current_interface_config = chip->best_interface_config;

	return 0;

err_reset_chip:
	/*
	 * The chip did not acknowledge the requested mode: fall back to
	 * interface mode 0 and reset the die to a known state.
	 */
	nand_reset_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}
0917
0918
0919
0920
0921
0922
0923
0924
0925
0926
0927
/**
 * nand_choose_best_sdr_timings - Pick the best SDR timings that both the
 *                                NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can eventually be updated)
 * @spec_timings: specific timings, when not fitting the ONFI specification
 *
 * If specific timings are provided, try them first; on controller refusal,
 * fall back to progressively slower ONFI modes.
 */
int nand_choose_best_sdr_timings(struct nand_chip *chip,
				 struct nand_interface_config *iface,
				 struct nand_sdr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int best_mode = 0, mode, ret = -EOPNOTSUPP;

	iface->type = NAND_SDR_IFACE;

	if (spec_timings) {
		iface->timings.sdr = *spec_timings;
		iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);

		/* Verify the controller supports the requested interface */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fall back to slower ONFI modes from here downwards */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		/* Highest mode bit advertised by the ONFI parameter page */
		best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1;
	}

	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			break;
		}
	}

	return ret;
}
0968
0969
0970
0971
0972
0973
0974
0975
0976
0977
0978
/**
 * nand_choose_best_nvddr_timings - Pick the best NV-DDR timings that both
 *                                  the NAND controller and the chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can eventually be updated)
 * @spec_timings: specific timings, when not fitting the ONFI specification
 *
 * If specific timings are provided, try them first; on controller refusal,
 * fall back to progressively slower ONFI modes.
 */
int nand_choose_best_nvddr_timings(struct nand_chip *chip,
				   struct nand_interface_config *iface,
				   struct nand_nvddr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int best_mode = 0, mode, ret = -EOPNOTSUPP;

	iface->type = NAND_NVDDR_IFACE;

	if (spec_timings) {
		iface->timings.nvddr = *spec_timings;
		iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings);

		/* Verify the controller supports the requested interface */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fall back to slower ONFI modes from here downwards */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		/* Highest mode bit advertised by the ONFI parameter page */
		best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1;
	}

	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			break;
		}
	}

	return ret;
}
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029 static int nand_choose_best_timings(struct nand_chip *chip,
1030 struct nand_interface_config *iface)
1031 {
1032 int ret;
1033
1034
1035 ret = nand_choose_best_nvddr_timings(chip, iface, NULL);
1036 if (!ret)
1037 return 0;
1038
1039
1040 return nand_choose_best_sdr_timings(chip, iface, NULL);
1041 }
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056 static int nand_choose_interface_config(struct nand_chip *chip)
1057 {
1058 struct nand_interface_config *iface;
1059 int ret;
1060
1061 if (!nand_controller_can_setup_interface(chip))
1062 return 0;
1063
1064 iface = kzalloc(sizeof(*iface), GFP_KERNEL);
1065 if (!iface)
1066 return -ENOMEM;
1067
1068 if (chip->ops.choose_interface_config)
1069 ret = chip->ops.choose_interface_config(chip, iface);
1070 else
1071 ret = nand_choose_best_timings(chip, iface);
1072
1073 if (ret)
1074 kfree(iface);
1075
1076 return ret;
1077 }
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
/**
 * nand_fill_column_cycles - fill the column cycles of an address sequence
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first one or two bytes of @addrs depending on the NAND bus
 * width and the page size.
 *
 * Return: the number of address cycles filled, or a negative errno.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Make sure the offset is less than the actual page size. */
	if (offset_in_page > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/*
	 * On small-page NANDs there is a dedicated command to access the OOB
	 * area, and the column address is relative to the start of the OOB
	 * area, not the start of the page. Adjust the address accordingly.
	 */
	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
		offset_in_page -= mtd->writesize;

	/*
	 * The offset in page is expressed in bytes; on a 16-bit wide bus it
	 * must be divided by 2 (and must therefore be even).
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		if (WARN_ON(offset_in_page % 2))
			return -EINVAL;

		offset_in_page /= 2;
	}

	addrs[0] = offset_in_page;

	/*
	 * Small-page NANDs use 1 cycle for the columns, while large-page
	 * NANDs need 2.
	 */
	if (mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}
1132
/* Issue a READ PAGE operation on a small-page NAND through ->exec_op() */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the trailing DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/* Small-page chips use dedicated opcodes for OOB / second-half reads */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row address: 2 cycles, plus a 3rd for big devices */
	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1175
/* Issue a READ PAGE operation on a large-page NAND through ->exec_op() */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the trailing DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/* 2 column cycles first, then the row address */
	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226 int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1227 unsigned int offset_in_page, void *buf, unsigned int len)
1228 {
1229 struct mtd_info *mtd = nand_to_mtd(chip);
1230
1231 if (len && !buf)
1232 return -EINVAL;
1233
1234 if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1235 return -EINVAL;
1236
1237 if (nand_has_exec_op(chip)) {
1238 if (mtd->writesize > 512)
1239 return nand_lp_exec_read_page_op(chip, page,
1240 offset_in_page, buf,
1241 len);
1242
1243 return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1244 buf, len);
1245 }
1246
1247 chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
1248 if (len)
1249 chip->legacy.read_buf(chip, buf, len);
1250
1251 return 0;
1252 }
1253 EXPORT_SYMBOL_GPL(nand_read_page_op);
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
/**
 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
 * @chip: The NAND chip
 * @page: parameter page to read
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PARAMETER PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
			    unsigned int len)
{
	unsigned int i;
	u8 *p = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page,
				     NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: byte-wise reads through ->read_byte() */
	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->legacy.read_byte(chip);

	return 0;
}
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
/**
 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE READ COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1381 unsigned int offset_in_oob, void *buf, unsigned int len)
1382 {
1383 struct mtd_info *mtd = nand_to_mtd(chip);
1384
1385 if (len && !buf)
1386 return -EINVAL;
1387
1388 if (offset_in_oob + len > mtd->oobsize)
1389 return -EINVAL;
1390
1391 if (nand_has_exec_op(chip))
1392 return nand_read_page_op(chip, page,
1393 mtd->writesize + offset_in_oob,
1394 buf, len);
1395
1396 chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
1397 if (len)
1398 chip->legacy.read_buf(chip, buf, len);
1399
1400 return 0;
1401 }
1402 EXPORT_SYMBOL_GPL(nand_read_oob_op);
1403
/* Build and run a PROG PAGE (or SEQIN-only) sequence through ->exec_op() */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG,
			    NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);

	if (naddrs < 0)
		return naddrs;

	/* Append the row address after the column cycle(s) */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the PAGEPROG + WAIT_RDY tail if only starting the program */
	if (!prog) {
		op.ninstrs -= 2;

		/* ... and the DATA_OUT instruction too when there is no data */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: the leading READ
		 * command selects which part of the page SEQIN addresses.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Large page NANDs do not need the leading READ command at
		 * all: skip the first instruction.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	return nand_exec_op(chip, &op);
}
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
/**
 * nand_prog_page_begin_op - starts a PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues the first half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
			    unsigned int offset_in_page, const void *buf,
			    unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* prog=false: only SEQIN + address (+ data), no PAGEPROG yet */
	if (nand_has_exec_op(chip))
		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					      len, false);

	chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);

	if (buf)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
/**
 * nand_prog_page_end_op - ends a PROG PAGE operation
 * @chip: The NAND chip
 *
 * This function issues the second half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Read back the status to detect a programming failure */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
/**
 * nand_prog_page_op - Do a full PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues a full PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, const void *buf,
		      unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 status;
	int ret;

	/* A full program always needs data */
	if (!len || !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		/* prog=true: SEQIN + data + PAGEPROG in one sequence */
		ret = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					     len, true);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
				     page);
		chip->legacy.write_buf(chip, buf, len);
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_op);
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
/**
 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: new column to move the internal pointer to
 * @buf: buffer containing the data to send to the NAND (may be NULL if
 *	 @len is 0)
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * Issues a RNDIN (random data input) command to reposition the column
 * pointer of a page program in progress, then optionally sends data.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ENOTSUPP for
 * small-page devices, or a negative error code from the lower layers.
 */
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page,
				const void *buf, unsigned int len,
				bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small-page NANDs do not support the RNDIN command. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if there is no data. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * In NV-DDR mode every byte appears twice on the data bus, so a temporary
 * bounce buffer of twice @len is allocated and only every other byte is
 * copied back into @buf.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf, *ddrbuf = NULL;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr,
				     NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* NV-DDR: read twice as much into a bounce buffer. */
		if (len && nand_interface_is_nvddr(conf)) {
			ddrbuf = kzalloc(len * 2, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			instrs[2].ctx.data.len *= 2;
			instrs[2].ctx.data.buf.in = ddrbuf;
		}

		/* Drop the DATA_IN instruction if there is no data. */
		if (!len)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && len && nand_interface_is_nvddr(conf)) {
			/* Keep only one byte out of each repeated pair. */
			for (i = 0; i < len; i++)
				id[i] = ddrbuf[i * 2];
		}

		kfree(ddrbuf);

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);

	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status (may be NULL if the
 *	    caller only wants to put the chip in status-read mode)
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		/* NV-DDR returns each byte twice; 2 bytes cover one status. */
		u8 ddrstatus[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* Redirect the read into the bounce buffer on NV-DDR. */
		if (status && nand_interface_is_nvddr(conf)) {
			instrs[1].ctx.data.len *= 2;
			instrs[1].ctx.data.buf.in = ddrstatus;
		}

		if (!status)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && status && nand_interface_is_nvddr(conf))
			*status = ddrstatus[0];

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
/**
 * nand_exit_status_op - Exit a STATUS operation
 * @chip: The NAND chip
 *
 * Sends a READ0 command so that the next data cycles come from the page
 * buffer again instead of the status register.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_exit_status_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READ0, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);

	return 0;
}
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
/**
 * nand_erase_op - Do an erase operation
 * @chip: The NAND chip
 * @eraseblock: block to erase
 *
 * Issues ERASE1 + row address cycles + ERASE2, waits for completion, and
 * checks the STATUS register for an erase failure.
 *
 * Returns 0 on success, -EIO if the chip reported an erase failure, or a
 * negative error code from the lower layers.
 */
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
{
	unsigned int page = eraseblock <<
			    (chip->phys_erase_shift - chip->page_shift);
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		/* 3rd address byte is only sent with NAND_ROW_ADDR_3. */
		u8 addrs[3] = {	page, page >> 8, page >> 16 };
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_ERASE2,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		if (chip->options & NAND_ROW_ADDR_3)
			instrs[1].ctx.addr.naddrs++;

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);

		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		/* Legacy waitfunc() returns the status byte on success. */
		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_erase_op);
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
/**
 * nand_set_features_op - Do a SET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data (ONFI_SUBFEATURE_PARAM_LEN)
 *
 * Returns 0 on success, -EIO if the chip reported a failure, or a
 * negative error code from the lower layers.
 */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf,
									tADL_min)),
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      NAND_COMMON_TIMING_NS(conf,
								    tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
/**
 * nand_get_features_op - Do a GET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data (ONFI_SUBFEATURE_PARAM_LEN)
 *
 * In NV-DDR mode the feature bytes are received twice each, so they are
 * read into a stack bounce buffer and deinterleaved into @data.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2];
	int i;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature,
				     NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* NV-DDR: read twice as much into the bounce buffer. */
		if (nand_interface_is_nvddr(conf)) {
			instrs[3].ctx.data.len *= 2;
			instrs[3].ctx.data.buf.in = ddrbuf;
		}

		ret = nand_exec_op(chip, &op);
		/*
		 * NOTE(review): the deinterleave copy below is performed even
		 * when nand_exec_op() failed; callers are expected to check
		 * the returned error before trusting @data.
		 */
		if (nand_interface_is_nvddr(conf)) {
			for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++)
				params[i] = ddrbuf[i * 2];
		}

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->legacy.read_byte(chip);

	return 0;
}
1982
/*
 * nand_wait_rdy_op - Wait for the chip to become ready again.
 * @timeout_ms: maximum time to wait, in milliseconds
 * @delay_ns: delay to apply before sampling the R/B line, in nanoseconds
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
			    unsigned int delay_ns)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
					 PSEC_TO_NSEC(delay_ns)),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: busy-wait or poll the dev_ready() hook. */
	if (!chip->legacy.dev_ready)
		udelay(chip->legacy.chip_delay);
	else
		nand_wait_ready(chip);

	return 0;
}
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
/**
 * nand_reset_op - Do a reset operation
 * @chip: The NAND chip
 *
 * Issues a RESET command and waits up to tRST_max for the chip to become
 * ready again.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
/**
 * nand_read_data_op - Read data from the NAND
 * @chip: The NAND chip
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 * @check_only: do not actually run the command, only check if the
 *	        controller supports it
 *
 * Performs a raw DATA_IN cycle (no command, no address).
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
		      bool force_8bit, bool check_only)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		u8 *ddrbuf = NULL;
		int ret, i;

		instrs[0].ctx.data.force_8bit = force_8bit;

		/*
		 * Forcing 8-bit accesses on an NV-DDR interface makes every
		 * byte appear twice on the bus, so the transfer is doubled
		 * into a bounce buffer and deinterleaved afterwards.
		 */
		if (force_8bit && nand_interface_is_nvddr(conf)) {
			ddrbuf = kzalloc(len * 2, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			instrs[0].ctx.data.len *= 2;
			instrs[0].ctx.data.buf.in = ddrbuf;
		}

		if (check_only) {
			ret = nand_check_op(chip, &op);
			kfree(ddrbuf);
			return ret;
		}

		ret = nand_exec_op(chip, &op);
		if (!ret && force_8bit && nand_interface_is_nvddr(conf)) {
			u8 *dst = buf;

			/* Keep only one byte out of each repeated pair. */
			for (i = 0; i < len; i++)
				dst[i] = ddrbuf[i * 2];
		}

		kfree(ddrbuf);

		return ret;
	}

	if (check_only)
		return 0;

	if (force_8bit) {
		u8 *p = buf;
		unsigned int i;

		for (i = 0; i < len; i++)
			p[i] = chip->legacy.read_byte(chip);
	} else {
		chip->legacy.read_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_data_op);
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
/**
 * nand_write_data_op - Write data to the NAND
 * @chip: The NAND chip
 * @buf: buffer containing the data to send to the NAND
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * Performs a raw DATA_OUT cycle (no command, no address).
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_write_data_op(struct nand_chip *chip, const void *buf,
		       unsigned int len, bool force_8bit)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	if (force_8bit) {
		const u8 *p = buf;
		unsigned int i;

		for (i = 0; i < len; i++)
			chip->legacy.write_byte(chip, p[i]);
	} else {
		chip->legacy.write_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_write_data_op);
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
/**
 * struct nand_op_parser_ctx - Context used by the instruction parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: subset of instructions matched by a controller pattern
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	struct nand_subop subop;
};
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
/**
 * nand_op_parser_must_split_instr - Checks if an instruction must be split
 * @pat: the parser pattern element that matches @instr
 * @instr: the instruction to check
 * @start_offset: this is an in/out parameter. On entry, the offset (in
 *		  address cycles or data bytes) already consumed by previous
 *		  sub-operations; on exit, advanced by the pattern's maximum
 *		  capacity when a split is required.
 *
 * Some controllers are limited and cannot send X address cycles or transfer
 * Y bytes in a single operation; in that case the instruction must be split
 * across several sub-operations.
 *
 * Returns true if the instruction must be split, false otherwise.
 */
static bool
nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
				const struct nand_op_instr *instr,
				unsigned int *start_offset)
{
	switch (pat->type) {
	case NAND_OP_ADDR_INSTR:
		/* maxcycles == 0 means "no limit". */
		if (!pat->ctx.addr.maxcycles)
			break;

		if (instr->ctx.addr.naddrs - *start_offset >
		    pat->ctx.addr.maxcycles) {
			*start_offset += pat->ctx.addr.maxcycles;
			return true;
		}
		break;

	case NAND_OP_DATA_IN_INSTR:
	case NAND_OP_DATA_OUT_INSTR:
		/* maxlen == 0 means "no limit". */
		if (!pat->ctx.data.maxlen)
			break;

		if (instr->ctx.data.len - *start_offset >
		    pat->ctx.data.maxlen) {
			*start_offset += pat->ctx.data.maxlen;
			return true;
		}
		break;

	default:
		break;
	}

	return false;
}
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
/**
 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
 *			      remaining in the parser context
 * @pat: the pattern to test
 * @ctx: the parser context structure to match with the pattern @pat
 *
 * Check if @pat matches the set (or a subset) of instructions remaining in
 * @ctx. On success, @ctx->subop is updated with the set of instructions
 * matched by @pat (and the offsets inside the first/last instruction when
 * an instruction had to be split).
 *
 * Returns true if this is the case, false otherwise.
 */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, skip the pattern element and continue;
		 * otherwise the match fails.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern
		 * is not able to handle the whole instruction in a single
		 * step, the instruction is split here and the sub-operation
		 * ends at this point.
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there is no match at all, the pattern should not be
	 * considered valid for this set of instructions.
	 */
	if (!ninstrs)
		return false;

	/*
	 * The pattern was only partially matched: the remaining elements
	 * must all be optional, otherwise the whole match is rejected.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * the remaining elements are optional (checked above) before
	 * committing the sub-operation boundaries.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}
2320
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/* Dump all instructions, marking those belonging to the current subop. */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop (CS%d):\n", ctx->subop.cs);

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Arrow-prefix the instructions of the active subop. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		nand_op_trace(prefix, instr);

		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP when debug tracing is compiled out. */
}
#endif
2348
2349 static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
2350 const struct nand_op_parser_ctx *b)
2351 {
2352 if (a->subop.ninstrs < b->subop.ninstrs)
2353 return -1;
2354 else if (a->subop.ninstrs > b->subop.ninstrs)
2355 return 1;
2356
2357 if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
2358 return -1;
2359 else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
2360 return 1;
2361
2362 return 0;
2363 }
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled
 *		but does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers
 * that only support a limited set of instruction sequences. The supported
 * sequences are described in @parser, and the framework takes care of
 * splitting @op into multiple sub-operations (if required) and pass them
 * back to the ->exec() callback of the matching pattern.
 *
 * Returns 0 on success, a negative error code otherwise. A failure can be
 * caused by an unsupported operation (none of the supported patterns is
 * able to handle the requested operation), or an error returned by one of
 * the matching pattern->exec() hooks.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.cs = op->cs,
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		const struct nand_op_parser_pattern *pattern;
		struct nand_op_parser_ctx best_ctx;
		int ret, best_pattern = -1;

		/* Try every pattern and keep the one consuming the most. */
		for (i = 0; i < parser->npatterns; i++) {
			struct nand_op_parser_ctx test_ctx = ctx;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &test_ctx))
				continue;

			if (best_pattern >= 0 &&
			    nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
				continue;

			best_pattern = i;
			best_ctx = test_ctx;
		}

		if (best_pattern < 0) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		ctx = best_ctx;
		nand_op_parser_trace(&ctx);

		if (!check_only) {
			pattern = &parser->patterns[best_pattern];
			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;
		}

		/*
		 * Advance past the consumed instructions. When the last
		 * instruction was split (non-zero end offset), it must be
		 * the first instruction of the next sub-operation.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2448
2449 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2450 {
2451 return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2452 instr->type == NAND_OP_DATA_OUT_INSTR);
2453 }
2454
2455 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2456 unsigned int instr_idx)
2457 {
2458 return subop && instr_idx < subop->ninstrs;
2459 }
2460
2461 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2462 unsigned int instr_idx)
2463 {
2464 if (instr_idx)
2465 return 0;
2466
2467 return subop->first_instr_start_off;
2468 }
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
/**
 * nand_subop_get_addr_start_off - Get the start offset in an address array
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->addr.addrs field of address instructions. This is wrong as address
 * instructions might be split.
 *
 * Given an address instruction, returns the offset of the first cycle to
 * issue. Returns 0 (with a WARN) if @instr_idx does not point to an
 * address instruction.
 */
unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
					   unsigned int instr_idx)
{
	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
		return 0;

	return nand_subop_get_start_off(subop, instr_idx);
}
EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
/**
 * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->addr->naddrs field of an address instruction. This is wrong as address
 * instructions might be split.
 *
 * Given an address instruction, returns the number of address cycles to
 * issue. Returns 0 (with a WARN) if @instr_idx does not point to an
 * address instruction.
 */
unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
					 unsigned int instr_idx)
{
	int start_off, end_off;

	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
		return 0;

	start_off = nand_subop_get_addr_start_off(subop, instr_idx);

	/* The last instruction may have been truncated by a split. */
	if (instr_idx == subop->ninstrs - 1 &&
	    subop->last_instr_end_off)
		end_off = subop->last_instr_end_off;
	else
		end_off = subop->instrs[instr_idx].ctx.addr.naddrs;

	return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535 unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2536 unsigned int instr_idx)
2537 {
2538 if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2539 !nand_instr_is_data(&subop->instrs[instr_idx])))
2540 return 0;
2541
2542 return nand_subop_get_start_off(subop, instr_idx);
2543 }
2544 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
/**
 * nand_subop_get_data_len - Get the number of bytes to retrieve
 * @subop: The entire sub-operation
 * @instr_idx: Index of the instruction inside the sub-operation
 *
 * During driver development, one could be tempted to directly use the
 * ->data->len field of a data instruction. This is wrong as data
 * instructions might be split.
 *
 * Returns the length of the chunk of data to send/receive. Returns 0
 * (with a WARN) if @instr_idx does not point to a data instruction.
 */
unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
				     unsigned int instr_idx)
{
	int start_off = 0, end_off;

	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    !nand_instr_is_data(&subop->instrs[instr_idx])))
		return 0;

	start_off = nand_subop_get_data_start_off(subop, instr_idx);

	/* The last instruction may have been truncated by a split. */
	if (instr_idx == subop->ninstrs - 1 &&
	    subop->last_instr_end_off)
		end_off = subop->last_instr_end_off;
	else
		end_off = subop->instrs[instr_idx].ctx.data.len;

	return end_off - start_off;
}
EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
/**
 * nand_reset - Reset and initialize a NAND device
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Resets the data interface to the SDR default, sends a RESET to the
 * selected die, and re-negotiates the best supported data interface.
 *
 * Returns 0 for success or negative error code otherwise.
 */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	int ret;

	ret = nand_reset_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird nand_select_target() /
	 * nand_deselect_target() dance around the reset operation.
	 */
	nand_select_target(chip, chipnr);
	ret = nand_reset_op(chip);
	nand_deselect_target(chip);
	if (ret)
		return ret;

	ret = nand_setup_interface(chip, chipnr);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset);
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625 int nand_get_features(struct nand_chip *chip, int addr,
2626 u8 *subfeature_param)
2627 {
2628 if (!nand_supports_get_features(chip, addr))
2629 return -ENOTSUPP;
2630
2631 if (chip->legacy.get_features)
2632 return chip->legacy.get_features(chip, addr, subfeature_param);
2633
2634 return nand_get_features_op(chip, addr, subfeature_param);
2635 }
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646 int nand_set_features(struct nand_chip *chip, int addr,
2647 u8 *subfeature_param)
2648 {
2649 if (!nand_supports_set_features(chip, addr))
2650 return -ENOTSUPP;
2651
2652 if (chip->legacy.set_features)
2653 return chip->legacy.set_features(chip, addr, subfeature_param);
2654
2655 return nand_set_features_op(chip, addr, subfeature_param);
2656 }
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
/**
 * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
 * @buf: buffer to test
 * @len: buffer length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a buffer contains only 0xff, which means the underlying region
 * has been erased and is ready to be programmed.
 * The bitflips_threshold specifies the maximum number of bitflips before
 * considering the region as not erased.
 *
 * Returns a positive number of bitflips or -EBADMSG if the threshold was
 * exceeded.
 */
static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
{
	const unsigned char *bitmap = buf;
	int bitflips = 0;
	int weight;

	/* Byte-wise until the pointer is aligned on a long boundary. */
	for (; len && ((uintptr_t)bitmap) % sizeof(long);
	     len--, bitmap++) {
		weight = hweight8(*bitmap);
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	/* Then a word at a time; all-ones words are skipped cheaply. */
	for (; len >= sizeof(long);
	     len -= sizeof(long), bitmap += sizeof(long)) {
		unsigned long d = *((unsigned long *)bitmap);
		if (d == ~0UL)
			continue;
		weight = hweight_long(d);
		bitflips += BITS_PER_LONG - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	/* Finish the unaligned tail byte-wise. */
	for (; len > 0; len--, bitmap++) {
		weight = hweight8(*bitmap);
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	return bitflips;
}
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
/**
 * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
 *				 0xff data
 * @data: data buffer to test
 * @datalen: data length
 * @ecc: ECC buffer
 * @ecclen: ECC length
 * @extraoob: extra OOB buffer
 * @extraooblen: extra OOB length
 * @bitflips_threshold: maximum number of bitflips
 *
 * Check if a data buffer and its associated ECC and OOB data contain only
 * 0xff pattern, which means the underlying region has been erased and is
 * ready to be programmed. Buffers that do contain bitflips are rewritten
 * to 0xff so the caller sees clean erased content.
 *
 * Returns the total number of bitflips across all three buffers, or
 * -EBADMSG if the running total exceeds @bitflips_threshold. Note that
 * the threshold is consumed progressively: bitflips found in @data reduce
 * the budget left for @ecc, and so on.
 */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;

	data_bitflips = nand_check_erased_buf(data, datalen,
					      bitflips_threshold);
	if (data_bitflips < 0)
		return data_bitflips;

	bitflips_threshold -= data_bitflips;

	ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
	if (ecc_bitflips < 0)
		return ecc_bitflips;

	bitflips_threshold -= ecc_bitflips;

	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
						  bitflips_threshold);
	if (extraoob_bitflips < 0)
		return extraoob_bitflips;

	/* Scrub the bitflips away so the caller sees erased content. */
	if (data_bitflips)
		memset(data, 0xff, datalen);

	if (ecc_bitflips)
		memset(ecc, 0xff, ecclen);

	if (extraoob_bitflips)
		memset(extraoob, 0xff, extraooblen);

	return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
/**
 * nand_read_page_raw_notsupp - dummy read raw page function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Always returns -ENOTSUPP: placeholder for controllers that cannot do
 * raw page accesses.
 */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
			       int oob_required, int page)
{
	return -ENOTSUPP;
}
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
/**
 * nand_read_page_raw - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers, which use a special oob
 * layout.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
		       int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	if (oob_required) {
		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
					false, false);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(nand_read_page_raw);
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
/**
 * nand_monolithic_read_page_raw - Monolithic page read in raw mode
 * @chip: NAND chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * This is a raw page read, ie. without any error detection/correction.
 * Monolithic means the data and OOB areas are read in one go: the full
 * page is retrieved in a single controller operation, which is required
 * by some controllers that cannot split the transfer.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int size = mtd->writesize;
	u8 *read_buf = buf;
	int ret;

	if (oob_required) {
		size += mtd->oobsize;

		/*
		 * Use the chip's internal buffer so that data + OOB land in
		 * one contiguous area (OOB ends up in chip->oob_poi).
		 */
		if (buf != chip->data_buf)
			read_buf = nand_get_data_buf(chip);
	}

	ret = nand_read_page_op(chip, page, 0, read_buf, size);
	if (ret)
		return ret;

	/* Copy the data portion back to the caller's buffer. */
	if (buf != chip->data_buf)
		memcpy(buf, read_buf, mtd->writesize);

	return 0;
}
EXPORT_SYMBOL(nand_monolithic_read_page_raw);
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
/**
 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * We need a special oob layout and handling even when OOB isn't used:
 * data and OOB bytes are interleaved per ECC step
 * (prepad | ecc bytes | postpad), so the page has to be read piecewise.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		/* Data chunk of this ECC step. */
		ret = nand_read_data_op(chip, buf, eccsize, false, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* ECC bytes of this step. */
		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes remain after the last step. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false, false);
		if (ret)
			return ret;
	}

	return 0;
}
2938
2939
2940
2941
2942
2943
2944
2945
/**
 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Reads the raw page (data + OOB), computes ECC in software for each ECC
 * step, extracts the stored ECC bytes from the OOB layout and corrects the
 * data. ECC statistics are accumulated in mtd->ecc_stats.
 *
 * Returns the maximum number of bitflips seen in any ECC step, or a
 * negative error code.
 */
static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* OOB is always needed here to get the stored ECC bytes. */
	chip->ecc.read_page_raw(chip, buf, 1, page);

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993 static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
2994 uint32_t readlen, uint8_t *bufpoi, int page)
2995 {
2996 struct mtd_info *mtd = nand_to_mtd(chip);
2997 int start_step, end_step, num_steps, ret;
2998 uint8_t *p;
2999 int data_col_addr, i, gaps = 0;
3000 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
3001 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
3002 int index, section = 0;
3003 unsigned int max_bitflips = 0;
3004 struct mtd_oob_region oobregion = { };
3005
3006
3007 start_step = data_offs / chip->ecc.size;
3008 end_step = (data_offs + readlen - 1) / chip->ecc.size;
3009 num_steps = end_step - start_step + 1;
3010 index = start_step * chip->ecc.bytes;
3011
3012
3013 datafrag_len = num_steps * chip->ecc.size;
3014 eccfrag_len = num_steps * chip->ecc.bytes;
3015
3016 data_col_addr = start_step * chip->ecc.size;
3017
3018 p = bufpoi + data_col_addr;
3019 ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
3020 if (ret)
3021 return ret;
3022
3023
3024 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
3025 chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
3026
3027
3028
3029
3030
3031 ret = mtd_ooblayout_find_eccregion(mtd, index, §ion, &oobregion);
3032 if (ret)
3033 return ret;
3034
3035 if (oobregion.length < eccfrag_len)
3036 gaps = 1;
3037
3038 if (gaps) {
3039 ret = nand_change_read_column_op(chip, mtd->writesize,
3040 chip->oob_poi, mtd->oobsize,
3041 false);
3042 if (ret)
3043 return ret;
3044 } else {
3045
3046
3047
3048
3049 aligned_pos = oobregion.offset & ~(busw - 1);
3050 aligned_len = eccfrag_len;
3051 if (oobregion.offset & (busw - 1))
3052 aligned_len++;
3053 if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
3054 (busw - 1))
3055 aligned_len++;
3056
3057 ret = nand_change_read_column_op(chip,
3058 mtd->writesize + aligned_pos,
3059 &chip->oob_poi[aligned_pos],
3060 aligned_len, false);
3061 if (ret)
3062 return ret;
3063 }
3064
3065 ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
3066 chip->oob_poi, index, eccfrag_len);
3067 if (ret)
3068 return ret;
3069
3070 p = bufpoi + data_col_addr;
3071 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
3072 int stat;
3073
3074 stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
3075 &chip->ecc.calc_buf[i]);
3076 if (stat == -EBADMSG &&
3077 (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
3078
3079 stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
3080 &chip->ecc.code_buf[i],
3081 chip->ecc.bytes,
3082 NULL, 0,
3083 chip->ecc.strength);
3084 }
3085
3086 if (stat < 0) {
3087 mtd->ecc_stats.failed++;
3088 } else {
3089 mtd->ecc_stats.corrected += stat;
3090 max_bitflips = max_t(unsigned int, max_bitflips, stat);
3091 }
3092 }
3093 return max_bitflips;
3094 }
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
/**
 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers which need a special oob
 * layout. The controller ECC engine is primed (->hwctl) before each data
 * chunk, the ECC is latched (->calculate) afterwards, and correction is
 * done in a second pass once the stored ECC bytes have been extracted
 * from the OOB layout.
 *
 * Returns the maximum number of bitflips seen in any ECC step, or a
 * negative error code.
 */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				false);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i], eccbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
/**
 * nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
 *                                  data read from OOB area
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Hardware ECC for large page chips which require the ECC data to be
 * extracted from the OOB *before* the actual data is read.
 */
int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Read the OOB area first */
	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
	if (ret)
		return ret;

	/* Move read cursor back to the start of the page */
	ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i],
							   eccbytes, NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling: for each ECC step the on-flash
 * layout is [prepad | data ECC | postpad] interleaved with the data.
 */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}
3325
3326
3327
3328
3329
3330
3331
3332
3333 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
3334 struct mtd_oob_ops *ops, size_t len)
3335 {
3336 struct mtd_info *mtd = nand_to_mtd(chip);
3337 int ret;
3338
3339 switch (ops->mode) {
3340
3341 case MTD_OPS_PLACE_OOB:
3342 case MTD_OPS_RAW:
3343 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3344 return oob + len;
3345
3346 case MTD_OPS_AUTO_OOB:
3347 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3348 ops->ooboffs, len);
3349 BUG_ON(ret);
3350 return oob + len;
3351
3352 default:
3353 BUG();
3354 }
3355 return NULL;
3356 }
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367 static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
3368 {
3369 pr_debug("setting READ RETRY mode %d\n", retry_mode);
3370
3371 if (retry_mode >= chip->read_retries)
3372 return -EINVAL;
3373
3374 if (!chip->ops.setup_read_retry)
3375 return -EOPNOTSUPP;
3376
3377 return chip->ops.setup_read_retry(chip, retry_mode);
3378 }
3379
3380 static void nand_wait_readrdy(struct nand_chip *chip)
3381 {
3382 const struct nand_interface_config *conf;
3383
3384 if (!(chip->options & NAND_NEED_READRDY))
3385 return;
3386
3387 conf = nand_get_interface_config(chip);
3388 WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0));
3389 }
3390
3391
3392
3393
3394
3395
3396
3397
3398
/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal function. Handles page-cache reuse, bounce buffering for
 * unaligned/DMA-unsafe buffers, read-retry modes and ECC statistics.
 * Returns max bitflips on success, negative errno on failure.
 */
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);

	uint8_t *bufpoi, *oob, *buf;
	int use_bounce_buf;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	/* Check if the region is secured and should not be accessed */
	if (nand_region_is_secured(chip, from, readlen))
		return -EIO;

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	while (1) {
		struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/*
		 * Force a bounce buffer for partial pages, or for DMA
		 * controllers when the caller's buffer is not DMA-able.
		 */
		if (!aligned)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/* Is the current page in the page cache already? */
		if (realpage != chip->pagecache.page || oob) {
			bufpoi = use_bounce_buf ? chip->data_buf : buf;

			if (use_bounce_buf && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
					 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer. Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bounce_buf)
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				break;
			}

			/*
			 * Copy back into the caller buffer when a bounce
			 * buffer was used; cache the page only if it was read
			 * whole, clean, and non-raw.
			 */
			if (use_bounce_buf) {
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_stats.failed) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagecache.page = realpage;
					chip->pagecache.bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				}
				memcpy(buf, bufpoi + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip, oob, ops,
								toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			/* New uncorrectable errors in this page: try retry modes */
			if (mtd->ecc_stats.failed - ecc_stats.failed) {
				if (retry_mode + 1 < chip->read_retries) {
					retry_mode++;
					ret = nand_setup_read_retry(chip,
								    retry_mode);
					if (ret < 0)
						break;

					/* Reset ecc_stats; retry */
					mtd->ecc_stats = ecc_stats;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Serve the read from the page cache */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagecache.bitflips);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 for the next page */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}
3572
3573
3574
3575
3576
3577
3578 int nand_read_oob_std(struct nand_chip *chip, int page)
3579 {
3580 struct mtd_info *mtd = nand_to_mtd(chip);
3581
3582 return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3583 }
3584 EXPORT_SYMBOL(nand_read_oob_std);
3585
3586
3587
3588
3589
3590
3591
3592 static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3593 {
3594 struct mtd_info *mtd = nand_to_mtd(chip);
3595 int length = mtd->oobsize;
3596 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3597 int eccsize = chip->ecc.size;
3598 uint8_t *bufpoi = chip->oob_poi;
3599 int i, toread, sndrnd = 0, pos, ret;
3600
3601 ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3602 if (ret)
3603 return ret;
3604
3605 for (i = 0; i < chip->ecc.steps; i++) {
3606 if (sndrnd) {
3607 int ret;
3608
3609 pos = eccsize + i * (eccsize + chunk);
3610 if (mtd->writesize > 512)
3611 ret = nand_change_read_column_op(chip, pos,
3612 NULL, 0,
3613 false);
3614 else
3615 ret = nand_read_page_op(chip, page, pos, NULL,
3616 0);
3617
3618 if (ret)
3619 return ret;
3620 } else
3621 sndrnd = 1;
3622 toread = min_t(int, length, chunk);
3623
3624 ret = nand_read_data_op(chip, bufpoi, toread, false, false);
3625 if (ret)
3626 return ret;
3627
3628 bufpoi += toread;
3629 length -= toread;
3630 }
3631 if (length > 0) {
3632 ret = nand_read_data_op(chip, bufpoi, length, false, false);
3633 if (ret)
3634 return ret;
3635 }
3636
3637 return 0;
3638 }
3639
3640
3641
3642
3643
3644
3645 int nand_write_oob_std(struct nand_chip *chip, int page)
3646 {
3647 struct mtd_info *mtd = nand_to_mtd(chip);
3648
3649 return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3650 mtd->oobsize);
3651 }
3652 EXPORT_SYMBOL(nand_write_oob_std);
3653
3654
3655
3656
3657
3658
3659
/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *			     with syndrome - only for large page flash
 * @chip: nand chip info structure
 * @page: page number to write
 */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
	 *
	 * Without padding we can write the whole OOB region in one go at the
	 * end of the page; with padding we must weave through the chunks.
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/*
				 * Small page devices cannot reposition the
				 * write cursor: pad the data area with 0xff
				 * (no-op for NAND programming) instead.
				 */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	/* Write remaining trailing OOB bytes, if any */
	if (length > 0) {
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3726
3727
3728
3729
3730
3731
3732
3733
3734
/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area. Returns max bitflips on
 * success, negative errno on failure.
 */
static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Check if the region is secured and should not be accessed */
	if (nand_region_is_secured(chip, from, readlen))
		return -EIO;

	stats = mtd->ecc_stats;

	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(chip, page);
		else
			ret = chip->ecc.read_oob(chip, page);

		if (ret < 0)
			break;

		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);

		nand_wait_readrdy(chip);

		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}
3808
3809
3810
3811
3812
3813
3814
3815
3816
3817 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3818 struct mtd_oob_ops *ops)
3819 {
3820 struct nand_chip *chip = mtd_to_nand(mtd);
3821 int ret;
3822
3823 ops->retlen = 0;
3824
3825 if (ops->mode != MTD_OPS_PLACE_OOB &&
3826 ops->mode != MTD_OPS_AUTO_OOB &&
3827 ops->mode != MTD_OPS_RAW)
3828 return -ENOTSUPP;
3829
3830 nand_get_device(chip);
3831
3832 if (!ops->datbuf)
3833 ret = nand_do_read_oob(chip, from, ops);
3834 else
3835 ret = nand_do_read_ops(chip, from, ops);
3836
3837 nand_release_device(chip);
3838 return ret;
3839 }
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
/**
 * nand_write_page_raw_notsupp - dummy raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Returns -ENOTSUPP unconditionally; placeholder for controllers that
 * cannot do raw page accesses.
 */
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
				int oob_required, int page)
{
	return -ENOTSUPP;
}
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
3866 int oob_required, int page)
3867 {
3868 struct mtd_info *mtd = nand_to_mtd(chip);
3869 int ret;
3870
3871 ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3872 if (ret)
3873 return ret;
3874
3875 if (oob_required) {
3876 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3877 false);
3878 if (ret)
3879 return ret;
3880 }
3881
3882 return nand_prog_page_end_op(chip);
3883 }
3884 EXPORT_SYMBOL(nand_write_page_raw);
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902 int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
3903 int oob_required, int page)
3904 {
3905 struct mtd_info *mtd = nand_to_mtd(chip);
3906 unsigned int size = mtd->writesize;
3907 u8 *write_buf = (u8 *)buf;
3908
3909 if (oob_required) {
3910 size += mtd->oobsize;
3911
3912 if (buf != chip->data_buf) {
3913 write_buf = nand_get_data_buf(chip);
3914 memcpy(write_buf, buf, mtd->writesize);
3915 }
3916 }
3917
3918 return nand_prog_page_op(chip, page, 0, write_buf, size);
3919 }
3920 EXPORT_SYMBOL(nand_monolithic_write_page_raw);
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * We need a special oob layout and handling even when ECC isn't checked:
 * the on-flash layout interleaves [data | prepad | ECC | postpad] chunks.
 */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Write any remaining OOB bytes past the last chunk */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3986
3987
3988
3989
3990
3991
3992
3993 static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
3994 int oob_required, int page)
3995 {
3996 struct mtd_info *mtd = nand_to_mtd(chip);
3997 int i, eccsize = chip->ecc.size, ret;
3998 int eccbytes = chip->ecc.bytes;
3999 int eccsteps = chip->ecc.steps;
4000 uint8_t *ecc_calc = chip->ecc.calc_buf;
4001 const uint8_t *p = buf;
4002
4003
4004 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
4005 chip->ecc.calculate(chip, p, &ecc_calc[i]);
4006
4007 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
4008 chip->ecc.total);
4009 if (ret)
4010 return ret;
4011
4012 return chip->ecc.write_page_raw(chip, buf, 1, page);
4013 }
4014
4015
4016
4017
4018
4019
4020
4021
/**
 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The controller calculates the ECC while the data streams by; the computed
 * bytes are then placed into the OOB layout and programmed together with
 * the OOB area.
 */
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the ECC engine, then stream one step of data */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
/**
 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
 * @chip: nand chip info structure
 * @offset: column address of subpage within the page
 * @data_len: data length
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The whole page is always streamed to the device; steps outside the
 * requested subpage range get 0xff ECC/OOB bytes, which are no-ops for
 * NAND programming.
 */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int ecc_steps = chip->ecc.steps;
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step = (offset + data_len - 1) / ecc_size;
	int oob_bytes = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xFF) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf, ecc_calc);

		/*
		 * Mask OOB of un-touched subpages by padding 0xFF.
		 * Only the free OOB bytes of touched subpages are kept.
		 */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf += oob_bytes;
	}

	/*
	 * Copy calculated ECC for whole page to chip->oob_poi following the
	 * configured OOB layout.
	 */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page
 *			      write
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling: each ECC step writes
 * [data | prepad | ECC | postpad].
 */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209 static int nand_write_page(struct nand_chip *chip, uint32_t offset,
4210 int data_len, const uint8_t *buf, int oob_required,
4211 int page, int raw)
4212 {
4213 struct mtd_info *mtd = nand_to_mtd(chip);
4214 int status, subpage;
4215
4216 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
4217 chip->ecc.write_subpage)
4218 subpage = offset || (data_len < mtd->writesize);
4219 else
4220 subpage = 0;
4221
4222 if (unlikely(raw))
4223 status = chip->ecc.write_page_raw(chip, buf, oob_required,
4224 page);
4225 else if (subpage)
4226 status = chip->ecc.write_subpage(chip, offset, data_len, buf,
4227 oob_required, page);
4228 else
4229 status = chip->ecc.write_page(chip, buf, oob_required, page);
4230
4231 if (status < 0)
4232 return status;
4233
4234 return 0;
4235 }
4236
4237 #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
4238
4239
4240
4241
4242
4243
4244
4245
4246
/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC. Handles subpage alignment checks, write protection,
 * page cache invalidation and bounce buffering.
 */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	/* Check if the region is secured and should not be accessed */
	if (nand_region_is_secured(chip, to, writelen))
		return -EIO;

	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
		chip->pagecache.page = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bounce_buf;
		int part_pagewr = (column || writelen < mtd->writesize);

		/*
		 * Partial page writes always go through the bounce buffer;
		 * DMA controllers additionally need DMA-able, aligned buffers.
		 */
		if (part_pagewr)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/*
		 * Copy the data from the initial buffer when doing partial
		 * page writes or when a bounce buffer is required.
		 */
		if (use_bounce_buf) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
					 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			wbuf = nand_get_data_buf(chip);
			memset(wbuf, 0xff, mtd->writesize);
			memcpy(&wbuf[column], buf, bytes);
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(chip, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	nand_deselect_target(chip);
	return ret;
}
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4384 size_t *retlen, const uint8_t *buf)
4385 {
4386 struct nand_chip *chip = mtd_to_nand(mtd);
4387 int chipnr = (int)(to >> chip->chip_shift);
4388 struct mtd_oob_ops ops;
4389 int ret;
4390
4391 nand_select_target(chip, chipnr);
4392
4393
4394 panic_nand_wait(chip, 400);
4395
4396 memset(&ops, 0, sizeof(ops));
4397 ops.len = len;
4398 ops.datbuf = (uint8_t *)buf;
4399 ops.mode = MTD_OPS_PLACE_OOB;
4400
4401 ret = nand_do_write_ops(chip, to, &ops);
4402
4403 *retlen = ops.retlen;
4404 return ret;
4405 }
4406
4407
4408
4409
4410
4411
4412
4413 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4414 struct mtd_oob_ops *ops)
4415 {
4416 struct nand_chip *chip = mtd_to_nand(mtd);
4417 int ret = 0;
4418
4419 ops->retlen = 0;
4420
4421 nand_get_device(chip);
4422
4423 switch (ops->mode) {
4424 case MTD_OPS_PLACE_OOB:
4425 case MTD_OPS_AUTO_OOB:
4426 case MTD_OPS_RAW:
4427 break;
4428
4429 default:
4430 goto out;
4431 }
4432
4433 if (!ops->datbuf)
4434 ret = nand_do_write_oob(chip, to, ops);
4435 else
4436 ret = nand_do_write_ops(chip, to, ops);
4437
4438 out:
4439 nand_release_device(chip);
4440 return ret;
4441 }
4442
4443
4444
4445
4446
4447
4448
4449
/**
 * nand_erase - [MTD Interface] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 *
 * Erase one ore more blocks; thin wrapper that disallows erasing
 * BBT-reserved blocks (allowbbt = 0).
 */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
}
4454
4455
4456
4457
4458
4459
4460
4461
4462
/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @chip: NAND chip object
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one or more blocks. Checks offsets/length, secured regions, bad
 * blocks and write protection, and invalidates the page cache for erased
 * pages.
 */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
			__func__, (unsigned long long)instr->addr,
			(unsigned long long)instr->len);

	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Check if the region is secured and should not be accessed */
	if (nand_region_is_secured(chip, instr->addr, instr->len))
		return -EIO;

	/* Grab the lock and see if the device is available */
	nand_get_device(chip);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",
				__func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	while (len) {
		loff_t ofs = (loff_t)page << chip->page_shift;

		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at 0x%08llx\n",
				    __func__, (unsigned long long)ofs);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
					__func__, page);
			instr->fail_addr = ofs;
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;
erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}
4555
4556
4557
4558
4559
4560
4561
/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function: acquiring the device
 * lock guarantees any in-flight operation has completed.
 */
static void nand_sync(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: called\n", __func__);

	/* Grab the lock and see if the device is available */
	nand_get_device(chip);
	/* Release it and go back */
	nand_release_device(chip);
}
4573
4574
4575
4576
4577
4578
4579 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4580 {
4581 struct nand_chip *chip = mtd_to_nand(mtd);
4582 int chipnr = (int)(offs >> chip->chip_shift);
4583 int ret;
4584
4585
4586 nand_get_device(chip);
4587
4588 nand_select_target(chip, chipnr);
4589
4590 ret = nand_block_checkbad(chip, offs, 0);
4591
4592 nand_deselect_target(chip);
4593 nand_release_device(chip);
4594
4595 return ret;
4596 }
4597
4598
4599
4600
4601
4602
4603 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4604 {
4605 int ret;
4606
4607 ret = nand_block_isbad(mtd, ofs);
4608 if (ret) {
4609
4610 if (ret > 0)
4611 return 0;
4612 return ret;
4613 }
4614
4615 return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4616 }
4617
4618
4619
4620
4621
4622
4623
4624 static int nand_suspend(struct mtd_info *mtd)
4625 {
4626 struct nand_chip *chip = mtd_to_nand(mtd);
4627 int ret = 0;
4628
4629 mutex_lock(&chip->lock);
4630 if (chip->ops.suspend)
4631 ret = chip->ops.suspend(chip);
4632 if (!ret)
4633 chip->suspended = 1;
4634 mutex_unlock(&chip->lock);
4635
4636 return ret;
4637 }
4638
4639
4640
4641
4642
4643 static void nand_resume(struct mtd_info *mtd)
4644 {
4645 struct nand_chip *chip = mtd_to_nand(mtd);
4646
4647 mutex_lock(&chip->lock);
4648 if (chip->suspended) {
4649 if (chip->ops.resume)
4650 chip->ops.resume(chip);
4651 chip->suspended = 0;
4652 } else {
4653 pr_err("%s called for a chip which is not in suspended state\n",
4654 __func__);
4655 }
4656 mutex_unlock(&chip->lock);
4657
4658 wake_up_all(&chip->resume_wq);
4659 }
4660
4661
4662
4663
4664
4665
/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 *                 prevent further operations
 * @mtd: MTD device structure
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	nand_suspend(mtd);
}
4670
4671
4672
4673
4674
4675
4676
4677 static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4678 {
4679 struct nand_chip *chip = mtd_to_nand(mtd);
4680
4681 if (!chip->ops.lock_area)
4682 return -ENOTSUPP;
4683
4684 return chip->ops.lock_area(chip, ofs, len);
4685 }
4686
4687
4688
4689
4690
4691
4692
4693 static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4694 {
4695 struct nand_chip *chip = mtd_to_nand(mtd);
4696
4697 if (!chip->ops.unlock_area)
4698 return -ENOTSUPP;
4699
4700 return chip->ops.unlock_area(chip, ofs, len);
4701 }
4702
4703
/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
	/* If no controller is provided, use the dummy, legacy one. */
	if (!chip->controller) {
		chip->controller = &chip->legacy.dummy_controller;
		nand_controller_init(chip->controller);
	}

	nand_legacy_set_defaults(chip);

	/* Buffer alignment defaults to a single byte. */
	if (!chip->buf_align)
		chip->buf_align = 1;
}
4717
4718
4719 void sanitize_string(uint8_t *s, size_t len)
4720 {
4721 ssize_t i;
4722
4723
4724 s[len - 1] = 0;
4725
4726
4727 for (i = 0; i < len - 1; i++) {
4728 if (s[i] < ' ' || s[i] > 127)
4729 s[i] = '?';
4730 }
4731
4732
4733 strim(s);
4734 }
4735
4736
4737
4738
4739
4740
4741
4742
4743
4744
4745
4746
4747 static int nand_id_has_period(u8 *id_data, int arrlen, int period)
4748 {
4749 int i, j;
4750 for (i = 0; i < period; i++)
4751 for (j = i + period; j < arrlen; j += period)
4752 if (id_data[i] != id_data[j])
4753 return 0;
4754 return 1;
4755 }
4756
4757
4758
4759
4760
4761
4762
4763
4764
4765 static int nand_id_len(u8 *id_data, int arrlen)
4766 {
4767 int last_nonzero, period;
4768
4769
4770 for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4771 if (id_data[last_nonzero])
4772 break;
4773
4774
4775 if (last_nonzero < 0)
4776 return 0;
4777
4778
4779 for (period = 1; period < arrlen; period++)
4780 if (nand_id_has_period(id_data, arrlen, period))
4781 break;
4782
4783
4784 if (period < arrlen)
4785 return period;
4786
4787
4788 if (last_nonzero < arrlen - 1)
4789 return last_nonzero + 1;
4790
4791
4792 return arrlen;
4793 }
4794
4795
4796 static int nand_get_bits_per_cell(u8 cellinfo)
4797 {
4798 int bits;
4799
4800 bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4801 bits >>= NAND_CI_CELLTYPE_SHIFT;
4802 return bits + 1;
4803 }
4804
4805
4806
4807
4808
4809
/*
 * Many newer NAND chips share similar device ID codes; the geometry must
 * be decoded from the "extended ID" bytes. This implements the generic
 * decoding pattern (4th ID byte packs page size, OOB size, block size
 * and bus width as successive 2-bit fields).
 */
void nand_decode_ext_id(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int extid;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* The 3rd id byte holds MLC / multichip data */
	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
	/* The 4th id byte is the important one */
	extid = id_data[3];

	/* Calc pagesize */
	memorg->pagesize = 1024 << (extid & 0x03);
	mtd->writesize = memorg->pagesize;
	extid >>= 2;
	/* Calc oobsize: scales with the number of 512-byte sub-pages */
	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
	mtd->oobsize = memorg->oobsize;
	extid >>= 2;
	/* Calc blocksize. Blocksize is multiples of 64KiB */
	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
				       memorg->pagesize;
	mtd->erasesize = (64 * 1024) << (extid & 0x03);
	extid >>= 2;
	/* Get buswidth information */
	if (extid & 0x1)
		chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);
4842
4843
4844
4845
4846
4847
4848 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4849 {
4850 struct mtd_info *mtd = nand_to_mtd(chip);
4851 struct nand_memory_organization *memorg;
4852
4853 memorg = nanddev_get_memorg(&chip->base);
4854
4855 memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
4856 mtd->erasesize = type->erasesize;
4857 memorg->pagesize = type->pagesize;
4858 mtd->writesize = memorg->pagesize;
4859 memorg->oobsize = memorg->pagesize / 32;
4860 mtd->oobsize = memorg->oobsize;
4861
4862
4863 memorg->bits_per_cell = 1;
4864 }
4865
4866
4867
4868
4869
4870
4871 static void nand_decode_bbm_options(struct nand_chip *chip)
4872 {
4873 struct mtd_info *mtd = nand_to_mtd(chip);
4874
4875
4876 if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4877 chip->badblockpos = NAND_BBM_POS_LARGE;
4878 else
4879 chip->badblockpos = NAND_BBM_POS_SMALL;
4880 }
4881
4882 static inline bool is_full_id_nand(struct nand_flash_dev *type)
4883 {
4884 return type->id_len;
4885 }
4886
/*
 * Compare the chip's ID string against a full-ID table entry. On match,
 * fill the memory organization, options and ECC requirements from the
 * entry and duplicate its model name.
 *
 * Returns true when the entry matched and was fully applied; false when
 * it did not match, or when the model string allocation failed (note:
 * in the latter case memorg has already been partially updated).
 */
static bool find_full_id_nand(struct nand_chip *chip,
			      struct nand_flash_dev *type)
{
	struct nand_device *base = &chip->base;
	struct nand_ecc_props requirements;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	if (!strncmp(type->id, id_data, type->id_len)) {
		memorg->pagesize = type->pagesize;
		mtd->writesize = memorg->pagesize;
		memorg->pages_per_eraseblock = type->erasesize /
					       type->pagesize;
		mtd->erasesize = type->erasesize;
		memorg->oobsize = type->oobsize;
		mtd->oobsize = memorg->oobsize;

		/* The 3rd id byte holds MLC / multichip data */
		memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
		memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);
		chip->options |= type->options;
		requirements.strength = NAND_ECC_STRENGTH(type);
		requirements.step_size = NAND_ECC_STEP(type);
		nanddev_set_ecc_requirements(base, &requirements);

		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
		if (!chip->parameters.model)
			return false;

		return true;
	}
	return false;
}
4925
4926
4927
4928
4929
4930
4931 static void nand_manufacturer_detect(struct nand_chip *chip)
4932 {
4933
4934
4935
4936
4937 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4938 chip->manufacturer.desc->ops->detect) {
4939 struct nand_memory_organization *memorg;
4940
4941 memorg = nanddev_get_memorg(&chip->base);
4942
4943
4944 memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4945 chip->manufacturer.desc->ops->detect(chip);
4946 } else {
4947 nand_decode_ext_id(chip);
4948 }
4949 }
4950
4951
4952
4953
4954
4955
4956
4957 static int nand_manufacturer_init(struct nand_chip *chip)
4958 {
4959 if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4960 !chip->manufacturer.desc->ops->init)
4961 return 0;
4962
4963 return chip->manufacturer.desc->ops->init(chip);
4964 }
4965
4966
4967
4968
4969
4970
4971
4972 static void nand_manufacturer_cleanup(struct nand_chip *chip)
4973 {
4974
4975 if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4976 chip->manufacturer.desc->ops->cleanup)
4977 chip->manufacturer.desc->ops->cleanup(chip);
4978 }
4979
4980 static const char *
4981 nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
4982 {
4983 return manufacturer_desc ? manufacturer_desc->name : "Unknown";
4984 }
4985
4986
4987
4988
/*
 * Get the flash and manufacturer id and lookup if the type is supported.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer_desc *manufacturer_desc;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;
	u64 targetsize;

	/*
	 * Start by initializing memorg fields that might be left unassigned
	 * by the ID-based detection logic.
	 */
	memorg = nanddev_get_memorg(&chip->base);
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	nand_select_target(chip, 0);

	/* Send the command for reading device ID (2 bytes first) */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer_desc = nand_get_manufacturer_desc(maf_id);
	chip->manufacturer.desc = manufacturer_desc;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	/* Walk the table: full-ID entries first, then plain dev_id match. */
	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	/* Entries without a page size need manufacturer-specific decoding. */
	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		ret = -EINVAL;

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	targetsize = nanddev_target_size(&chip->base);
	chip->pagemask = (targetsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	/* ffs() works on 32-bit values: handle >4GiB targets separately. */
	if (targetsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)targetsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	/* Large targets need a third row-address cycle. */
	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}
5175
/*
 * Map the legacy "nand-ecc-mode" DT property onto the modern ECC engine
 * type enum. Returns NAND_ECC_ENGINE_TYPE_INVALID when the property is
 * absent or carries an unknown value.
 */
static enum nand_ecc_engine_type
of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
{
	/* Legacy mode values, kept local: only used to index the table below. */
	enum nand_ecc_legacy_mode {
		NAND_ECC_INVALID,
		NAND_ECC_NONE,
		NAND_ECC_SOFT,
		NAND_ECC_SOFT_BCH,
		NAND_ECC_HW,
		NAND_ECC_HW_SYNDROME,
		NAND_ECC_ON_DIE,
	};
	/* NB: index 0 (NAND_ECC_INVALID) is deliberately left NULL. */
	const char * const nand_ecc_legacy_modes[] = {
		[NAND_ECC_NONE]		= "none",
		[NAND_ECC_SOFT]		= "soft",
		[NAND_ECC_SOFT_BCH]	= "soft_bch",
		[NAND_ECC_HW]		= "hw",
		[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
		[NAND_ECC_ON_DIE]	= "on-die",
	};
	enum nand_ecc_legacy_mode eng_type;
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (err)
		return NAND_ECC_ENGINE_TYPE_INVALID;

	/* Start at NAND_ECC_NONE to skip the NULL entry at index 0. */
	for (eng_type = NAND_ECC_NONE;
	     eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
		if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
			switch (eng_type) {
			case NAND_ECC_NONE:
				return NAND_ECC_ENGINE_TYPE_NONE;
			case NAND_ECC_SOFT:
			case NAND_ECC_SOFT_BCH:
				return NAND_ECC_ENGINE_TYPE_SOFT;
			case NAND_ECC_HW:
			case NAND_ECC_HW_SYNDROME:
				return NAND_ECC_ENGINE_TYPE_ON_HOST;
			case NAND_ECC_ON_DIE:
				return NAND_ECC_ENGINE_TYPE_ON_DIE;
			default:
				break;
			}
		}
	}

	return NAND_ECC_ENGINE_TYPE_INVALID;
}
5226
5227 static enum nand_ecc_placement
5228 of_get_rawnand_ecc_placement_legacy(struct device_node *np)
5229 {
5230 const char *pm;
5231 int err;
5232
5233 err = of_property_read_string(np, "nand-ecc-mode", &pm);
5234 if (!err) {
5235 if (!strcasecmp(pm, "hw_syndrome"))
5236 return NAND_ECC_PLACEMENT_INTERLEAVED;
5237 }
5238
5239 return NAND_ECC_PLACEMENT_UNKNOWN;
5240 }
5241
5242 static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
5243 {
5244 const char *pm;
5245 int err;
5246
5247 err = of_property_read_string(np, "nand-ecc-mode", &pm);
5248 if (!err) {
5249 if (!strcasecmp(pm, "soft"))
5250 return NAND_ECC_ALGO_HAMMING;
5251 else if (!strcasecmp(pm, "soft_bch"))
5252 return NAND_ECC_ALGO_BCH;
5253 }
5254
5255 return NAND_ECC_ALGO_UNKNOWN;
5256 }
5257
5258 static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
5259 {
5260 struct device_node *dn = nand_get_flash_node(chip);
5261 struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
5262
5263 if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
5264 user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
5265
5266 if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
5267 user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
5268
5269 if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
5270 user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
5271 }
5272
5273 static int of_get_nand_bus_width(struct nand_chip *chip)
5274 {
5275 struct device_node *dn = nand_get_flash_node(chip);
5276 u32 val;
5277 int ret;
5278
5279 ret = of_property_read_u32(dn, "nand-bus-width", &val);
5280 if (ret == -EINVAL)
5281
5282 return 0;
5283 else if (ret)
5284 return ret;
5285
5286 if (val == 16)
5287 chip->options |= NAND_BUSWIDTH_16;
5288 else if (val != 8)
5289 return -EINVAL;
5290 return 0;
5291 }
5292
/*
 * Parse the "secure-regions" DT property into chip->secure_regions.
 * The property is a list of (offset, size) u64 pairs; its absence is
 * not an error.
 */
static int of_get_nand_secure_regions(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	struct property *prop;
	int nr_elem, i, j;

	/* Only proceed if the "secure-regions" property is present in DT */
	prop = of_find_property(dn, "secure-regions", NULL);
	if (!prop)
		return 0;

	nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
	if (nr_elem <= 0)
		return nr_elem;

	/* Two u64 elements (offset + size) describe one region. */
	chip->nr_secure_regions = nr_elem / 2;
	chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
				       GFP_KERNEL);
	if (!chip->secure_regions)
		return -ENOMEM;

	/*
	 * Read errors are ignored here; kcalloc() zeroed the array, so a
	 * failed read leaves a harmless zero-sized region.
	 */
	for (i = 0, j = 0; i < chip->nr_secure_regions; i++, j += 2) {
		of_property_read_u64_index(dn, "secure-regions", j,
					   &chip->secure_regions[i].offset);
		of_property_read_u64_index(dn, "secure-regions", j + 1,
					   &chip->secure_regions[i].size);
	}

	return 0;
}
5323
5324
5325
5326
5327
5328
5329
5330
5331 int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
5332 unsigned int *ncs_array)
5333 {
5334 struct device_node *np = dev->of_node;
5335 struct gpio_desc **descs;
5336 int ndescs, i;
5337
5338 ndescs = of_gpio_named_count(np, "cs-gpios");
5339 if (ndescs < 0) {
5340 dev_dbg(dev, "No valid cs-gpios property\n");
5341 return 0;
5342 }
5343
5344 descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL);
5345 if (!descs)
5346 return -ENOMEM;
5347
5348 for (i = 0; i < ndescs; i++) {
5349 descs[i] = gpiod_get_index_optional(dev, "cs", i,
5350 GPIOD_OUT_HIGH);
5351 if (IS_ERR(descs[i]))
5352 return PTR_ERR(descs[i]);
5353 }
5354
5355 *ncs_array = ndescs;
5356 *cs_array = descs;
5357
5358 return 0;
5359 }
5360 EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs);
5361
/*
 * Parse the generic NAND DT bindings into the chip structure and resolve
 * the effective ECC engine configuration (user choice first, then the
 * controller/core defaults).
 */
static int rawnand_dt_init(struct nand_chip *chip)
{
	struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
	struct device_node *dn = nand_get_flash_node(chip);
	int ret;

	/* No flash node: nothing to parse. */
	if (!dn)
		return 0;

	ret = of_get_nand_bus_width(chip);
	if (ret)
		return ret;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |= NAND_IS_BOOT_MEDIUM;

	if (of_property_read_bool(dn, "nand-on-flash-bbt"))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	of_get_nand_ecc_user_config(nand);
	of_get_nand_ecc_legacy_user_config(chip);

	/*
	 * If neither the user nor the NAND controller have requested a
	 * specific ECC engine type, we will default to
	 * NAND_ECC_ENGINE_TYPE_ON_HOST.
	 */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	/*
	 * Use the user requested engine type, unless it is invalid, in
	 * which case no type at all has been requested: use the NAND
	 * controller default, then the core default.
	 */
	if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.defaults.engine_type;

	chip->ecc.placement = nand->ecc.user_conf.placement;
	chip->ecc.algo = nand->ecc.user_conf.algo;
	chip->ecc.strength = nand->ecc.user_conf.strength;
	chip->ecc.size = nand->ecc.user_conf.step_size;

	return 0;
}
5407
5408
5409
5410
5411
5412
5413
5414
5415
5416
5417
5418
5419
5420
5421
/*
 * nand_scan_ident - Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads
 * the flash ID and sets up MTD fields accordingly. A chip must be
 * selected with nand_select_target() before any command is issued.
 */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Assume all dies are deselected when we enter nand_scan_ident(). */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);
	init_waitqueue_head(&chip->resume_wq);

	/* Enforce the right timings for reset/detection */
	chip->current_interface_config = nand_get_reset_interface_config();

	ret = rawnand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	memorg->ntargets = maxchips;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/* Check for a chip array: all dies must report the same IDs. */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);

		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;

		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}
5502
5503 static void nand_scan_ident_cleanup(struct nand_chip *chip)
5504 {
5505 kfree(chip->parameters.model);
5506 kfree(chip->parameters.onfi);
5507 }
5508
/*
 * rawnand_sw_hamming_init - Set up the software Hamming ECC engine for a
 * raw NAND chip and mirror the resolved configuration back into the
 * legacy chip->ecc fields.
 */
int rawnand_sw_hamming_init(struct nand_chip *chip)
{
	struct nand_ecc_sw_hamming_conf *engine_conf;
	struct nand_device *base = &chip->base;
	int ret;

	/* Seed the generic engine with the raw-NAND level configuration. */
	base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
	base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
	base->ecc.user_conf.strength = chip->ecc.strength;
	base->ecc.user_conf.step_size = chip->ecc.size;

	ret = nand_ecc_sw_hamming_init_ctx(base);
	if (ret)
		return ret;

	engine_conf = base->ecc.ctx.priv;

	/* Honor the Smart Media byte ordering when requested. */
	if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
		engine_conf->sm_order = true;

	/* Propagate the effective configuration back to chip->ecc. */
	chip->ecc.size = base->ecc.ctx.conf.step_size;
	chip->ecc.strength = base->ecc.ctx.conf.strength;
	chip->ecc.total = base->ecc.ctx.total;
	chip->ecc.steps = nanddev_get_ecc_nsteps(base);
	chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);

	return 0;
}
EXPORT_SYMBOL(rawnand_sw_hamming_init);
5538
5539 int rawnand_sw_hamming_calculate(struct nand_chip *chip,
5540 const unsigned char *buf,
5541 unsigned char *code)
5542 {
5543 struct nand_device *base = &chip->base;
5544
5545 return nand_ecc_sw_hamming_calculate(base, buf, code);
5546 }
5547 EXPORT_SYMBOL(rawnand_sw_hamming_calculate);
5548
5549 int rawnand_sw_hamming_correct(struct nand_chip *chip,
5550 unsigned char *buf,
5551 unsigned char *read_ecc,
5552 unsigned char *calc_ecc)
5553 {
5554 struct nand_device *base = &chip->base;
5555
5556 return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
5557 }
5558 EXPORT_SYMBOL(rawnand_sw_hamming_correct);
5559
5560 void rawnand_sw_hamming_cleanup(struct nand_chip *chip)
5561 {
5562 struct nand_device *base = &chip->base;
5563
5564 nand_ecc_sw_hamming_cleanup_ctx(base);
5565 }
5566 EXPORT_SYMBOL(rawnand_sw_hamming_cleanup);
5567
/*
 * rawnand_sw_bch_init - Set up the software BCH ECC engine for a raw
 * NAND chip and mirror the resolved configuration back into the legacy
 * chip->ecc fields.
 */
int rawnand_sw_bch_init(struct nand_chip *chip)
{
	struct nand_device *base = &chip->base;
	const struct nand_ecc_props *ecc_conf = nanddev_get_ecc_conf(base);
	int ret;

	/* Seed the generic engine with the raw-NAND level configuration. */
	base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
	base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH;
	base->ecc.user_conf.step_size = chip->ecc.size;
	base->ecc.user_conf.strength = chip->ecc.strength;

	ret = nand_ecc_sw_bch_init_ctx(base);
	if (ret)
		return ret;

	/* Propagate the effective configuration back to chip->ecc. */
	chip->ecc.size = ecc_conf->step_size;
	chip->ecc.strength = ecc_conf->strength;
	chip->ecc.total = base->ecc.ctx.total;
	chip->ecc.steps = nanddev_get_ecc_nsteps(base);
	chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);

	return 0;
}
EXPORT_SYMBOL(rawnand_sw_bch_init);
5592
5593 static int rawnand_sw_bch_calculate(struct nand_chip *chip,
5594 const unsigned char *buf,
5595 unsigned char *code)
5596 {
5597 struct nand_device *base = &chip->base;
5598
5599 return nand_ecc_sw_bch_calculate(base, buf, code);
5600 }
5601
5602 int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
5603 unsigned char *read_ecc, unsigned char *calc_ecc)
5604 {
5605 struct nand_device *base = &chip->base;
5606
5607 return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc);
5608 }
5609 EXPORT_SYMBOL(rawnand_sw_bch_correct);
5610
5611 void rawnand_sw_bch_cleanup(struct nand_chip *chip)
5612 {
5613 struct nand_device *base = &chip->base;
5614
5615 nand_ecc_sw_bch_cleanup_ctx(base);
5616 }
5617 EXPORT_SYMBOL(rawnand_sw_bch_cleanup);
5618
/*
 * Install the default page/OOB accessors for on-host (controller) ECC,
 * depending on where the ECC bytes are placed. OOB placement falls
 * through into the interleaved case so the mandatory-callback check
 * below applies to both.
 */
static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	switch (ecc->placement) {
	case NAND_ECC_PLACEMENT_UNKNOWN:
	case NAND_ECC_PLACEMENT_OOB:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		/* Subpage writes need both hwctl and calculate hooks. */
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		fallthrough;

	case NAND_ECC_PLACEMENT_INTERLEAVED:
		/*
		 * The defaults installed above rely on hwctl/calculate/
		 * correct; a driver missing those must supply its own
		 * full page accessors instead.
		 */
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			return -EINVAL;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;
		break;

	default:
		pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
			ecc->placement);
		return -EINVAL;
	}

	return 0;
}
5677
/*
 * Install the page/OOB accessors and engine configuration for software
 * ECC (Hamming or BCH). Must only be called when the engine type has
 * already been resolved to NAND_ECC_ENGINE_TYPE_SOFT.
 */
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret;

	if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_ALGO_HAMMING:
		ecc->calculate = rawnand_sw_hamming_calculate;
		ecc->correct = rawnand_sw_hamming_correct;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		/* Classic Hamming: 3 ECC bytes per 256-byte step. */
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		ret = rawnand_sw_hamming_init(chip);
		if (ret) {
			WARN(1, "Hamming ECC initialization failed!\n");
			return ret;
		}

		return 0;
	case NAND_ECC_ALGO_BCH:
		if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = rawnand_sw_bch_calculate;
		ecc->correct = rawnand_sw_bch_correct;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * The maximize-strength request only makes sense with the
		 * default large-page OOB layout; drop the flag when a
		 * custom layout is in use.
		 */
		if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH &&
		    mtd->ooblayout != nand_get_large_page_ooblayout())
			nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH;

		ret = rawnand_sw_bch_init(chip);
		if (ret) {
			WARN(1, "BCH ECC initialization failed!\n");
			return ret;
		}

		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
5754
5755
5756
5757
5758
5759
5760
5761
5762
5763
5764
/*
 * Check whether the driver-preset (chip->ecc.size, chip->ecc.strength)
 * pair is supported by the controller capabilities and fits in the
 * available OOB space. On success, chip->ecc.bytes is filled in.
 */
static int
nand_check_ecc_caps(struct nand_chip *chip,
		    const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int preset_step = chip->ecc.size;
	int preset_strength = chip->ecc.strength;
	int ecc_bytes, nsteps = mtd->writesize / preset_step;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];

		if (stepinfo->stepsize != preset_step)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			if (stepinfo->strengths[j] != preset_strength)
				continue;

			/* Supported pair found: verify the OOB budget. */
			ecc_bytes = caps->calc_ecc_bytes(preset_step,
							 preset_strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				return ecc_bytes;

			if (ecc_bytes * nsteps > oobavail) {
				pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
				       preset_step, preset_strength);
				return -ENOSPC;
			}

			chip->ecc.bytes = ecc_bytes;

			return 0;
		}
	}

	pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
	       preset_step, preset_strength);

	return -ENOTSUPP;
}
5808
5809
5810
5811
5812
5813
5814
5815
5816
5817
5818
/*
 * Match the chip's ECC requirements against the controller capabilities
 * and pick the (step, strength) pair that satisfies them with the fewest
 * total ECC bytes. Fills chip->ecc.{size,strength,bytes} on success.
 */
static int
nand_match_ecc_req(struct nand_chip *chip,
		   const struct nand_ecc_caps *caps, int oobavail)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int req_step = requirements->step_size;
	int req_strength = requirements->strength;
	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
	int best_step, best_strength, best_ecc_bytes;
	int best_ecc_bytes_total = INT_MAX;
	int i, j;

	/* No information provided by the NAND chip */
	if (!req_step || !req_strength)
		return -ENOTSUPP;

	/* number of correctable bits the chip requires in a page */
	req_corr = mtd->writesize / req_step * req_strength;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/*
			 * If both step size and strength are smaller than the
			 * chip's requirement, there is no way we can meet it.
			 */
			if (step_size < req_step && strength < req_strength)
				continue;

			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;
			ecc_bytes_total = ecc_bytes * nsteps;

			if (ecc_bytes_total > oobavail ||
			    strength * nsteps < req_corr)
				continue;

			/*
			 * We assume the best is to meet the chip's requirement
			 * with the least number of ECC bytes.
			 * (best_* are only read when a candidate was found,
			 * i.e. best_ecc_bytes_total != INT_MAX.)
			 */
			if (ecc_bytes_total < best_ecc_bytes_total) {
				best_ecc_bytes_total = ecc_bytes_total;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (best_ecc_bytes_total == INT_MAX)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
5892
5893
5894
5895
5896
5897
5898
5899
5900
5901
/*
 * Pick the strongest ECC configuration the controller supports within
 * the available OOB space, maximizing total correctable bits per page
 * (larger step size breaks ties). Honors a driver-preset chip->ecc.size.
 * Fills chip->ecc.{size,strength,bytes} on success.
 */
static int
nand_maximize_ecc(struct nand_chip *chip,
		  const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int step_size, strength, nsteps, ecc_bytes, corr;
	int best_corr = 0;
	int best_step = 0;
	int best_strength, best_ecc_bytes;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		/* If chip->ecc.size is already set, respect it */
		if (chip->ecc.size && step_size != chip->ecc.size)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;

			if (ecc_bytes * nsteps > oobavail)
				continue;

			corr = strength * nsteps;

			/*
			 * If the number of correctable bits is the same,
			 * bigger step_size has more reliability.
			 * (best_strength/best_ecc_bytes are only read when
			 * best_corr > 0, i.e. after a candidate was found.)
			 */
			if (corr > best_corr ||
			    (corr == best_corr && step_size > best_step)) {
				best_corr = corr;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (!best_corr)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
5962
5963
5964
5965
5966
5967
5968
5969
5970
5971
5972
5973
5974
5975
5976
5977
5978
5979
5980
/**
 * nand_ecc_choose_conf - Set the ECC strength and size
 * @chip: NAND chip object
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the ECC configuration according to following logic:
 *
 * 1. If both ECC step size and ECC strength are already set (usually by DT)
 *    then check if it is supported by this controller.
 * 2. If the user provided the nand-ecc-maximize property, then select maximum
 *    ECC strength.
 * 3. Otherwise, try to match the ECC step size and ECC strength closest
 *    to the chip's requirement. If available OOB size can't fit the chip
 *    requirement then fallback to the maximum ECC step size and ECC strength.
 *
 * On success, the chosen ECC settings are set.
 */
int nand_ecc_choose_conf(struct nand_chip *chip,
			 const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);

	if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
		return -EINVAL;

	if (chip->ecc.size && chip->ecc.strength)
		return nand_check_ecc_caps(chip, caps, oobavail);

	if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
		return nand_maximize_ecc(chip, caps, oobavail);

	if (!nand_match_ecc_req(chip, caps, oobavail))
		return 0;

	return nand_maximize_ecc(chip, caps, oobavail);
}
EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
6002
6003 static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
6004 {
6005 struct nand_chip *chip = container_of(nand, struct nand_chip,
6006 base);
6007 unsigned int eb = nanddev_pos_to_row(nand, pos);
6008 int ret;
6009
6010 eb >>= nand->rowconv.eraseblock_addr_shift;
6011
6012 nand_select_target(chip, pos->target);
6013 ret = nand_erase_op(chip, eb);
6014 nand_deselect_target(chip);
6015
6016 return ret;
6017 }
6018
6019 static int rawnand_markbad(struct nand_device *nand,
6020 const struct nand_pos *pos)
6021 {
6022 struct nand_chip *chip = container_of(nand, struct nand_chip,
6023 base);
6024
6025 return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
6026 }
6027
6028 static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
6029 {
6030 struct nand_chip *chip = container_of(nand, struct nand_chip,
6031 base);
6032 int ret;
6033
6034 nand_select_target(chip, pos->target);
6035 ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
6036 nand_deselect_target(chip);
6037
6038 return ret;
6039 }
6040
/* Operations exposed to the generic NAND layer for raw NAND chips */
static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};
6046
6047
6048
6049
6050
6051
6052
6053
6054
/**
 * nand_scan_tail - Scan for the NAND device
 * @chip: NAND chip object
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 */
static int nand_scan_tail(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		return -EINVAL;
	}

	/* One bounce buffer large enough for a full page + its OOB */
	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)
		return -ENOMEM;

	/*
	 * NOTE(review): die 0 is selected around the manufacturer init,
	 * presumably because manufacturer code expects a selected die when
	 * talking to the chip — confirm against the manufacturer drivers.
	 */
	nand_select_target(chip, 0);
	ret = nand_manufacturer_init(chip);
	nand_deselect_target(chip);
	if (ret)
		goto err_free_buf;

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no default placement scheme is given, pick one based on the OOB
	 * size. The soft Hamming/BCH engines are skipped here since they
	 * provide their own layout.
	 */
	if (!mtd->ooblayout &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_BCH) &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_HAMMING)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd,
					  nand_get_large_page_hamming_ooblayout());
			break;
		default:
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kind of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout for compatibility reasons.
			 */
			if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
				mtd_set_ooblayout(mtd,
						  nand_get_large_page_ooblayout());
				break;
			}

			WARN(1, "No oob scheme defined for oobsize %d\n",
			     mtd->oobsize);
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
	}

	/*
	 * Check ECC mode; install the per-engine page accessors. A host
	 * engine whose step size exceeds the page size falls back to soft
	 * Hamming ECC.
	 */
	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = nand_set_ecc_on_host_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		ecc->algo = NAND_ECC_ALGO_HAMMING;
		fallthrough;

	case NAND_ECC_ENGINE_TYPE_SOFT:
		ret = nand_set_ecc_soft_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;
		break;

	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		/* On-die ECC: the driver must supply the page accessors */
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_ENGINE_TYPE_NONE:
		pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/* ECC computation buffers, only needed if the engine computes/corrects */
	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB accessors also work for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* Propagate ECC info to mtd_info */
	mtd->ecc_strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode, and make sure the steps cover the page exactly.
	 */
	if (!ecc->steps)
		ecc->steps = mtd->writesize / ecc->size;
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	if (!ecc->total) {
		ecc->total = ecc->steps * ecc->bytes;
		chip->base.ecc.ctx.total = ecc->total;
	}

	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area. Treat a counting error as "no free bytes".
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;

	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak for the chip */
	if (!nand_ecc_is_strong_enough(&chip->base))
		pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
			mtd->name, chip->ecc.strength, chip->ecc.size,
			nanddev_get_ecc_requirements(&chip->base)->strength,
			nanddev_get_ecc_requirements(&chip->base)->step_size);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Invalidate the page cache */
	chip->pagecache.page = -1;

	/* Large page NAND with soft ECC can do subpage reads */
	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
	if (ret)
		goto err_nand_manuf_cleanup;

	/* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
	if (chip->options & NAND_ROM)
		mtd->flags = MTD_CAP_ROM;

	/* Fill in remaining MTD driver data */
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = nand_lock;
	mtd->_unlock = nand_unlock;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	/*
	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
	 * scan_bbt() might invoke mtd_read(), thereby bitflip_threshold must
	 * be initialized at this point.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	/* Find the fastest data interface for this chip */
	ret = nand_choose_interface_config(chip);
	if (ret)
		goto err_nanddev_cleanup;

	/* Enter fastest possible mode on all dies. */
	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
		ret = nand_setup_interface(chip, i);
		if (ret)
			goto err_free_interface_config;
	}

	/* Parse secure regions (from DT) before any BBT scan touches them */
	ret = of_get_nand_secure_regions(chip);
	if (ret)
		goto err_free_interface_config;

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_free_secure_regions;

	return 0;

err_free_secure_regions:
	kfree(chip->secure_regions);

err_free_interface_config:
	kfree(chip->best_interface_config);

err_nanddev_cleanup:
	nanddev_cleanup(&chip->base);

err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

err_free_buf:
	kfree(chip->data_buf);
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);

	return ret;
}
6361
6362 static int nand_attach(struct nand_chip *chip)
6363 {
6364 if (chip->controller->ops && chip->controller->ops->attach_chip)
6365 return chip->controller->ops->attach_chip(chip);
6366
6367 return 0;
6368 }
6369
6370 static void nand_detach(struct nand_chip *chip)
6371 {
6372 if (chip->controller->ops && chip->controller->ops->detach_chip)
6373 chip->controller->ops->detach_chip(chip);
6374 }
6375
6376
6377
6378
6379
6380
6381
6382
6383
6384
6385
/**
 * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @ids: optional flash IDs table
 *
 * Runs the identification phase, gives the controller driver a chance to
 * attach to the chip, then finishes the scan. On any failure the phases
 * already completed are unwound in reverse order.
 */
int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
		       struct nand_flash_dev *ids)
{
	int ret;

	if (!maxchips)
		return -EINVAL;

	ret = nand_scan_ident(chip, maxchips, ids);
	if (ret)
		return ret;

	/* Controller-specific per-chip setup (attach_chip hook) */
	ret = nand_attach(chip);
	if (ret)
		goto cleanup_ident;

	ret = nand_scan_tail(chip);
	if (ret)
		goto detach_chip;

	return 0;

detach_chip:
	nand_detach(chip);
cleanup_ident:
	nand_scan_ident_cleanup(chip);

	return ret;
}
EXPORT_SYMBOL(nand_scan_with_ids);
6416
6417
6418
6419
6420
6421 void nand_cleanup(struct nand_chip *chip)
6422 {
6423 if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) {
6424 if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
6425 rawnand_sw_hamming_cleanup(chip);
6426 else if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
6427 rawnand_sw_bch_cleanup(chip);
6428 }
6429
6430 nanddev_cleanup(&chip->base);
6431
6432
6433 kfree(chip->secure_regions);
6434
6435
6436 kfree(chip->bbt);
6437 kfree(chip->data_buf);
6438 kfree(chip->ecc.code_buf);
6439 kfree(chip->ecc.calc_buf);
6440
6441
6442 if (chip->badblock_pattern && chip->badblock_pattern->options
6443 & NAND_BBT_DYNAMICSTRUCT)
6444 kfree(chip->badblock_pattern);
6445
6446
6447 kfree(chip->best_interface_config);
6448
6449
6450 nand_manufacturer_cleanup(chip);
6451
6452
6453 nand_detach(chip);
6454
6455
6456 nand_scan_ident_cleanup(chip);
6457 }
6458
6459 EXPORT_SYMBOL_GPL(nand_cleanup);
6460
6461 MODULE_LICENSE("GPL");
6462 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
6463 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
6464 MODULE_DESCRIPTION("Generic NAND flash driver code");