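/*
 * Hynix NAND manufacturer-specific support: extended ID decoding (page,
 * eraseblock, OOB and ECC geometry), scrambling requirements and, for the
 * MLC 1x-nm parts, read-retry initialization based on an on-die OTP area.
 */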
#include <linux/sizes.h>
#include <linux/slab.h>

#include "internals.h"

#define NAND_HYNIX_CMD_SET_PARAMS	0x36
#define NAND_HYNIX_CMD_APPLY_PARAMS	0x16
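
/*
 * Each field of the read-retry OTP area is repeated
 * NAND_HYNIX_1XNM_RR_REPEAT times; the actual value is recovered with a
 * majority vote (see hynix_get_majority()).
 */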
#define NAND_HYNIX_1XNM_RR_REPEAT	8
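
/**
 * struct hynix_read_retry - read-retry data
 * @nregs: number of registers to program when applying a read-retry mode
 * @regs: register offsets (NAND chip dependent)
 * @values: values to program into @regs. The array contains
 *	    (nregs * nmodes) bytes, nregs values per retry mode
 */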
struct hynix_read_retry {
	int nregs;
	const u8 *regs;
	u8 values[];
};
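
/**
 * struct hynix_nand - private Hynix NAND manufacturer data
 * @read_retry: read-retry information extracted from the read-retry OTP
 *		area, or NULL if the chip does not support read-retry
 */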
struct hynix_nand {
	const struct hynix_read_retry *read_retry;
};
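
/**
 * struct hynix_read_retry_otp - description of the read-retry OTP area
 * @nregs: number of registers to program before reading the OTP area
 * @regs: register offsets to program
 * @values: values to program into @regs to unlock the OTP area
 * @page: page containing the read-retry OTP data
 * @size: size of the read-retry OTP data, in bytes
 */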
struct hynix_read_retry_otp {
	int nregs;
	const u8 *regs;
	const u8 *values;
	int page;
	int size;
};

static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
{
	u8 jedecid[5] = { };
	int ret;

	ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
	if (ret)
		return false;

	return !strncmp("JEDEC", jedecid, sizeof(jedecid));
}

static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(cmd, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, cmd, -1, -1);

	return 0;
}

static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
{
	u16 column = ((u16)addr << 8) | addr;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_ADDR(1, &addr, 0),
			NAND_OP_8BIT_DATA_OUT(1, &val, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/*
	 * Legacy path: the register address is passed through the column
	 * cycles (replicated on both column bytes), followed by the value.
	 */
	chip->legacy.cmdfunc(chip, NAND_CMD_NONE, column, -1);
	chip->legacy.write_byte(chip, val);

	return 0;
}
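
/*
 * A read-retry mode is selected by entering the 'Set Hynix Parameters'
 * mode (0x36), issuing one address + one data cycle per retry register,
 * and then applying the new values with the 0x16 command.
 */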
static int hynix_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
	const u8 *values;
	int i, ret;

	values = hynix->read_retry->values +
		 (retry_mode * hynix->read_retry->nregs);

	/* Enter 'Set Hynix Parameters' mode */
	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	/*
	 * Configure the NAND in the requested read-retry mode.
	 * This is done by setting pre-defined values in internal NAND
	 * registers.
	 *
	 * The set of registers is NAND specific, and the values are either
	 * predefined or extracted from an OTP area on the NAND (values are
	 * probably tweaked at production in this case).
	 */
	for (i = 0; i < hynix->read_retry->nregs; i++) {
		ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
					      values[i]);
		if (ret)
			return ret;
	}

	/* Apply the new settings. */
	return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
}
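
/**
 * hynix_get_majority - get the value occurring most often in a set of samples
 * @in: the array of values to test
 * @repeat: the size of the @in array
 * @out: pointer used to store the output value
 *
 * The read-retry parameters stored in the OTP area are replicated several
 * times to protect them against corruption. This helper implements a simple
 * majority check: a value at index i is accepted only if more than
 * @repeat / 2 of the other samples match it, otherwise -EIO is returned.
 *
 * Example with repeat = 8: { 5, 5, 5, 5, 5, 5, 0xff, 5 } yields 5 (six other
 * matches), while a set where no value occurs often enough is rejected.
 */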
static int hynix_get_majority(const u8 *in, int repeat, u8 *out)
{
	int i, j, half = repeat / 2;

	/*
	 * Only the first half of the in array needs to be tested: any value
	 * matching more than repeat / 2 of the samples necessarily also
	 * appears in the first half.
	 *
	 * This loop is suboptimal since occurrences of the same value may be
	 * counted several times, but it only runs on small sets, which makes
	 * it acceptable.
	 */
	for (i = 0; i < half; i++) {
		int cnt = 0;
		u8 val = in[i];

		/* Count the remaining samples matching the one at index i. */
		for (j = i + 1; j < repeat; j++) {
			if (in[j] == val)
				cnt++;
		}

		/* Accept the value if more than repeat / 2 samples match. */
		if (cnt > half) {
			*out = val;
			return 0;
		}
	}

	return -EIO;
}

static int hynix_read_rr_otp(struct nand_chip *chip,
			     const struct hynix_read_retry_otp *info,
			     void *buf)
{
	int i, ret;

	ret = nand_reset_op(chip);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	for (i = 0; i < info->nregs; i++) {
		ret = hynix_nand_reg_write_op(chip, info->regs[i],
					      info->values[i]);
		if (ret)
			return ret;
	}

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
	if (ret)
		return ret;

	/* Sequence of undocumented vendor commands to enter OTP mode. */
	ret = hynix_nand_cmd_op(chip, 0x17);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, 0x4);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, 0x19);
	if (ret)
		return ret;

	/* Now read the page holding the read-retry parameters. */
	ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
	if (ret)
		return ret;

	/* Put everything back to normal. */
	ret = nand_reset_op(chip);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
	if (ret)
		return ret;

	ret = hynix_nand_reg_write_op(chip, 0x38, 0);
	if (ret)
		return ret;

	ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
	if (ret)
		return ret;

	return nand_read_page_op(chip, 0, 0, NULL, 0);
}
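
/*
 * Layout of the MLC 1x-nm read-retry OTP area, as decoded below:
 * bytes 0-7:  number of read-retry modes, repeated 8 times
 * bytes 8-15: number of registers per mode, repeated 8 times
 * bytes 16+:  8 copies of the value sets, each copy stored as a normal set
 *             followed by a bit-inverted set of (nmodes * nregs) bytes.
 * For example, with 8 modes and 8 registers (set size 64), copy x starts at
 * offset 16 + (x * 128) and its inverted counterpart 64 bytes later.
 */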
#define NAND_HYNIX_1XNM_RR_COUNT_OFFS			0
#define NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS		8
#define NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, inv)	\
	(16 + ((((x) * 2) + ((inv) ? 1 : 0)) * (setsize)))

static int hynix_mlc_1xnm_rr_value(const u8 *buf, int nmodes, int nregs,
				   int mode, int reg, bool inv, u8 *val)
{
	u8 tmp[NAND_HYNIX_1XNM_RR_REPEAT];
	int val_offs = (mode * nregs) + reg;
	int set_size = nmodes * nregs;
	int i, ret;

	for (i = 0; i < NAND_HYNIX_1XNM_RR_REPEAT; i++) {
		int set_offs = NAND_HYNIX_1XNM_RR_SET_OFFS(i, set_size, inv);

		tmp[i] = buf[val_offs + set_offs];
	}

	ret = hynix_get_majority(tmp, NAND_HYNIX_1XNM_RR_REPEAT, val);
	if (ret)
		return ret;

	if (inv)
		*val = ~*val;

	return 0;
}

static u8 hynix_1xnm_mlc_read_retry_regs[] = {
	0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf
};
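
/*
 * Read the read-retry OTP area, recover the number of modes and registers
 * by majority vote, extract one value per (mode, register) pair (falling
 * back to the bit-inverted copies when the normal ones are corrupted) and
 * register the read-retry hook.
 */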
static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip,
				  const struct hynix_read_retry_otp *info)
{
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
	struct hynix_read_retry *rr = NULL;
	int ret, i, j;
	u8 nregs, nmodes;
	u8 *buf;

	buf = kmalloc(info->size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = hynix_read_rr_otp(chip, info, buf);
	if (ret)
		goto out;

	ret = hynix_get_majority(buf, NAND_HYNIX_1XNM_RR_REPEAT,
				 &nmodes);
	if (ret)
		goto out;

	ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_REPEAT,
				 NAND_HYNIX_1XNM_RR_REPEAT,
				 &nregs);
	if (ret)
		goto out;

	rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL);
	if (!rr) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < nmodes; i++) {
		for (j = 0; j < nregs; j++) {
			/* One value per (mode, register) pair. */
			u8 *val = rr->values + (i * nregs) + j;

			ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
						      false, val);
			if (!ret)
				continue;

			/* Fall back to the bit-inverted copies. */
			ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
						      true, val);
			if (ret)
				goto out;
		}
	}

	rr->nregs = nregs;
	rr->regs = hynix_1xnm_mlc_read_retry_regs;
	hynix->read_retry = rr;
	chip->ops.setup_read_retry = hynix_nand_setup_read_retry;
	chip->read_retries = nmodes;

out:
	kfree(buf);

	if (ret)
		kfree(rr);

	return ret;
}

static const u8 hynix_mlc_1xnm_rr_otp_regs[] = { 0x38 };
static const u8 hynix_mlc_1xnm_rr_otp_values[] = { 0x52 };

static const struct hynix_read_retry_otp hynix_mlc_1xnm_rr_otps[] = {
	{
		.nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
		.regs = hynix_mlc_1xnm_rr_otp_regs,
		.values = hynix_mlc_1xnm_rr_otp_values,
		.page = 0x21f,
		.size = 784
	},
	{
		.nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
		.regs = hynix_mlc_1xnm_rr_otp_regs,
		.values = hynix_mlc_1xnm_rr_otp_values,
		.page = 0x200,
		.size = 528,
	},
};
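
/*
 * Read-retry is only initialized for chips exposing a valid JEDEC ID and
 * built on a 1x-nm process (6th ID byte, high nibble == 4); other chips are
 * left without a read-retry implementation.
 */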
static int hynix_nand_rr_init(struct nand_chip *chip)
{
	int i, ret = 0;
	bool valid_jedecid;

	valid_jedecid = hynix_nand_has_valid_jedecid(chip);

	/*
	 * We only support read-retry for 1xnm NANDs, and those NANDs all
	 * expose a valid JEDEC ID.
	 */
	if (valid_jedecid) {
		u8 nand_tech = chip->id.data[5] >> 4;

		/* 1xnm technology */
		if (nand_tech == 4) {
			for (i = 0; i < ARRAY_SIZE(hynix_mlc_1xnm_rr_otps);
			     i++) {
				/*
				 * FIXME: Hynix recommend to copy the
				 * read-retry OTP area into a normal page.
				 */
				ret = hynix_mlc_1xnm_rr_init(chip,
						&hynix_mlc_1xnm_rr_otps[i]);
				if (!ret)
					break;
			}
		}
	}

	if (ret)
		pr_warn("failed to initialize read-retry infrastructure\n");

	return 0;
}
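
/*
 * The OOB size selector is encoded in the 4th ID byte: bits 2-3 form its
 * low bits and bit 6 its third bit. The lookup tables differ depending on
 * whether the chip exposes a valid JEDEC ID.
 */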
static void hynix_nand_extract_oobsize(struct nand_chip *chip,
				       bool valid_jedecid)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	u8 oobsize;

	memorg = nanddev_get_memorg(&chip->base);

	oobsize = ((chip->id.data[3] >> 2) & 0x3) |
		  ((chip->id.data[3] >> 4) & 0x4);

	if (valid_jedecid) {
		switch (oobsize) {
		case 0:
			memorg->oobsize = 2048;
			break;
		case 1:
			memorg->oobsize = 1664;
			break;
		case 2:
			memorg->oobsize = 1024;
			break;
		case 3:
			memorg->oobsize = 640;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid OOB size");
			break;
		}
	} else {
		switch (oobsize) {
		case 0:
			memorg->oobsize = 128;
			break;
		case 1:
			memorg->oobsize = 224;
			break;
		case 2:
			memorg->oobsize = 448;
			break;
		case 3:
			memorg->oobsize = 64;
			break;
		case 4:
			memorg->oobsize = 32;
			break;
		case 5:
			memorg->oobsize = 16;
			break;
		case 6:
			memorg->oobsize = 640;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid OOB size");
			break;
		}

		/*
		 * The OOB size decoded above appears to be given for an
		 * 8 KiB page: chips reporting device ID 0xde use larger
		 * pages and scale the OOB size accordingly.
		 */
		if (chip->id.data[1] == 0xde)
			memorg->oobsize *= memorg->pagesize / SZ_8K;
	}

	mtd->oobsize = memorg->oobsize;
}
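
/*
 * The ECC requirements are encoded in bits 4-6 of the 5th ID byte. The
 * meaning of that field depends on whether the chip has a valid JEDEC ID
 * and, for older parts, on the process node encoded in the 6th ID byte.
 */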
static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
						bool valid_jedecid)
{
	struct nand_device *base = &chip->base;
	struct nand_ecc_props requirements = {};
	u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;

	if (valid_jedecid) {
		/* Reference: H27UCG8T2E datasheet */
		requirements.step_size = 1024;

		switch (ecc_level) {
		case 0:
			requirements.step_size = 0;
			requirements.strength = 0;
			break;
		case 1:
			requirements.strength = 4;
			break;
		case 2:
			requirements.strength = 24;
			break;
		case 3:
			requirements.strength = 32;
			break;
		case 4:
			requirements.strength = 40;
			break;
		case 5:
			requirements.strength = 50;
			break;
		case 6:
			requirements.strength = 60;
			break;
		default:
			/*
			 * We should never reach this case, but if that
			 * happens, this probably means Hynix decided to use
			 * a different extended ID format, and we should find
			 * a way to support it.
			 */
			WARN(1, "Invalid ECC requirements");
		}
	} else {
		/*
		 * The ECC requirements field meaning depends on the NAND
		 * technology.
		 */
		u8 nand_tech = chip->id.data[5] & 0x7;

		if (nand_tech < 3) {
			/* Older (larger) process nodes */
			if (ecc_level < 5) {
				requirements.step_size = 512;
				requirements.strength = 1 << ecc_level;
			} else if (ecc_level < 7) {
				if (ecc_level == 5)
					requirements.step_size = 2048;
				else
					requirements.step_size = 1024;
				requirements.strength = 24;
			} else {
				/*
				 * We should never reach this case, but if
				 * that happens, this probably means Hynix
				 * decided to use a different extended ID
				 * format, and we should find a way to
				 * support it.
				 */
				WARN(1, "Invalid ECC requirements");
			}
		} else {
			/* Newer (smaller) process nodes */
			if (!ecc_level) {
				requirements.step_size = 0;
				requirements.strength = 0;
			} else if (ecc_level < 5) {
				requirements.step_size = 512;
				requirements.strength = 1 << (ecc_level - 1);
			} else {
				requirements.step_size = 1024;
				requirements.strength = 24 +
							(8 * (ecc_level - 5));
			}
		}
	}

	nanddev_set_ecc_requirements(base, &requirements);
}

static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
							bool valid_jedecid)
{
	u8 nand_tech;

	/* We need scrambling on all TLC NANDs */
	if (nanddev_bits_per_cell(&chip->base) > 2)
		chip->options |= NAND_NEED_SCRAMBLING;

	/* And on MLC NANDs built on small enough process nodes */
	if (valid_jedecid) {
		nand_tech = chip->id.data[5] >> 4;

		if (nand_tech > 0)
			chip->options |= NAND_NEED_SCRAMBLING;
	} else {
		nand_tech = chip->id.data[5] & 0x7;

		if (nand_tech > 2)
			chip->options |= NAND_NEED_SCRAMBLING;
	}
}
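
/*
 * Hynix MLC/TLC chips use a manufacturer-specific extended ID scheme: the
 * page size, eraseblock size, OOB size and ECC requirements are all derived
 * from ID bytes 4 to 6 (chip->id.data[3..5]) instead of the standard
 * extended ID format.
 */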
static void hynix_nand_decode_id(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	bool valid_jedecid;
	u8 tmp;

	memorg = nanddev_get_memorg(&chip->base);

	/*
	 * Exclude all SLC NANDs from this advanced detection scheme.
	 * According to the ranges defined in several datasheets, it might
	 * appear that even SLC NANDs could fall into this extended ID
	 * scheme. If that is the case, rework the test to let SLC NANDs go
	 * through the detection process.
	 */
	if (chip->id.len < 6 || nand_is_slc(chip)) {
		nand_decode_ext_id(chip);
		return;
	}

	/* Extract pagesize */
	memorg->pagesize = 2048 << (chip->id.data[3] & 0x03);
	mtd->writesize = memorg->pagesize;

	tmp = (chip->id.data[3] >> 4) & 0x3;

	/*
	 * When bit 7 of the 4th ID byte is set, the eraseblock size starts
	 * at 1 MiB and is shifted by bits 4-5; otherwise it starts at
	 * 128 KiB, except for the special case bits 4-5 == 3, which encodes
	 * a 768 KiB (512 KiB + 256 KiB) eraseblock.
	 */
	if (chip->id.data[3] & 0x80) {
		memorg->pages_per_eraseblock = (SZ_1M << tmp) /
					       memorg->pagesize;
		mtd->erasesize = SZ_1M << tmp;
	} else if (tmp == 3) {
		memorg->pages_per_eraseblock = (SZ_512K + SZ_256K) /
					       memorg->pagesize;
		mtd->erasesize = SZ_512K + SZ_256K;
	} else {
		memorg->pages_per_eraseblock = (SZ_128K << tmp) /
					       memorg->pagesize;
		mtd->erasesize = SZ_128K << tmp;
	}

	/*
	 * The remaining fields are encoded differently depending on whether
	 * the chip answers the JEDEC READID sequence, so probe that first.
	 */
	valid_jedecid = hynix_nand_has_valid_jedecid(chip);

	hynix_nand_extract_oobsize(chip, valid_jedecid);
	hynix_nand_extract_ecc_requirements(chip, valid_jedecid);
	hynix_nand_extract_scrambling_requirements(chip, valid_jedecid);
}

static void hynix_nand_cleanup(struct nand_chip *chip)
{
	struct hynix_nand *hynix = nand_get_manufacturer_data(chip);

	if (!hynix)
		return;

	kfree(hynix->read_retry);
	kfree(hynix);
	nand_set_manufacturer_data(chip, NULL);
}

static int
h27ucg8t2atrbc_choose_interface_config(struct nand_chip *chip,
				       struct nand_interface_config *iface)
{
	onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);

	return nand_choose_best_sdr_timings(chip, iface, NULL);
}

static int h27ucg8t2etrbc_init(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	chip->options |= NAND_NEED_SCRAMBLING;
	mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme);

	return 0;
}
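
/*
 * Manufacturer init: select the bad-block marker position (last page for
 * MLC/TLC, first and second page for SLC), install chip-specific hooks for
 * the H27UCG8T2ATR-BC and H27UCG8T2ETR-BC models, and set up read-retry.
 */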
static int hynix_nand_init(struct nand_chip *chip)
{
	struct hynix_nand *hynix;
	int ret;

	if (!nand_is_slc(chip))
		chip->options |= NAND_BBM_LASTPAGE;
	else
		chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;

	hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
	if (!hynix)
		return -ENOMEM;

	nand_set_manufacturer_data(chip, hynix);

	if (!strncmp("H27UCG8T2ATR-BC", chip->parameters.model,
		     sizeof("H27UCG8T2ATR-BC") - 1))
		chip->ops.choose_interface_config =
			h27ucg8t2atrbc_choose_interface_config;

	if (!strncmp("H27UCG8T2ETR-BC", chip->parameters.model,
		     sizeof("H27UCG8T2ETR-BC") - 1))
		h27ucg8t2etrbc_init(chip);

	ret = hynix_nand_rr_init(chip);
	if (ret)
		hynix_nand_cleanup(chip);

	return ret;
}

const struct nand_manufacturer_ops hynix_nand_manuf_ops = {
	.detect = hynix_nand_decode_id,
	.init = hynix_nand_init,
	.cleanup = hynix_nand_cleanup,
};