// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * MediaTek NAND ECC engine driver.
 *
 * Author: Xiaolei Li <xiaolei.li@mediatek.com>
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/mutex.h>
#include <linux/mtd/nand-ecc-mtk.h>

#define ECC_IDLE_MASK		BIT(0)
#define ECC_IRQ_EN		BIT(0)
#define ECC_PG_IRQ_SEL		BIT(1)
#define ECC_OP_ENABLE		(1)
#define ECC_OP_DISABLE		(0)

#define ECC_ENCCON		(0x00)
#define ECC_ENCCNFG		(0x04)
#define ECC_MS_SHIFT		(16)
#define ECC_ENCDIADDR		(0x08)
#define ECC_ENCIDLE		(0x0C)
#define ECC_DECCON		(0x100)
#define ECC_DECCNFG		(0x104)
#define DEC_EMPTY_EN		BIT(31)
#define DEC_CNFG_CORRECT	(0x3 << 12)
#define ECC_DECIDLE		(0x10C)
#define ECC_DECENUM0		(0x114)

#define ECC_TIMEOUT		(500000)

#define ECC_IDLE_REG(op)	((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
#define ECC_CTL_REG(op)		((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)

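/*
 * struct mtk_ecc_caps - per-SoC ECC engine parameters.
 * Field descriptions summarized from how the driver uses them:
 * @err_mask:          mask for one sector's error count in ECC_DECENUM*;
 *                     an all-ones count marks the sector as uncorrectable
 * @err_shift:         bit stride between consecutive per-sector count fields
 * @ecc_strength:      table of supported correction strengths (bits/sector)
 * @ecc_regs:          SoC-specific register offsets, indexed by enum mtk_ecc_regs
 * @num_ecc_strength:  number of entries in @ecc_strength
 * @ecc_mode_shift:    shift of the mode field in ECC_ENCCNFG/ECC_DECCNFG
 * @parity_bits:       parity bits generated per bit of correction strength
 * @pg_irq_sel:        non-zero if the engine supports a single per-page IRQ
 *                     in NFI mode (see mtk_ecc_enable())
 */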
struct mtk_ecc_caps {
	u32 err_mask;
	u32 err_shift;
	const u8 *ecc_strength;
	const u32 *ecc_regs;
	u8 num_ecc_strength;
	u8 ecc_mode_shift;
	u32 parity_bits;
	int pg_irq_sel;
};

struct mtk_ecc {
	struct device *dev;
	const struct mtk_ecc_caps *caps;
	void __iomem *regs;
	struct clk *clk;

	struct completion done;
	struct mutex lock;
	u32 sectors;

	u8 *eccdata;
};

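/*
 * Correction strengths (in bits per sector) supported by each SoC
 * generation.  The index chosen from these tables is what gets programmed
 * into ECC_ENCCNFG/ECC_DECCNFG in mtk_ecc_config().
 */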
static const u8 ecc_strength_mt2701[] = {
	4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
	40, 44, 48, 52, 56, 60
};

static const u8 ecc_strength_mt2712[] = {
	4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
	40, 44, 48, 52, 56, 60, 68, 72, 80
};

static const u8 ecc_strength_mt7622[] = {
	4, 6, 8, 10, 12
};

enum mtk_ecc_regs {
	ECC_ENCPAR00,
	ECC_ENCIRQ_EN,
	ECC_ENCIRQ_STA,
	ECC_DECDONE,
	ECC_DECIRQ_EN,
	ECC_DECIRQ_STA,
};

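/*
 * Register offsets that differ between SoC generations; they are looked up
 * through caps->ecc_regs using the enum mtk_ecc_regs indexes above.
 */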
static const u32 mt2701_ecc_regs[] = {
	[ECC_ENCPAR00] = 0x10,
	[ECC_ENCIRQ_EN] = 0x80,
	[ECC_ENCIRQ_STA] = 0x84,
	[ECC_DECDONE] = 0x124,
	[ECC_DECIRQ_EN] = 0x200,
	[ECC_DECIRQ_STA] = 0x204,
};

static const u32 mt2712_ecc_regs[] = {
	[ECC_ENCPAR00] = 0x300,
	[ECC_ENCIRQ_EN] = 0x80,
	[ECC_ENCIRQ_STA] = 0x84,
	[ECC_DECDONE] = 0x124,
	[ECC_DECIRQ_EN] = 0x200,
	[ECC_DECIRQ_STA] = 0x204,
};

static const u32 mt7622_ecc_regs[] = {
	[ECC_ENCPAR00] = 0x10,
	[ECC_ENCIRQ_EN] = 0x30,
	[ECC_ENCIRQ_STA] = 0x34,
	[ECC_DECDONE] = 0x11c,
	[ECC_DECIRQ_EN] = 0x140,
	[ECC_DECIRQ_STA] = 0x144,
};

static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
				     enum mtk_ecc_operation op)
{
	struct device *dev = ecc->dev;
	u32 val;
	int ret;

	ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val,
					val & ECC_IDLE_MASK,
					10, ECC_TIMEOUT);
	if (ret)
		dev_warn(dev, "%s NOT idle\n",
			 op == ECC_ENCODE ? "encoder" : "decoder");
}

static irqreturn_t mtk_ecc_irq(int irq, void *id)
{
	struct mtk_ecc *ecc = id;
	u32 dec, enc;

	dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA])
		    & ECC_IRQ_EN;
	if (dec) {
		dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
		if (dec & ecc->sectors) {
			/*
			 * Clear the decode IRQ status once more so that no
			 * spurious interrupt follows the completion.
			 */
			readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
			ecc->sectors = 0;
			complete(&ecc->done);
		} else {
			return IRQ_HANDLED;
		}
	} else {
		enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA])
		      & ECC_IRQ_EN;
		if (enc)
			complete(&ecc->done);
		else
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
{
	u32 ecc_bit, dec_sz, enc_sz;
	u32 reg, i;

	for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
		if (ecc->caps->ecc_strength[i] == config->strength)
			break;
	}

	if (i == ecc->caps->num_ecc_strength) {
		dev_err(ecc->dev, "invalid ecc strength %d\n",
			config->strength);
		return -EINVAL;
	}

	ecc_bit = i;

	if (config->op == ECC_ENCODE) {
		/* configure ECC encoder (in bits) */
		enc_sz = config->len << 3;

		reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
		reg |= (enc_sz << ECC_MS_SHIFT);
		writel(reg, ecc->regs + ECC_ENCCNFG);

		if (config->mode != ECC_NFI_MODE)
			writel(lower_32_bits(config->addr),
			       ecc->regs + ECC_ENCDIADDR);

	} else {
		/* configure ECC decoder (in bits) */
		dec_sz = (config->len << 3) +
			 config->strength * ecc->caps->parity_bits;

		reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
		reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
		reg |= DEC_EMPTY_EN;
		writel(reg, ecc->regs + ECC_DECCNFG);

		if (config->sectors)
			ecc->sectors = 1 << (config->sectors - 1);
	}

	return 0;
}

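/**
 * mtk_ecc_get_stats - collect decoder statistics for the last operation
 * @ecc: ECC engine
 * @stats: filled with the corrected/failed counters and maximum bitflips
 * @sectors: number of sectors that were decoded
 *
 * Reads the per-sector error counts from the ECC_DECENUM registers.  A
 * count equal to caps->err_mask means the sector could not be corrected
 * and is accounted as failed; otherwise the count is added to the
 * corrected total.  stats->bitflips is the largest count seen in any
 * single sector.
 */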
void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
		       int sectors)
{
	u32 offset, i, err;
	u32 bitflips = 0;

	stats->corrected = 0;
	stats->failed = 0;

	for (i = 0; i < sectors; i++) {
		offset = (i >> 2) << 2;
		err = readl(ecc->regs + ECC_DECENUM0 + offset);
		err = err >> ((i % 4) * ecc->caps->err_shift);
		err &= ecc->caps->err_mask;
		if (err == ecc->caps->err_mask) {
			/* uncorrectable errors */
			stats->failed++;
			continue;
		}

		stats->corrected += err;
		bitflips = max_t(u32, bitflips, err);
	}

	stats->bitflips = bitflips;
}
EXPORT_SYMBOL(mtk_ecc_get_stats);

void mtk_ecc_release(struct mtk_ecc *ecc)
{
	clk_disable_unprepare(ecc->clk);
	put_device(ecc->dev);
}
EXPORT_SYMBOL(mtk_ecc_release);

static void mtk_ecc_hw_init(struct mtk_ecc *ecc)
{
	mtk_ecc_wait_idle(ecc, ECC_ENCODE);
	writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);

	mtk_ecc_wait_idle(ecc, ECC_DECODE);
	writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
}

static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
{
	struct platform_device *pdev;
	struct mtk_ecc *ecc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return ERR_PTR(-EPROBE_DEFER);

	ecc = platform_get_drvdata(pdev);
	if (!ecc) {
		put_device(&pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	clk_prepare_enable(ecc->clk);
	mtk_ecc_hw_init(ecc);

	return ecc;
}

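/**
 * of_mtk_ecc_get - get the ECC engine referenced by a controller node
 * @of_node: NAND controller device node
 *
 * Looks up the "nand-ecc-engine" phandle (falling back to the legacy
 * "ecc-engine" property), enables the engine's clock and initializes the
 * hardware.  Returns NULL when no phandle is present,
 * ERR_PTR(-EPROBE_DEFER) when the engine has not probed yet, or the
 * mtk_ecc instance on success.
 */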
struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
{
	struct mtk_ecc *ecc = NULL;
	struct device_node *np;

	np = of_parse_phandle(of_node, "nand-ecc-engine", 0);
	/* for backward compatibility */
	if (!np)
		np = of_parse_phandle(of_node, "ecc-engine", 0);
	if (np) {
		ecc = mtk_ecc_get(np);
		of_node_put(np);
	}

	return ecc;
}
EXPORT_SYMBOL(of_mtk_ecc_get);

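/**
 * mtk_ecc_enable - configure and start an ECC operation
 * @ecc: ECC engine
 * @config: operation, mode, strength, length and (for DMA mode) address
 *
 * Takes ecc->lock (released by mtk_ecc_disable()), programs the encoder or
 * decoder and enables its interrupt, except for an NFI-mode encode where
 * no ECC interrupt is used.
 *
 * A NAND controller driver is expected to use the exported helpers roughly
 * as: mtk_ecc_enable(), trigger the transfer, mtk_ecc_wait_done(),
 * optionally mtk_ecc_get_stats() after a decode, then mtk_ecc_disable().
 */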
int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
{
	enum mtk_ecc_operation op = config->op;
	u16 reg_val;
	int ret;

	ret = mutex_lock_interruptible(&ecc->lock);
	if (ret) {
		dev_err(ecc->dev, "interrupted when attempting to lock\n");
		return ret;
	}

	mtk_ecc_wait_idle(ecc, op);

	ret = mtk_ecc_config(ecc, config);
	if (ret) {
		mutex_unlock(&ecc->lock);
		return ret;
	}

	if (config->mode != ECC_NFI_MODE || op != ECC_ENCODE) {
		init_completion(&ecc->done);
		reg_val = ECC_IRQ_EN;
		/*
		 * For ECC_NFI_MODE, if pg_irq_sel is 1 the chip raises a
		 * single ECC interrupt per page read/write; if it is 0, it
		 * raises one interrupt per ECC step.
		 */
		if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE)
			reg_val |= ECC_PG_IRQ_SEL;
		if (op == ECC_ENCODE)
			writew(reg_val, ecc->regs +
			       ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
		else
			writew(reg_val, ecc->regs +
			       ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
	}

	writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));

	return 0;
}
EXPORT_SYMBOL(mtk_ecc_enable);

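/**
 * mtk_ecc_disable - stop the currently enabled operation
 * @ecc: ECC engine
 *
 * Determines whether the encoder or the decoder is running by reading its
 * control register, disables the corresponding interrupt and operation,
 * and releases the lock taken in mtk_ecc_enable().
 */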
void mtk_ecc_disable(struct mtk_ecc *ecc)
{
	enum mtk_ecc_operation op = ECC_ENCODE;

	/* find out which operation is running */
	if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE)
		op = ECC_DECODE;

	/* disable it */
	mtk_ecc_wait_idle(ecc, op);
	if (op == ECC_DECODE) {
		/*
		 * Clear the decode IRQ status in case the decode interrupt
		 * timed out and was never serviced.
		 */
		readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
		writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
	} else {
		writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
	}

	writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));

	mutex_unlock(&ecc->lock);
}
EXPORT_SYMBOL(mtk_ecc_disable);

int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op)
{
	int ret;

	ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
	if (!ret) {
		dev_err(ecc->dev, "%s timeout - interrupt did not arrive\n",
			(op == ECC_ENCODE) ? "encoder" : "decoder");
		return -ETIMEDOUT;
	}

	return 0;
}
EXPORT_SYMBOL(mtk_ecc_wait_done);

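/**
 * mtk_ecc_encode - generate ECC parity for a data buffer
 * @ecc: ECC engine
 * @config: encode configuration (strength, mode, length)
 * @data: buffer holding @bytes of payload, with room for the parity bytes
 *        appended at @data + @bytes
 * @bytes: payload length in bytes
 *
 * Maps @data for DMA, runs the encoder and copies the generated parity
 * bytes from the ECC_ENCPAR registers to the end of the buffer.
 */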
int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
		   u8 *data, u32 bytes)
{
	dma_addr_t addr;
	u32 len;
	int ret;

	addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
	ret = dma_mapping_error(ecc->dev, addr);
	if (ret) {
		dev_err(ecc->dev, "dma mapping error\n");
		return -EINVAL;
	}

	config->op = ECC_ENCODE;
	config->addr = addr;
	ret = mtk_ecc_enable(ecc, config);
	if (ret) {
		dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
		return ret;
	}

	ret = mtk_ecc_wait_done(ecc, ECC_ENCODE);
	if (ret)
		goto timeout;

	mtk_ecc_wait_idle(ecc, ECC_ENCODE);

	/* number of parity bytes for the configured strength */
	len = (config->strength * ecc->caps->parity_bits + 7) >> 3;

	/* write the parity bytes generated by the ECC back to the temp buffer */
	__ioread32_copy(ecc->eccdata,
			ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00],
			round_up(len, 4));

	/* copy into the possibly unaligned OOB region with the actual length */
	memcpy(data + bytes, ecc->eccdata, len);
timeout:

	dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
	mtk_ecc_disable(ecc);

	return ret;
}
EXPORT_SYMBOL(mtk_ecc_encode);

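/**
 * mtk_ecc_adjust_strength - map a requested strength to a supported one
 * @ecc: ECC engine
 * @p: requested strength, updated in place
 *
 * Values between two supported strengths are rounded down, values below
 * the minimum are raised to it, and values above the maximum are clamped.
 * For example, on MT2701 a request of 30 bits becomes 28, since the table
 * jumps from 28 to 32.
 */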
void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
{
	const u8 *ecc_strength = ecc->caps->ecc_strength;
	int i;

	for (i = 0; i < ecc->caps->num_ecc_strength; i++) {
		if (*p <= ecc_strength[i]) {
			if (!i)
				*p = ecc_strength[i];
			else if (*p != ecc_strength[i])
				*p = ecc_strength[i - 1];
			return;
		}
	}

	*p = ecc_strength[ecc->caps->num_ecc_strength - 1];
}
EXPORT_SYMBOL(mtk_ecc_adjust_strength);

unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc)
{
	return ecc->caps->parity_bits;
}
EXPORT_SYMBOL(mtk_ecc_get_parity_bits);

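/*
 * Per-SoC capability tables matched via the compatible strings below.
 * err_mask/err_shift describe how the per-sector error counts are packed
 * into ECC_DECENUM: MT7622 uses 5-bit fields (err_shift = 5, err_mask =
 * 0x1f), while MT2701/MT2712 use 8-bit fields.
 */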
static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
	.err_mask = 0x3f,
	.err_shift = 8,
	.ecc_strength = ecc_strength_mt2701,
	.ecc_regs = mt2701_ecc_regs,
	.num_ecc_strength = 20,
	.ecc_mode_shift = 5,
	.parity_bits = 14,
	.pg_irq_sel = 0,
};

static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
	.err_mask = 0x7f,
	.err_shift = 8,
	.ecc_strength = ecc_strength_mt2712,
	.ecc_regs = mt2712_ecc_regs,
	.num_ecc_strength = 23,
	.ecc_mode_shift = 5,
	.parity_bits = 14,
	.pg_irq_sel = 1,
};

static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
	.err_mask = 0x1f,
	.err_shift = 5,
	.ecc_strength = ecc_strength_mt7622,
	.ecc_regs = mt7622_ecc_regs,
	.num_ecc_strength = 5,
	.ecc_mode_shift = 4,
	.parity_bits = 13,
	.pg_irq_sel = 0,
};

static const struct of_device_id mtk_ecc_dt_match[] = {
	{
		.compatible = "mediatek,mt2701-ecc",
		.data = &mtk_ecc_caps_mt2701,
	}, {
		.compatible = "mediatek,mt2712-ecc",
		.data = &mtk_ecc_caps_mt2712,
	}, {
		.compatible = "mediatek,mt7622-ecc",
		.data = &mtk_ecc_caps_mt7622,
	},
	{},
};

static int mtk_ecc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_ecc *ecc;
	u32 max_eccdata_size;
	int irq, ret;

	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc)
		return -ENOMEM;

	ecc->caps = of_device_get_match_data(dev);

	/* buffer large enough for the parity bytes of the maximum strength */
	max_eccdata_size = ecc->caps->num_ecc_strength - 1;
	max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
	max_eccdata_size = (max_eccdata_size * ecc->caps->parity_bits + 7) >> 3;
	max_eccdata_size = round_up(max_eccdata_size, 4);
	ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL);
	if (!ecc->eccdata)
		return -ENOMEM;

	ecc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ecc->regs))
		return PTR_ERR(ecc->regs);

	ecc->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(ecc->clk)) {
		dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
		return PTR_ERR(ecc->clk);
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "failed to set DMA mask\n");
		return ret;
	}

	ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return -EINVAL;
	}

	ecc->dev = dev;
	mutex_init(&ecc->lock);
	platform_set_drvdata(pdev, ecc);
	dev_info(dev, "probed\n");

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_ecc_suspend(struct device *dev)
{
	struct mtk_ecc *ecc = dev_get_drvdata(dev);

	clk_disable_unprepare(ecc->clk);

	return 0;
}

static int mtk_ecc_resume(struct device *dev)
{
	struct mtk_ecc *ecc = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(ecc->clk);
	if (ret) {
		dev_err(dev, "failed to enable clk\n");
		return ret;
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
#endif

MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);

static struct platform_driver mtk_ecc_driver = {
	.probe  = mtk_ecc_probe,
	.driver = {
		.name  = "mtk-ecc",
		.of_match_table = mtk_ecc_dt_match,
#ifdef CONFIG_PM_SLEEP
		.pm = &mtk_ecc_pm_ops,
#endif
	},
};

module_platform_driver(mtk_ecc_driver);

MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
MODULE_DESCRIPTION("MTK Nand ECC Driver");
MODULE_LICENSE("Dual MIT/GPL");