Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Support for Macronix external hardware ECC engine for NAND devices, also
0004  * called DPE for Data Processing Engine.
0005  *
0006  * Copyright © 2019 Macronix
0007  * Author: Miquel Raynal <miquel.raynal@bootlin.com>
0008  */
0009 
0010 #include <linux/dma-mapping.h>
0011 #include <linux/init.h>
0012 #include <linux/interrupt.h>
0013 #include <linux/io.h>
0014 #include <linux/iopoll.h>
0015 #include <linux/kernel.h>
0016 #include <linux/module.h>
0017 #include <linux/mtd/mtd.h>
0018 #include <linux/mtd/nand.h>
0019 #include <linux/mtd/nand-ecc-mxic.h>
0020 #include <linux/mutex.h>
0021 #include <linux/of_device.h>
0022 #include <linux/of_platform.h>
0023 #include <linux/platform_device.h>
0024 #include <linux/slab.h>
0025 
0026 /* DPE Configuration */
0027 #define DP_CONFIG 0x00
0028 #define   ECC_EN BIT(0)
0029 #define   ECC_TYP(idx) (((idx) << 3) & GENMASK(6, 3))
0030 /* DPE Interrupt Status */
0031 #define INTRPT_STS 0x04
0032 #define   TRANS_CMPLT BIT(0)
0033 #define   SDMA_MAIN BIT(1)
0034 #define   SDMA_SPARE BIT(2)
0035 #define   ECC_ERR BIT(3)
0036 #define   TO_SPARE BIT(4)
0037 #define   TO_MAIN BIT(5)
0038 /* DPE Interrupt Status Enable */
0039 #define INTRPT_STS_EN 0x08
0040 /* DPE Interrupt Signal Enable */
0041 #define INTRPT_SIG_EN 0x0C
0042 /* Host Controller Configuration */
0043 #define HC_CONFIG 0x10
0044 #define   DEV2MEM 0 /* TRANS_TYP_DMA in the spec */
0045 #define   MEM2MEM BIT(4) /* TRANS_TYP_IO in the spec */
0046 #define   MAPPING BIT(5) /* TRANS_TYP_MAPPING in the spec */
0047 #define   ECC_PACKED 0 /* LAYOUT_TYP_INTEGRATED in the spec */
0048 #define   ECC_INTERLEAVED BIT(2) /* LAYOUT_TYP_DISTRIBUTED in the spec */
0049 #define   BURST_TYP_FIXED 0
0050 #define   BURST_TYP_INCREASING BIT(0)
0051 /* Host Controller Slave Address */
0052 #define HC_SLV_ADDR 0x14
0053 /* ECC Chunk Size */
0054 #define CHUNK_SIZE 0x20
0055 /* Main Data Size */
0056 #define MAIN_SIZE 0x24
0057 /* Spare Data Size */
0058 #define SPARE_SIZE 0x28
0059 #define   META_SZ(reg) ((reg) & GENMASK(7, 0))
0060 #define   PARITY_SZ(reg) (((reg) & GENMASK(15, 8)) >> 8)
0061 #define   RSV_SZ(reg) (((reg) & GENMASK(23, 16)) >> 16)
0062 #define   SPARE_SZ(reg) ((reg) >> 24)
0063 /* ECC Chunk Count */
0064 #define CHUNK_CNT 0x30
0065 /* SDMA Control */
0066 #define SDMA_CTRL 0x40
0067 #define   WRITE_NAND 0
0068 #define   READ_NAND BIT(1)
0069 #define   CONT_NAND BIT(29)
0070 #define   CONT_SYSM BIT(30) /* Continue System Memory? */
0071 #define   SDMA_STRT BIT(31)
0072 /* SDMA Address of Main Data */
0073 #define SDMA_MAIN_ADDR 0x44
0074 /* SDMA Address of Spare Data */
0075 #define SDMA_SPARE_ADDR 0x48
0076 /* DPE Version Number */
0077 #define DP_VER 0xD0
0078 #define   DP_VER_OFFSET 16
0079 
0080 /* Status bytes between each chunk of spare data */
0081 #define STAT_BYTES 4
0082 #define   NO_ERR 0x00
0083 #define   MAX_CORR_ERR 0x28
0084 #define   UNCORR_ERR 0xFE
0085 #define   ERASED_CHUNK 0xFF
0086 
/*
 * Per-device state of one Macronix DPE instance. A single hardware engine
 * exposes two logical nand_ecc_engine handles (external and pipelined);
 * container_of() on either member recovers this structure.
 */
struct mxic_ecc_engine {
	struct device *dev;		/* platform device backing the engine */
	void __iomem *regs;		/* mapped DPE register window */
	int irq;			/* IRQ number, 0 = poll INTRPT_STS instead */
	struct completion complete;	/* signalled by ISR on TRANS_CMPLT */
	struct nand_ecc_engine external_engine;
	struct nand_ecc_engine pipelined_engine;
	struct mutex lock;		/* serializes SDMA register programming */
};
0096 
/* Per-NAND-device ECC context, stored in nand->ecc.ctx.priv */
struct mxic_ecc_ctx {
	/* ECC machinery */
	unsigned int data_step_sz;	/* main data bytes per ECC step */
	unsigned int oob_step_sz;	/* OOB bytes per ECC step */
	unsigned int parity_sz;		/* parity bytes per step (from SPARE_SIZE) */
	unsigned int meta_sz;		/* metadata bytes per step (from SPARE_SIZE) */
	u8 *status;			/* one engine status byte per step */
	int steps;			/* number of ECC steps per page */

	/* DMA boilerplate */
	struct nand_ecc_req_tweak_ctx req_ctx;
	u8 *oobwithstat;		/* OOB buffer widened by STAT_BYTES per step */
	struct scatterlist sg[2];	/* [0] = main data, [1] = oobwithstat */
	struct nand_page_io_req *req;	/* request saved between prepare/finish */
	unsigned int pageoffs;
};
0113 
/* Map the external-mode nand_ecc_engine handle back to its mxic_ecc_engine */
static struct mxic_ecc_engine *ext_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
{
	return container_of(eng, struct mxic_ecc_engine, external_engine);
}
0118 
/* Map the pipelined-mode nand_ecc_engine handle back to its mxic_ecc_engine */
static struct mxic_ecc_engine *pip_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
{
	return container_of(eng, struct mxic_ecc_engine, pipelined_engine);
}
0123 
0124 static struct mxic_ecc_engine *nand_to_mxic(struct nand_device *nand)
0125 {
0126     struct nand_ecc_engine *eng = nand->ecc.engine;
0127 
0128     if (eng->integration == NAND_ECC_ENGINE_INTEGRATION_EXTERNAL)
0129         return ext_ecc_eng_to_mxic(eng);
0130     else
0131         return pip_ecc_eng_to_mxic(eng);
0132 }
0133 
0134 static int mxic_ecc_ooblayout_ecc(struct mtd_info *mtd, int section,
0135                   struct mtd_oob_region *oobregion)
0136 {
0137     struct nand_device *nand = mtd_to_nanddev(mtd);
0138     struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
0139 
0140     if (section < 0 || section >= ctx->steps)
0141         return -ERANGE;
0142 
0143     oobregion->offset = (section * ctx->oob_step_sz) + ctx->meta_sz;
0144     oobregion->length = ctx->parity_sz;
0145 
0146     return 0;
0147 }
0148 
0149 static int mxic_ecc_ooblayout_free(struct mtd_info *mtd, int section,
0150                    struct mtd_oob_region *oobregion)
0151 {
0152     struct nand_device *nand = mtd_to_nanddev(mtd);
0153     struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
0154 
0155     if (section < 0 || section >= ctx->steps)
0156         return -ERANGE;
0157 
0158     if (!section) {
0159         oobregion->offset = 2;
0160         oobregion->length = ctx->meta_sz - 2;
0161     } else {
0162         oobregion->offset = section * ctx->oob_step_sz;
0163         oobregion->length = ctx->meta_sz;
0164     }
0165 
0166     return 0;
0167 }
0168 
/* OOB layout callbacks exposed to the MTD layer */
static const struct mtd_ooblayout_ops mxic_ecc_ooblayout_ops = {
	.ecc = mxic_ecc_ooblayout_ecc,
	.free = mxic_ecc_ooblayout_free,
};
0173 
0174 static void mxic_ecc_disable_engine(struct mxic_ecc_engine *mxic)
0175 {
0176     u32 reg;
0177 
0178     reg = readl(mxic->regs + DP_CONFIG);
0179     reg &= ~ECC_EN;
0180     writel(reg, mxic->regs + DP_CONFIG);
0181 }
0182 
0183 static void mxic_ecc_enable_engine(struct mxic_ecc_engine *mxic)
0184 {
0185     u32 reg;
0186 
0187     reg = readl(mxic->regs + DP_CONFIG);
0188     reg |= ECC_EN;
0189     writel(reg, mxic->regs + DP_CONFIG);
0190 }
0191 
/* Mask all interrupt signals */
static void mxic_ecc_disable_int(struct mxic_ecc_engine *mxic)
{
	writel(0, mxic->regs + INTRPT_SIG_EN);
}
0196 
/* Unmask only the transfer-complete interrupt */
static void mxic_ecc_enable_int(struct mxic_ecc_engine *mxic)
{
	writel(TRANS_CMPLT, mxic->regs + INTRPT_SIG_EN);
}
0201 
/*
 * Interrupt handler: wake the waiter on transfer completion, then
 * acknowledge every pending status bit by writing it back.
 */
static irqreturn_t mxic_ecc_isr(int irq, void *dev_id)
{
	struct mxic_ecc_engine *mxic = dev_id;
	u32 sts;

	sts = readl(mxic->regs + INTRPT_STS);
	if (!sts)
		return IRQ_NONE; /* shared line or spurious interrupt */

	if (sts & TRANS_CMPLT)
		complete(&mxic->complete);

	/* Status bits are write-1-to-clear */
	writel(sts, mxic->regs + INTRPT_STS);

	return IRQ_HANDLED;
}
0218 
0219 static int mxic_ecc_init_ctx(struct nand_device *nand, struct device *dev)
0220 {
0221     struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
0222     struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
0223     struct nand_ecc_props *reqs = &nand->ecc.requirements;
0224     struct nand_ecc_props *user = &nand->ecc.user_conf;
0225     struct mtd_info *mtd = nanddev_to_mtd(nand);
0226     int step_size = 0, strength = 0, desired_correction = 0, steps, idx;
0227     static const int possible_strength[] = {4, 8, 40, 48};
0228     static const int spare_size[] = {32, 32, 96, 96};
0229     struct mxic_ecc_ctx *ctx;
0230     u32 spare_reg;
0231     int ret;
0232 
0233     ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
0234     if (!ctx)
0235         return -ENOMEM;
0236 
0237     nand->ecc.ctx.priv = ctx;
0238 
0239     /* Only large page NAND chips may use BCH */
0240     if (mtd->oobsize < 64) {
0241         pr_err("BCH cannot be used with small page NAND chips\n");
0242         return -EINVAL;
0243     }
0244 
0245     mtd_set_ooblayout(mtd, &mxic_ecc_ooblayout_ops);
0246 
0247     /* Enable all status bits */
0248     writel(TRANS_CMPLT | SDMA_MAIN | SDMA_SPARE | ECC_ERR |
0249            TO_SPARE | TO_MAIN, mxic->regs + INTRPT_STS_EN);
0250 
0251     /* Configure the correction depending on the NAND device topology */
0252     if (user->step_size && user->strength) {
0253         step_size = user->step_size;
0254         strength = user->strength;
0255     } else if (reqs->step_size && reqs->strength) {
0256         step_size = reqs->step_size;
0257         strength = reqs->strength;
0258     }
0259 
0260     if (step_size && strength) {
0261         steps = mtd->writesize / step_size;
0262         desired_correction = steps * strength;
0263     }
0264 
0265     /* Step size is fixed to 1kiB, strength may vary (4 possible values) */
0266     conf->step_size = SZ_1K;
0267     steps = mtd->writesize / conf->step_size;
0268 
0269     ctx->status = devm_kzalloc(dev, steps * sizeof(u8), GFP_KERNEL);
0270     if (!ctx->status)
0271         return -ENOMEM;
0272 
0273     if (desired_correction) {
0274         strength = desired_correction / steps;
0275 
0276         for (idx = 0; idx < ARRAY_SIZE(possible_strength); idx++)
0277             if (possible_strength[idx] >= strength)
0278                 break;
0279 
0280         idx = min_t(unsigned int, idx,
0281                 ARRAY_SIZE(possible_strength) - 1);
0282     } else {
0283         /* Missing data, maximize the correction */
0284         idx = ARRAY_SIZE(possible_strength) - 1;
0285     }
0286 
0287     /* Tune the selected strength until it fits in the OOB area */
0288     for (; idx >= 0; idx--) {
0289         if (spare_size[idx] * steps <= mtd->oobsize)
0290             break;
0291     }
0292 
0293     /* This engine cannot be used with this NAND device */
0294     if (idx < 0)
0295         return -EINVAL;
0296 
0297     /* Configure the engine for the desired strength */
0298     writel(ECC_TYP(idx), mxic->regs + DP_CONFIG);
0299     conf->strength = possible_strength[idx];
0300     spare_reg = readl(mxic->regs + SPARE_SIZE);
0301 
0302     ctx->steps = steps;
0303     ctx->data_step_sz = mtd->writesize / steps;
0304     ctx->oob_step_sz = mtd->oobsize / steps;
0305     ctx->parity_sz = PARITY_SZ(spare_reg);
0306     ctx->meta_sz = META_SZ(spare_reg);
0307 
0308     /* Ensure buffers will contain enough bytes to store the STAT_BYTES */
0309     ctx->req_ctx.oob_buffer_size = nanddev_per_page_oobsize(nand) +
0310                     (ctx->steps * STAT_BYTES);
0311     ret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand);
0312     if (ret)
0313         return ret;
0314 
0315     ctx->oobwithstat = kmalloc(mtd->oobsize + (ctx->steps * STAT_BYTES),
0316                    GFP_KERNEL);
0317     if (!ctx->oobwithstat) {
0318         ret = -ENOMEM;
0319         goto cleanup_req_tweak;
0320     }
0321 
0322     sg_init_table(ctx->sg, 2);
0323 
0324     /* Configuration dump and sanity checks */
0325     dev_err(dev, "DPE version number: %d\n",
0326         readl(mxic->regs + DP_VER) >> DP_VER_OFFSET);
0327     dev_err(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE));
0328     dev_err(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE));
0329     dev_err(dev, "Spare size: %d\n", SPARE_SZ(spare_reg));
0330     dev_err(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg));
0331     dev_err(dev, "Parity size: %d\n", ctx->parity_sz);
0332     dev_err(dev, "Meta size: %d\n", ctx->meta_sz);
0333 
0334     if ((ctx->meta_sz + ctx->parity_sz + RSV_SZ(spare_reg)) !=
0335         SPARE_SZ(spare_reg)) {
0336         dev_err(dev, "Wrong OOB configuration: %d + %d + %ld != %d\n",
0337             ctx->meta_sz, ctx->parity_sz, RSV_SZ(spare_reg),
0338             SPARE_SZ(spare_reg));
0339         ret = -EINVAL;
0340         goto free_oobwithstat;
0341     }
0342 
0343     if (ctx->oob_step_sz != SPARE_SZ(spare_reg)) {
0344         dev_err(dev, "Wrong OOB configuration: %d != %d\n",
0345             ctx->oob_step_sz, SPARE_SZ(spare_reg));
0346         ret = -EINVAL;
0347         goto free_oobwithstat;
0348     }
0349 
0350     return 0;
0351 
0352 free_oobwithstat:
0353     kfree(ctx->oobwithstat);
0354 cleanup_req_tweak:
0355     nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
0356 
0357     return ret;
0358 }
0359 
/*
 * External-mode context init: the host drives the NAND bus itself and the
 * engine is fed one chunk at a time over memory-to-memory DMA.
 */
static int mxic_ecc_init_ctx_external(struct nand_device *nand)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct device *dev = nand->ecc.engine->dev;
	int ret;

	dev_info(dev, "Macronix ECC engine in external mode\n");

	ret = mxic_ecc_init_ctx(nand, dev);
	if (ret)
		return ret;

	/* Trigger each step manually */
	writel(1, mxic->regs + CHUNK_CNT);
	writel(BURST_TYP_INCREASING | ECC_PACKED | MEM2MEM,
	       mxic->regs + HC_CONFIG);

	return 0;
}
0379 
/*
 * Pipelined-mode context init: the engine sits between the host controller
 * and the NAND and processes a whole page in one go (MAPPING transfers).
 */
static int mxic_ecc_init_ctx_pipelined(struct nand_device *nand)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct mxic_ecc_ctx *ctx;
	struct device *dev;
	int ret;

	/* Resolve the actual ECC engine device behind the host controller */
	dev = nand_ecc_get_engine_dev(nand->ecc.engine->dev);
	if (!dev)
		return -EINVAL;

	dev_info(dev, "Macronix ECC engine in pipelined/mapping mode\n");

	ret = mxic_ecc_init_ctx(nand, dev);
	if (ret)
		return ret;

	ctx = nand_to_ecc_ctx(nand);

	/* All steps should be handled in one go directly by the internal DMA */
	writel(ctx->steps, mxic->regs + CHUNK_CNT);

	/*
	 * Interleaved ECC scheme cannot be used otherwise factory bad block
	 * markers would be lost. A packed layout is mandatory.
	 */
	writel(BURST_TYP_INCREASING | ECC_PACKED | MAPPING,
	       mxic->regs + HC_CONFIG);

	return 0;
}
0411 
0412 static void mxic_ecc_cleanup_ctx(struct nand_device *nand)
0413 {
0414     struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
0415 
0416     if (ctx) {
0417         nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
0418         kfree(ctx->oobwithstat);
0419     }
0420 }
0421 
0422 static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine *mxic)
0423 {
0424     u32 val;
0425     int ret;
0426 
0427     if (mxic->irq) {
0428         reinit_completion(&mxic->complete);
0429         mxic_ecc_enable_int(mxic);
0430         ret = wait_for_completion_timeout(&mxic->complete,
0431                           msecs_to_jiffies(1000));
0432         mxic_ecc_disable_int(mxic);
0433     } else {
0434         ret = readl_poll_timeout(mxic->regs + INTRPT_STS, val,
0435                      val & TRANS_CMPLT, 10, USEC_PER_SEC);
0436         writel(val, mxic->regs + INTRPT_STS);
0437     }
0438 
0439     if (ret) {
0440         dev_err(mxic->dev, "Timeout on data xfer completion\n");
0441         return -ETIMEDOUT;
0442     }
0443 
0444     return 0;
0445 }
0446 
0447 static int mxic_ecc_process_data(struct mxic_ecc_engine *mxic,
0448                  unsigned int direction)
0449 {
0450     unsigned int dir = (direction == NAND_PAGE_READ) ?
0451                READ_NAND : WRITE_NAND;
0452     int ret;
0453 
0454     mxic_ecc_enable_engine(mxic);
0455 
0456     /* Trigger processing */
0457     writel(SDMA_STRT | dir, mxic->regs + SDMA_CTRL);
0458 
0459     /* Wait for completion */
0460     ret = mxic_ecc_data_xfer_wait_for_completion(mxic);
0461 
0462     mxic_ecc_disable_engine(mxic);
0463 
0464     return ret;
0465 }
0466 
/*
 * Entry point for host controller drivers wrapping the pipelined engine:
 * optionally program the AXI slave (dirmap) address, then run one pass.
 */
int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
				    unsigned int direction, dma_addr_t dirmap)
{
	struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);

	if (dirmap)
		writel(dirmap, mxic->regs + HC_SLV_ADDR);

	return mxic_ecc_process_data(mxic, direction);
}
EXPORT_SYMBOL_GPL(mxic_ecc_process_data_pipelined);
0478 
0479 static void mxic_ecc_extract_status_bytes(struct mxic_ecc_ctx *ctx)
0480 {
0481     u8 *buf = ctx->oobwithstat;
0482     int next_stat_pos;
0483     int step;
0484 
0485     /* Extract the ECC status */
0486     for (step = 0; step < ctx->steps; step++) {
0487         next_stat_pos = ctx->oob_step_sz +
0488                 ((STAT_BYTES + ctx->oob_step_sz) * step);
0489 
0490         ctx->status[step] = buf[next_stat_pos];
0491     }
0492 }
0493 
0494 static void mxic_ecc_reconstruct_oobbuf(struct mxic_ecc_ctx *ctx,
0495                     u8 *dst, const u8 *src)
0496 {
0497     int step;
0498 
0499     /* Reconstruct the OOB buffer linearly (without the ECC status bytes) */
0500     for (step = 0; step < ctx->steps; step++)
0501         memcpy(dst + (step * ctx->oob_step_sz),
0502                src + (step * (ctx->oob_step_sz + STAT_BYTES)),
0503                ctx->oob_step_sz);
0504 }
0505 
0506 static void mxic_ecc_add_room_in_oobbuf(struct mxic_ecc_ctx *ctx,
0507                     u8 *dst, const u8 *src)
0508 {
0509     int step;
0510 
0511     /* Add some space in the OOB buffer for the status bytes */
0512     for (step = 0; step < ctx->steps; step++)
0513         memcpy(dst + (step * (ctx->oob_step_sz + STAT_BYTES)),
0514                src + (step * ctx->oob_step_sz),
0515                ctx->oob_step_sz);
0516 }
0517 
0518 static int mxic_ecc_count_biterrs(struct mxic_ecc_engine *mxic,
0519                   struct nand_device *nand)
0520 {
0521     struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
0522     struct mtd_info *mtd = nanddev_to_mtd(nand);
0523     struct device *dev = mxic->dev;
0524     unsigned int max_bf = 0;
0525     bool failure = false;
0526     int step;
0527 
0528     for (step = 0; step < ctx->steps; step++) {
0529         u8 stat = ctx->status[step];
0530 
0531         if (stat == NO_ERR) {
0532             dev_dbg(dev, "ECC step %d: no error\n", step);
0533         } else if (stat == ERASED_CHUNK) {
0534             dev_dbg(dev, "ECC step %d: erased\n", step);
0535         } else if (stat == UNCORR_ERR || stat > MAX_CORR_ERR) {
0536             dev_dbg(dev, "ECC step %d: uncorrectable\n", step);
0537             mtd->ecc_stats.failed++;
0538             failure = true;
0539         } else {
0540             dev_dbg(dev, "ECC step %d: %d bits corrected\n",
0541                 step, stat);
0542             max_bf = max_t(unsigned int, max_bf, stat);
0543             mtd->ecc_stats.corrected += stat;
0544         }
0545     }
0546 
0547     return failure ? -EBADMSG : max_bf;
0548 }
0549 
0550 /* External ECC engine helpers */
0551 static int mxic_ecc_prepare_io_req_external(struct nand_device *nand,
0552                         struct nand_page_io_req *req)
0553 {
0554     struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
0555     struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
0556     struct mtd_info *mtd = nanddev_to_mtd(nand);
0557     int offset, nents, step, ret;
0558 
0559     if (req->mode == MTD_OPS_RAW)
0560         return 0;
0561 
0562     nand_ecc_tweak_req(&ctx->req_ctx, req);
0563     ctx->req = req;
0564 
0565     if (req->type == NAND_PAGE_READ)
0566         return 0;
0567 
0568     mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat,
0569                     ctx->req->oobbuf.out);
0570 
0571     sg_set_buf(&ctx->sg[0], req->databuf.out, req->datalen);
0572     sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
0573            req->ooblen + (ctx->steps * STAT_BYTES));
0574 
0575     nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
0576     if (!nents)
0577         return -EINVAL;
0578 
0579     mutex_lock(&mxic->lock);
0580 
0581     for (step = 0; step < ctx->steps; step++) {
0582         writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
0583                mxic->regs + SDMA_MAIN_ADDR);
0584         writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
0585                mxic->regs + SDMA_SPARE_ADDR);
0586         ret = mxic_ecc_process_data(mxic, ctx->req->type);
0587         if (ret)
0588             break;
0589     }
0590 
0591     mutex_unlock(&mxic->lock);
0592 
0593     dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
0594 
0595     if (ret)
0596         return ret;
0597 
0598     /* Retrieve the calculated ECC bytes */
0599     for (step = 0; step < ctx->steps; step++) {
0600         offset = ctx->meta_sz + (step * ctx->oob_step_sz);
0601         mtd_ooblayout_get_eccbytes(mtd,
0602                        (u8 *)ctx->req->oobbuf.out + offset,
0603                        ctx->oobwithstat + (step * STAT_BYTES),
0604                        step * ctx->parity_sz,
0605                        ctx->parity_sz);
0606     }
0607 
0608     return 0;
0609 }
0610 
0611 static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
0612                        struct nand_page_io_req *req)
0613 {
0614     struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
0615     struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
0616     int nents, step, ret;
0617 
0618     if (req->mode == MTD_OPS_RAW)
0619         return 0;
0620 
0621     if (req->type == NAND_PAGE_WRITE) {
0622         nand_ecc_restore_req(&ctx->req_ctx, req);
0623         return 0;
0624     }
0625 
0626     /* Copy the OOB buffer and add room for the ECC engine status bytes */
0627     mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);
0628 
0629     sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
0630     sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
0631            req->ooblen + (ctx->steps * STAT_BYTES));
0632     nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
0633     if (!nents)
0634         return -EINVAL;
0635 
0636     mutex_lock(&mxic->lock);
0637 
0638     for (step = 0; step < ctx->steps; step++) {
0639         writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
0640                mxic->regs + SDMA_MAIN_ADDR);
0641         writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
0642                mxic->regs + SDMA_SPARE_ADDR);
0643         ret = mxic_ecc_process_data(mxic, ctx->req->type);
0644         if (ret)
0645             break;
0646     }
0647 
0648     mutex_unlock(&mxic->lock);
0649 
0650     dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
0651 
0652     if (ret) {
0653         nand_ecc_restore_req(&ctx->req_ctx, req);
0654         return ret;
0655     }
0656 
0657     /* Extract the status bytes and reconstruct the buffer */
0658     mxic_ecc_extract_status_bytes(ctx);
0659     mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in, ctx->oobwithstat);
0660 
0661     nand_ecc_restore_req(&ctx->req_ctx, req);
0662 
0663     return mxic_ecc_count_biterrs(mxic, nand);
0664 }
0665 
0666 /* Pipelined ECC engine helpers */
/* Pipelined ECC engine helpers */
/*
 * Pipelined-mode prepare hook: map the buffers and program the SDMA
 * addresses; the host controller then drives the actual transfer.
 *
 * NOTE: on success this returns with mxic->lock held; the matching
 * finish hook releases it. This is intentional: the lock must cover the
 * whole prepare/transfer/finish sequence.
 */
static int mxic_ecc_prepare_io_req_pipelined(struct nand_device *nand,
					     struct nand_page_io_req *req)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
	int nents;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	nand_ecc_tweak_req(&ctx->req_ctx, req);
	ctx->req = req;

	/* Copy the OOB buffer and add room for the ECC engine status bytes */
	mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);

	sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
	sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
		   req->ooblen + (ctx->steps * STAT_BYTES));

	nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	mutex_lock(&mxic->lock);

	writel(sg_dma_address(&ctx->sg[0]), mxic->regs + SDMA_MAIN_ADDR);
	writel(sg_dma_address(&ctx->sg[1]), mxic->regs + SDMA_SPARE_ADDR);

	return 0;
}
0698 
/*
 * Pipelined-mode finish hook: release the lock taken by the prepare hook,
 * unmap the buffers, and on reads derive the correction statistics.
 */
static int mxic_ecc_finish_io_req_pipelined(struct nand_device *nand,
					    struct nand_page_io_req *req)
{
	struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
	struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
	int ret = 0;

	if (req->mode == MTD_OPS_RAW)
		return 0;

	/* Paired with the mutex_lock() in the prepare hook */
	mutex_unlock(&mxic->lock);

	dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);

	if (req->type == NAND_PAGE_READ) {
		mxic_ecc_extract_status_bytes(ctx);
		mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in,
					    ctx->oobwithstat);
		ret = mxic_ecc_count_biterrs(mxic, nand);
	}

	nand_ecc_restore_req(&ctx->req_ctx, req);

	return ret;
}
0724 
/* Callbacks for the external (standalone engine) integration */
static struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = {
	.init_ctx = mxic_ecc_init_ctx_external,
	.cleanup_ctx = mxic_ecc_cleanup_ctx,
	.prepare_io_req = mxic_ecc_prepare_io_req_external,
	.finish_io_req = mxic_ecc_finish_io_req_external,
};
0731 
/* Callbacks for the pipelined (in-the-data-path) integration */
static struct nand_ecc_engine_ops mxic_ecc_engine_pipelined_ops = {
	.init_ctx = mxic_ecc_init_ctx_pipelined,
	.cleanup_ctx = mxic_ecc_cleanup_ctx,
	.prepare_io_req = mxic_ecc_prepare_io_req_pipelined,
	.finish_io_req = mxic_ecc_finish_io_req_pipelined,
};
0738 
/* Give wrapping host controller drivers access to the pipelined ops */
struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
{
	return &mxic_ecc_engine_pipelined_ops;
}
EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_ops);
0744 
0745 static struct platform_device *
0746 mxic_ecc_get_pdev(struct platform_device *spi_pdev)
0747 {
0748     struct platform_device *eng_pdev;
0749     struct device_node *np;
0750 
0751     /* Retrieve the nand-ecc-engine phandle */
0752     np = of_parse_phandle(spi_pdev->dev.of_node, "nand-ecc-engine", 0);
0753     if (!np)
0754         return NULL;
0755 
0756     /* Jump to the engine's device node */
0757     eng_pdev = of_find_device_by_node(np);
0758     of_node_put(np);
0759 
0760     return eng_pdev;
0761 }
0762 
/* Drop the device reference taken by mxic_ecc_get_pipelined_engine() */
void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng)
{
	struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);

	platform_device_put(to_platform_device(mxic->dev));
}
EXPORT_SYMBOL_GPL(mxic_ecc_put_pipelined_engine);
0770 
/*
 * Look up the pipelined engine referenced by a SPI controller's DT node.
 * Returns the engine handle (with a device reference held — release with
 * mxic_ecc_put_pipelined_engine()), -ENODEV if none is described, or
 * -EPROBE_DEFER if the engine has not probed yet (no drvdata).
 */
struct nand_ecc_engine *
mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev)
{
	struct platform_device *eng_pdev;
	struct mxic_ecc_engine *mxic;

	eng_pdev = mxic_ecc_get_pdev(spi_pdev);
	if (!eng_pdev)
		return ERR_PTR(-ENODEV);

	mxic = platform_get_drvdata(eng_pdev);
	if (!mxic) {
		platform_device_put(eng_pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	return &mxic->pipelined_engine;
}
EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_engine);
0790 
0791 /*
0792  * Only the external ECC engine is exported as the pipelined is SoC specific, so
0793  * it is registered directly by the drivers that wrap it.
0794  */
/*
 * Probe: map the register window, quiesce the engine, hook up the
 * (optional) interrupt, and register the external-mode engine with the
 * NAND core. The pipelined handle is published via drvdata for wrapping
 * host controller drivers.
 */
static int mxic_ecc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mxic_ecc_engine *mxic;
	int ret;

	mxic = devm_kzalloc(&pdev->dev, sizeof(*mxic), GFP_KERNEL);
	if (!mxic)
		return -ENOMEM;

	mxic->dev = &pdev->dev;

	/*
	 * Both memory regions for the ECC engine itself and the AXI slave
	 * address are mandatory.
	 */
	mxic->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mxic->regs)) {
		dev_err(&pdev->dev, "Missing memory region\n");
		return PTR_ERR(mxic->regs);
	}

	/* Start from a known quiet state */
	mxic_ecc_disable_engine(mxic);
	mxic_ecc_disable_int(mxic);

	/* IRQ is optional yet much more efficient */
	mxic->irq = platform_get_irq_byname_optional(pdev, "ecc-engine");
	if (mxic->irq > 0) {
		ret = devm_request_irq(&pdev->dev, mxic->irq, mxic_ecc_isr, 0,
				       "mxic-ecc", mxic);
		if (ret)
			return ret;
	} else {
		dev_info(dev, "Invalid or missing IRQ, fallback to polling\n");
		mxic->irq = 0; /* 0 selects the polling path everywhere */
	}

	mutex_init(&mxic->lock);

	/*
	 * In external mode, the device is the ECC engine. In pipelined mode,
	 * the device is the host controller. The device is used to match the
	 * right ECC engine based on the DT properties.
	 */
	mxic->external_engine.dev = &pdev->dev;
	mxic->external_engine.integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL;
	mxic->external_engine.ops = &mxic_ecc_engine_external_ops;

	nand_ecc_register_on_host_hw_engine(&mxic->external_engine);

	platform_set_drvdata(pdev, mxic);

	return 0;
}
0849 
/* Remove: unregister the external engine; devm handles the rest */
static int mxic_ecc_remove(struct platform_device *pdev)
{
	struct mxic_ecc_engine *mxic = platform_get_drvdata(pdev);

	nand_ecc_unregister_on_host_hw_engine(&mxic->external_engine);

	return 0;
}
0858 
/* DT match table for the standalone (external-mode) engine */
static const struct of_device_id mxic_ecc_of_ids[] = {
	{
		.compatible = "mxicy,nand-ecc-engine-rev3",
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mxic_ecc_of_ids);
0866 
/* Platform driver glue */
static struct platform_driver mxic_ecc_driver = {
	.driver = {
		.name = "mxic-nand-ecc-engine",
		.of_match_table = mxic_ecc_of_ids,
	},
	.probe = mxic_ecc_probe,
	.remove = mxic_ecc_remove,
};
module_platform_driver(mxic_ecc_driver);
0876 
0877 MODULE_LICENSE("GPL");
0878 MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
0879 MODULE_DESCRIPTION("Macronix NAND hardware ECC controller");