// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic Error-Correcting Code (ECC) engine
 *
 * Copyright (C) 2019 Macronix
 * Author:
 *     Miquèl RAYNAL <miquel.raynal@bootlin.com>
 *
 *
 * This file describes the abstraction of any NAND ECC engine. It has been
 * designed to fit most cases, including parallel NANDs and SPI-NANDs.
 *
 * There are three main situations where instantiating this ECC engine makes
 * sense:
 *   - external: The ECC engine is outside the NAND pipeline, typically this
 *               is a software ECC engine, or a hardware engine that is
 *               outside the NAND controller pipeline.
 *   - pipelined: The ECC engine is inside the NAND pipeline, i.e. on the
 *                controller's side. This is the case for most raw NAND
 *                controllers. In the pipelined case, the ECC bytes are
 *                generated on the fly when a page is written, and the data
 *                is corrected on the fly when a page is read.
 *   - ondie: The ECC engine is inside the NAND pipeline, on the chip's side.
 *            Some NAND chips can correct the data themselves.
 *
 * Besides the initial setup and final cleanups, the interfaces are rather
 * simple:
 *   - prepare: Prepare an I/O request. Enable/disable the ECC engine based on
 *              the I/O request type. In case of software correction or an
 *              external engine, this step may involve deriving the ECC bytes
 *              and placing them in the OOB area before a write.
 *   - finish: Finish an I/O request. Correct the data in case of a read
 *             request and report the number of corrected bits/uncorrectable
 *             errors. Most likely empty for write operations, unless you have
 *             hardware-specific stuff to do, like shutting down the engine to
 *             save power.
 *
 * The I/O request should be enclosed in a prepare()/finish() pair of calls
 * and will behave differently depending on the requested I/O type:
 *   - raw: Correction disabled
 *   - ecc: Correction enabled
 *
 * The request direction affects the logic as well:
 *   - read: Load data from the NAND chip
 *   - write: Store data in the NAND chip
 *
 * Combining all these parameters gives the following behaviors.
 * These are just examples; drivers are free to add custom steps in their
 * prepare/finish hooks.
 *
 * [external ECC engine]
 *   - external + prepare + raw + read: do nothing
 *   - external + finish  + raw + read: do nothing
 *   - external + prepare + raw + write: do nothing
 *   - external + finish  + raw + write: do nothing
 *   - external + prepare + ecc + read: do nothing
 *   - external + finish  + ecc + read: calculate expected ECC bytes, extract
 *                                      ECC bytes from OOB buffer, correct
 *                                      and report any bitflip/error
 *   - external + prepare + ecc + write: calculate ECC bytes and store them at
 *                                       the right place in the OOB buffer based
 *                                       on the OOB layout
 *   - external + finish  + ecc + write: do nothing
 *
 * [pipelined ECC engine]
 *   - pipelined + prepare + raw + read: disable the controller's ECC engine if
 *                                       activated
 *   - pipelined + finish  + raw + read: do nothing
 *   - pipelined + prepare + raw + write: disable the controller's ECC engine if
 *                                        activated
 *   - pipelined + finish  + raw + write: do nothing
 *   - pipelined + prepare + ecc + read: enable the controller's ECC engine if
 *                                       deactivated
 *   - pipelined + finish  + ecc + read: check the status, report any
 *                                       error/bitflip
 *   - pipelined + prepare + ecc + write: enable the controller's ECC engine if
 *                                        deactivated
 *   - pipelined + finish  + ecc + write: do nothing
 *
 * [ondie ECC engine]
 *   - ondie + prepare + raw + read: send commands to disable the on-chip ECC
 *                                   engine if activated
 *   - ondie + finish  + raw + read: do nothing
 *   - ondie + prepare + raw + write: send commands to disable the on-chip ECC
 *                                    engine if activated
 *   - ondie + finish  + raw + write: do nothing
 *   - ondie + prepare + ecc + read: send commands to enable the on-chip ECC
 *                                   engine if deactivated
 *   - ondie + finish  + ecc + read: send commands to check the status, report
 *                                   any error/bitflip
 *   - ondie + prepare + ecc + write: send commands to enable the on-chip ECC
 *                                    engine if deactivated
 *   - ondie + finish  + ecc + write: do nothing
 */
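
/*
 * Minimal usage sketch (illustrative only, not compiled as part of this
 * file): a NAND core is expected to bracket each page I/O with the two
 * helpers below. The my_read_page() and my_transfer_page() names are
 * hypothetical placeholders for the actual page transfer done by the
 * controller or core.
 *
 *    static int my_read_page(struct nand_device *nand,
 *                            struct nand_page_io_req *req)
 *    {
 *        int ret;
 *
 *        ret = nand_ecc_prepare_io_req(nand, req);
 *        if (ret)
 *            return ret;
 *
 *        ret = my_transfer_page(nand, req);
 *        if (ret)
 *            return ret;
 *
 *        return nand_ecc_finish_io_req(nand, req);
 *    }
 *
 * On a read, the value returned by nand_ecc_finish_io_req() is typically
 * either a negative error code (e.g. -EBADMSG) or the maximum number of
 * bitflips corrected in the page, depending on the engine implementation.
 */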

#include <linux/module.h>
#include <linux/mtd/nand.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

static LIST_HEAD(on_host_hw_engines);
static DEFINE_MUTEX(on_host_hw_engines_mutex);

/**
 * nand_ecc_init_ctx - Init the ECC engine context
 * @nand: the NAND device
 *
 * On success, the caller is responsible for calling @nand_ecc_cleanup_ctx().
 */
int nand_ecc_init_ctx(struct nand_device *nand)
{
    if (!nand->ecc.engine || !nand->ecc.engine->ops->init_ctx)
        return 0;

    return nand->ecc.engine->ops->init_ctx(nand);
}
EXPORT_SYMBOL(nand_ecc_init_ctx);

/**
 * nand_ecc_cleanup_ctx - Cleanup the ECC engine context
 * @nand: the NAND device
 */
void nand_ecc_cleanup_ctx(struct nand_device *nand)
{
    if (nand->ecc.engine && nand->ecc.engine->ops->cleanup_ctx)
        nand->ecc.engine->ops->cleanup_ctx(nand);
}
EXPORT_SYMBOL(nand_ecc_cleanup_ctx);

/**
 * nand_ecc_prepare_io_req - Prepare an I/O request
 * @nand: the NAND device
 * @req: the I/O request
 */
int nand_ecc_prepare_io_req(struct nand_device *nand,
                struct nand_page_io_req *req)
{
    if (!nand->ecc.engine || !nand->ecc.engine->ops->prepare_io_req)
        return 0;

    return nand->ecc.engine->ops->prepare_io_req(nand, req);
}
EXPORT_SYMBOL(nand_ecc_prepare_io_req);

/**
 * nand_ecc_finish_io_req - Finish an I/O request
 * @nand: the NAND device
 * @req: the I/O request
 */
int nand_ecc_finish_io_req(struct nand_device *nand,
               struct nand_page_io_req *req)
{
    if (!nand->ecc.engine || !nand->ecc.engine->ops->finish_io_req)
        return 0;

    return nand->ecc.engine->ops->finish_io_req(nand, req);
}
EXPORT_SYMBOL(nand_ecc_finish_io_req);

/* Define default OOB placement schemes for large and small page devices */
static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
                 struct mtd_oob_region *oobregion)
{
    struct nand_device *nand = mtd_to_nanddev(mtd);
    unsigned int total_ecc_bytes = nand->ecc.ctx.total;

    if (section > 1)
        return -ERANGE;

    if (!section) {
        oobregion->offset = 0;
        if (mtd->oobsize == 16)
            oobregion->length = 4;
        else
            oobregion->length = 3;
    } else {
        if (mtd->oobsize == 8)
            return -ERANGE;

        oobregion->offset = 6;
        oobregion->length = total_ecc_bytes - 4;
    }

    return 0;
}

static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
                  struct mtd_oob_region *oobregion)
{
    if (section > 1)
        return -ERANGE;

    if (mtd->oobsize == 16) {
        if (section)
            return -ERANGE;

        oobregion->length = 8;
        oobregion->offset = 8;
    } else {
        oobregion->length = 2;
        if (!section)
            oobregion->offset = 3;
        else
            oobregion->offset = 6;
    }

    return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
    .ecc = nand_ooblayout_ecc_sp,
    .free = nand_ooblayout_free_sp,
};

const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void)
{
    return &nand_ooblayout_sp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_small_page_ooblayout);

static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
                 struct mtd_oob_region *oobregion)
{
    struct nand_device *nand = mtd_to_nanddev(mtd);
    unsigned int total_ecc_bytes = nand->ecc.ctx.total;

    if (section || !total_ecc_bytes)
        return -ERANGE;

    oobregion->length = total_ecc_bytes;
    oobregion->offset = mtd->oobsize - oobregion->length;

    return 0;
}

static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
                  struct mtd_oob_region *oobregion)
{
    struct nand_device *nand = mtd_to_nanddev(mtd);
    unsigned int total_ecc_bytes = nand->ecc.ctx.total;

    if (section)
        return -ERANGE;

    oobregion->length = mtd->oobsize - total_ecc_bytes - 2;
    oobregion->offset = 2;

    return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
    .ecc = nand_ooblayout_ecc_lp,
    .free = nand_ooblayout_free_lp,
};

const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void)
{
    return &nand_ooblayout_lp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_ooblayout);
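
/*
 * Hedged example of how these default layouts are meant to be hooked up
 * (sketch only, not part of this file): once nand->ecc.ctx.total is known,
 * an ECC engine or NAND core can expose the matching scheme to the MTD layer
 * with mtd_set_ooblayout(). The 16-byte threshold mirrors the small-page
 * layouts above.
 *
 *    struct mtd_info *mtd = nanddev_to_mtd(nand);
 *
 *    if (mtd->oobsize <= 16)
 *        mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
 *    else
 *        mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
 */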

/*
 * Support the old "large page" layout used for 1-bit Hamming ECC where the
 * ECC bytes are placed at a fixed offset.
 */
static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
                     struct mtd_oob_region *oobregion)
{
    struct nand_device *nand = mtd_to_nanddev(mtd);
    unsigned int total_ecc_bytes = nand->ecc.ctx.total;

    if (section)
        return -ERANGE;

    switch (mtd->oobsize) {
    case 64:
        oobregion->offset = 40;
        break;
    case 128:
        oobregion->offset = 80;
        break;
    default:
        return -EINVAL;
    }

    oobregion->length = total_ecc_bytes;
    if (oobregion->offset + oobregion->length > mtd->oobsize)
        return -ERANGE;

    return 0;
}

static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
                      struct mtd_oob_region *oobregion)
{
    struct nand_device *nand = mtd_to_nanddev(mtd);
    unsigned int total_ecc_bytes = nand->ecc.ctx.total;
    int ecc_offset = 0;

    if (section < 0 || section > 1)
        return -ERANGE;

    switch (mtd->oobsize) {
    case 64:
        ecc_offset = 40;
        break;
    case 128:
        ecc_offset = 80;
        break;
    default:
        return -EINVAL;
    }

    if (section == 0) {
        oobregion->offset = 2;
        oobregion->length = ecc_offset - 2;
    } else {
        oobregion->offset = ecc_offset + total_ecc_bytes;
        oobregion->length = mtd->oobsize - oobregion->offset;
    }

    return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
    .ecc = nand_ooblayout_ecc_lp_hamming,
    .free = nand_ooblayout_free_lp_hamming,
};

const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void)
{
    return &nand_ooblayout_lp_hamming_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_hamming_ooblayout);

static enum nand_ecc_engine_type
of_get_nand_ecc_engine_type(struct device_node *np)
{
    struct device_node *eng_np;

    if (of_property_read_bool(np, "nand-no-ecc-engine"))
        return NAND_ECC_ENGINE_TYPE_NONE;

    if (of_property_read_bool(np, "nand-use-soft-ecc-engine"))
        return NAND_ECC_ENGINE_TYPE_SOFT;

    eng_np = of_parse_phandle(np, "nand-ecc-engine", 0);
    of_node_put(eng_np);

    if (eng_np) {
        if (eng_np == np)
            return NAND_ECC_ENGINE_TYPE_ON_DIE;
        else
            return NAND_ECC_ENGINE_TYPE_ON_HOST;
    }

    return NAND_ECC_ENGINE_TYPE_INVALID;
}

static const char * const nand_ecc_placement[] = {
    [NAND_ECC_PLACEMENT_OOB] = "oob",
    [NAND_ECC_PLACEMENT_INTERLEAVED] = "interleaved",
};

static enum nand_ecc_placement of_get_nand_ecc_placement(struct device_node *np)
{
    enum nand_ecc_placement placement;
    const char *pm;
    int err;

    err = of_property_read_string(np, "nand-ecc-placement", &pm);
    if (!err) {
        for (placement = NAND_ECC_PLACEMENT_OOB;
             placement < ARRAY_SIZE(nand_ecc_placement); placement++) {
            if (!strcasecmp(pm, nand_ecc_placement[placement]))
                return placement;
        }
    }

    return NAND_ECC_PLACEMENT_UNKNOWN;
}

static const char * const nand_ecc_algos[] = {
    [NAND_ECC_ALGO_HAMMING] = "hamming",
    [NAND_ECC_ALGO_BCH] = "bch",
    [NAND_ECC_ALGO_RS] = "rs",
};

static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
{
    enum nand_ecc_algo ecc_algo;
    const char *pm;
    int err;

    err = of_property_read_string(np, "nand-ecc-algo", &pm);
    if (!err) {
        for (ecc_algo = NAND_ECC_ALGO_HAMMING;
             ecc_algo < ARRAY_SIZE(nand_ecc_algos);
             ecc_algo++) {
            if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
                return ecc_algo;
        }
    }

    return NAND_ECC_ALGO_UNKNOWN;
}

static int of_get_nand_ecc_step_size(struct device_node *np)
{
    int ret;
    u32 val;

    ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
    return ret ? ret : val;
}

static int of_get_nand_ecc_strength(struct device_node *np)
{
    int ret;
    u32 val;

    ret = of_property_read_u32(np, "nand-ecc-strength", &val);
    return ret ? ret : val;
}

void of_get_nand_ecc_user_config(struct nand_device *nand)
{
    struct device_node *dn = nanddev_get_of_node(nand);
    int strength, size;

    nand->ecc.user_conf.engine_type = of_get_nand_ecc_engine_type(dn);
    nand->ecc.user_conf.algo = of_get_nand_ecc_algo(dn);
    nand->ecc.user_conf.placement = of_get_nand_ecc_placement(dn);

    strength = of_get_nand_ecc_strength(dn);
    if (strength >= 0)
        nand->ecc.user_conf.strength = strength;

    size = of_get_nand_ecc_step_size(dn);
    if (size >= 0)
        nand->ecc.user_conf.step_size = size;

    if (of_property_read_bool(dn, "nand-ecc-maximize"))
        nand->ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
}
EXPORT_SYMBOL(of_get_nand_ecc_user_config);
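
/*
 * Illustrative call site (assumptions flagged, not part of this file): a NAND
 * core would typically run of_get_nand_ecc_user_config() early, then let the
 * selected engine reconcile user_conf with the chip requirements. The
 * fallback shown here is only a sketch of that reconciliation.
 *
 *    of_get_nand_ecc_user_config(nand);
 *
 *    if (nand->ecc.user_conf.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
 *        nand->ecc.user_conf.engine_type = nand->ecc.defaults.engine_type;
 *    if (!nand->ecc.user_conf.strength)
 *        nand->ecc.user_conf.strength = nand->ecc.requirements.strength;
 */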

/**
 * nand_ecc_is_strong_enough - Check if the chip configuration meets the
 *                             datasheet requirements.
 *
 * @nand: Device to check
 *
 * If our configuration corrects A bits per B bytes and the minimum
 * required correction level is X bits per Y bytes, then we must ensure
 * both of the following are true:
 *
 * (1) A / B >= X / Y
 * (2) A >= X
 *
 * Requirement (1) ensures we can correct for the required bitflip density.
 * Requirement (2) ensures we can correct even when all bitflips are clumped
 * in the same sector.
 */
bool nand_ecc_is_strong_enough(struct nand_device *nand)
{
    const struct nand_ecc_props *reqs = nanddev_get_ecc_requirements(nand);
    const struct nand_ecc_props *conf = nanddev_get_ecc_conf(nand);
    struct mtd_info *mtd = nanddev_to_mtd(nand);
    int corr, ds_corr;

    if (conf->step_size == 0 || reqs->step_size == 0)
        /* Not enough information */
        return true;

    /*
     * We get the number of corrected bits per page to compare
     * the correction density.
     */
    corr = (mtd->writesize * conf->strength) / conf->step_size;
    ds_corr = (mtd->writesize * reqs->strength) / reqs->step_size;

    return corr >= ds_corr && conf->strength >= reqs->strength;
}
EXPORT_SYMBOL(nand_ecc_is_strong_enough);
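
/*
 * Worked example for the check above (numbers chosen for illustration):
 * with a 4096-byte page, a chip requiring 4 bits per 512 bytes
 * (reqs->strength = 4, reqs->step_size = 512) and an engine configured for
 * 8 bits per 1024 bytes (conf->strength = 8, conf->step_size = 1024), the
 * per-page figures are corr = 4096 * 8 / 1024 = 32 and
 * ds_corr = 4096 * 4 / 512 = 32. Condition (1) holds (32 >= 32) and so does
 * condition (2) (8 >= 4), so the configuration is reported as strong enough.
 */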

/* ECC engine driver internal helpers */
int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
                   struct nand_device *nand)
{
    unsigned int total_buffer_size;

    ctx->nand = nand;

    /* Let the user decide the exact length of each buffer */
    if (!ctx->page_buffer_size)
        ctx->page_buffer_size = nanddev_page_size(nand);
    if (!ctx->oob_buffer_size)
        ctx->oob_buffer_size = nanddev_per_page_oobsize(nand);

    total_buffer_size = ctx->page_buffer_size + ctx->oob_buffer_size;

    ctx->spare_databuf = kzalloc(total_buffer_size, GFP_KERNEL);
    if (!ctx->spare_databuf)
        return -ENOMEM;

    ctx->spare_oobbuf = ctx->spare_databuf + ctx->page_buffer_size;

    return 0;
}
EXPORT_SYMBOL_GPL(nand_ecc_init_req_tweaking);

void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx)
{
    kfree(ctx->spare_databuf);
}
EXPORT_SYMBOL_GPL(nand_ecc_cleanup_req_tweaking);

/*
 * Ensure the data and OOB areas are fully read/written, otherwise the
 * correction might not work as expected.
 */
void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
            struct nand_page_io_req *req)
{
    struct nand_device *nand = ctx->nand;
    struct nand_page_io_req *orig, *tweak;

    /* Save the original request */
    ctx->orig_req = *req;
    ctx->bounce_data = false;
    ctx->bounce_oob = false;
    orig = &ctx->orig_req;
    tweak = req;

    /* Ensure the request covers the entire page */
    if (orig->datalen < nanddev_page_size(nand)) {
        ctx->bounce_data = true;
        tweak->dataoffs = 0;
        tweak->datalen = nanddev_page_size(nand);
        tweak->databuf.in = ctx->spare_databuf;
        memset(tweak->databuf.in, 0xFF, ctx->page_buffer_size);
    }

    if (orig->ooblen < nanddev_per_page_oobsize(nand)) {
        ctx->bounce_oob = true;
        tweak->ooboffs = 0;
        tweak->ooblen = nanddev_per_page_oobsize(nand);
        tweak->oobbuf.in = ctx->spare_oobbuf;
        memset(tweak->oobbuf.in, 0xFF, ctx->oob_buffer_size);
    }

    /* Copy the data that must be written into the bounce buffers, if needed */
    if (orig->type == NAND_PAGE_WRITE) {
        if (ctx->bounce_data)
            memcpy((void *)tweak->databuf.out + orig->dataoffs,
                   orig->databuf.out, orig->datalen);

        if (ctx->bounce_oob)
            memcpy((void *)tweak->oobbuf.out + orig->ooboffs,
                   orig->oobbuf.out, orig->ooblen);
    }
}
EXPORT_SYMBOL_GPL(nand_ecc_tweak_req);

void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
              struct nand_page_io_req *req)
{
    struct nand_page_io_req *orig, *tweak;

    orig = &ctx->orig_req;
    tweak = req;

    /* Restore the data read from the bounce buffers, if needed */
    if (orig->type == NAND_PAGE_READ) {
        if (ctx->bounce_data)
            memcpy(orig->databuf.in,
                   tweak->databuf.in + orig->dataoffs,
                   orig->datalen);

        if (ctx->bounce_oob)
            memcpy(orig->oobbuf.in,
                   tweak->oobbuf.in + orig->ooboffs,
                   orig->ooblen);
    }

    /* Ensure the original request is restored */
    *req = *orig;
}
EXPORT_SYMBOL_GPL(nand_ecc_restore_req);
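
/*
 * Sketch of the intended pairing (hypothetical "my_engine" driver, not part
 * of this file): an engine that needs full-page accesses bounces partial
 * requests in its prepare_io_req() hook and undoes the tweak in
 * finish_io_req(), with the actual ECC work happening in between.
 *
 *    static int my_engine_prepare_io_req(struct nand_device *nand,
 *                                        struct nand_page_io_req *req)
 *    {
 *        struct my_engine_ctx *ctx = nand->ecc.ctx.priv;
 *
 *        if (req->mode != MTD_OPS_RAW)
 *            nand_ecc_tweak_req(&ctx->req_ctx, req);
 *
 *        return 0;
 *    }
 *
 *    static int my_engine_finish_io_req(struct nand_device *nand,
 *                                       struct nand_page_io_req *req)
 *    {
 *        struct my_engine_ctx *ctx = nand->ecc.ctx.priv;
 *
 *        if (req->mode == MTD_OPS_RAW)
 *            return 0;
 *
 *        ... compute/check the ECC on the full bounce buffers here ...
 *
 *        nand_ecc_restore_req(&ctx->req_ctx, req);
 *        return 0;
 *    }
 */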

struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand)
{
    unsigned int algo = nand->ecc.user_conf.algo;

    if (algo == NAND_ECC_ALGO_UNKNOWN)
        algo = nand->ecc.defaults.algo;

    switch (algo) {
    case NAND_ECC_ALGO_HAMMING:
        return nand_ecc_sw_hamming_get_engine();
    case NAND_ECC_ALGO_BCH:
        return nand_ecc_sw_bch_get_engine();
    default:
        break;
    }

    return NULL;
}
EXPORT_SYMBOL(nand_ecc_get_sw_engine);

struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand)
{
    return nand->ecc.ondie_engine;
}
EXPORT_SYMBOL(nand_ecc_get_on_die_hw_engine);

int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
{
    struct nand_ecc_engine *item;

    if (!engine)
        return -EINVAL;

    /* Prevent multiple registrations of one engine */
    list_for_each_entry(item, &on_host_hw_engines, node)
        if (item == engine)
            return 0;

    mutex_lock(&on_host_hw_engines_mutex);
    list_add_tail(&engine->node, &on_host_hw_engines);
    mutex_unlock(&on_host_hw_engines_mutex);

    return 0;
}
EXPORT_SYMBOL(nand_ecc_register_on_host_hw_engine);

int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
{
    if (!engine)
        return -EINVAL;

    mutex_lock(&on_host_hw_engines_mutex);
    list_del(&engine->node);
    mutex_unlock(&on_host_hw_engines_mutex);

    return 0;
}
EXPORT_SYMBOL(nand_ecc_unregister_on_host_hw_engine);
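
/*
 * Hypothetical platform driver sketch (names are placeholders, not part of
 * this file) showing the expected register/unregister pairing for an on-host
 * hardware ECC engine:
 *
 *    static int my_ecc_probe(struct platform_device *pdev)
 *    {
 *        struct nand_ecc_engine *engine;
 *
 *        engine = devm_kzalloc(&pdev->dev, sizeof(*engine), GFP_KERNEL);
 *        if (!engine)
 *            return -ENOMEM;
 *
 *        engine->dev = &pdev->dev;
 *        engine->ops = &my_ecc_engine_ops;
 *        platform_set_drvdata(pdev, engine);
 *
 *        return nand_ecc_register_on_host_hw_engine(engine);
 *    }
 *
 *    static int my_ecc_remove(struct platform_device *pdev)
 *    {
 *        return nand_ecc_unregister_on_host_hw_engine(platform_get_drvdata(pdev));
 *    }
 */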

static struct nand_ecc_engine *nand_ecc_match_on_host_hw_engine(struct device *dev)
{
    struct nand_ecc_engine *item;

    list_for_each_entry(item, &on_host_hw_engines, node)
        if (item->dev == dev)
            return item;

    return NULL;
}

struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand)
{
    struct nand_ecc_engine *engine = NULL;
    struct device *dev = &nand->mtd.dev;
    struct platform_device *pdev;
    struct device_node *np;

    if (list_empty(&on_host_hw_engines))
        return NULL;

    /* Check for an explicit nand-ecc-engine property */
    np = of_parse_phandle(dev->of_node, "nand-ecc-engine", 0);
    if (np) {
        pdev = of_find_device_by_node(np);
        if (!pdev)
            return ERR_PTR(-EPROBE_DEFER);

        engine = nand_ecc_match_on_host_hw_engine(&pdev->dev);
        platform_device_put(pdev);
        of_node_put(np);

        if (!engine)
            return ERR_PTR(-EPROBE_DEFER);
    }

    if (engine)
        get_device(engine->dev);

    return engine;
}
EXPORT_SYMBOL(nand_ecc_get_on_host_hw_engine);

void nand_ecc_put_on_host_hw_engine(struct nand_device *nand)
{
    put_device(nand->ecc.engine->dev);
}
EXPORT_SYMBOL(nand_ecc_put_on_host_hw_engine);

/*
 * In the case of a pipelined engine, the device registering the ECC
 * engine is not necessarily the ECC engine itself but may be a host controller.
 * It is then useful to provide a helper to retrieve the right device object
 * which actually represents the ECC engine.
 */
struct device *nand_ecc_get_engine_dev(struct device *host)
{
    struct platform_device *ecc_pdev;
    struct device_node *np;

    /*
     * If the device node contains this property, it means we need to follow
     * it in order to get the right ECC engine device we are looking for.
     */
    np = of_parse_phandle(host->of_node, "nand-ecc-engine", 0);
    if (!np)
        return host;

    ecc_pdev = of_find_device_by_node(np);
    if (!ecc_pdev) {
        of_node_put(np);
        return NULL;
    }

    platform_device_put(ecc_pdev);
    of_node_put(np);

    return &ecc_pdev->dev;
}
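
/*
 * Usage sketch (assumption: called from a pipelined host controller driver,
 * not part of this file): resolve the device that actually implements the
 * ECC engine before looking it up or requesting resources from it.
 *
 *    struct device *ecc_dev = nand_ecc_get_engine_dev(&pdev->dev);
 *
 *    if (!ecc_dev)
 *        return -ENODEV;
 */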

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Generic ECC engine");