0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (C) 2016-2017 Micron Technology, Inc.
0004  *
0005  * Authors:
0006  *  Peter Pan <peterpandong@micron.com>
0007  *  Boris Brezillon <boris.brezillon@bootlin.com>
0008  */
0009 
0010 #define pr_fmt(fmt) "spi-nand: " fmt
0011 
0012 #include <linux/device.h>
0013 #include <linux/jiffies.h>
0014 #include <linux/kernel.h>
0015 #include <linux/module.h>
0016 #include <linux/mtd/spinand.h>
0017 #include <linux/of.h>
0018 #include <linux/slab.h>
0019 #include <linux/string.h>
0020 #include <linux/spi/spi.h>
0021 #include <linux/spi/spi-mem.h>
0022 
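/*
 * Register access helpers. GET/SET FEATURE operations bounce through the
 * small scratch buffer so that the data handed to spi-mem is DMA-able.
 */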
0023 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
0024 {
0025     struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
0026                               spinand->scratchbuf);
0027     int ret;
0028 
0029     ret = spi_mem_exec_op(spinand->spimem, &op);
0030     if (ret)
0031         return ret;
0032 
0033     *val = *spinand->scratchbuf;
0034     return 0;
0035 }
0036 
0037 static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
0038 {
0039     struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
0040                               spinand->scratchbuf);
0041 
0042     *spinand->scratchbuf = val;
0043     return spi_mem_exec_op(spinand->spimem, &op);
0044 }
0045 
0046 static int spinand_read_status(struct spinand_device *spinand, u8 *status)
0047 {
0048     return spinand_read_reg_op(spinand, REG_STATUS, status);
0049 }
0050 
0051 static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
0052 {
0053     struct nand_device *nand = spinand_to_nand(spinand);
0054 
0055     if (WARN_ON(spinand->cur_target < 0 ||
0056             spinand->cur_target >= nand->memorg.ntargets))
0057         return -EINVAL;
0058 
0059     *cfg = spinand->cfg_cache[spinand->cur_target];
0060     return 0;
0061 }
0062 
0063 static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
0064 {
0065     struct nand_device *nand = spinand_to_nand(spinand);
0066     int ret;
0067 
0068     if (WARN_ON(spinand->cur_target < 0 ||
0069             spinand->cur_target >= nand->memorg.ntargets))
0070         return -EINVAL;
0071 
0072     if (spinand->cfg_cache[spinand->cur_target] == cfg)
0073         return 0;
0074 
0075     ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
0076     if (ret)
0077         return ret;
0078 
0079     spinand->cfg_cache[spinand->cur_target] = cfg;
0080     return 0;
0081 }
0082 
0083 /**
0084  * spinand_upd_cfg() - Update the configuration register
0085  * @spinand: the spinand device
0086  * @mask: the mask encoding the bits to update in the config reg
0087  * @val: the new value to apply
0088  *
0089  * Update the bits selected by @mask with @val, going through the per-target configuration cache.
0090  *
0091  * Return: 0 on success, a negative error code otherwise.
0092  */
0093 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
0094 {
0095     int ret;
0096     u8 cfg;
0097 
0098     ret = spinand_get_cfg(spinand, &cfg);
0099     if (ret)
0100         return ret;
0101 
0102     cfg &= ~mask;
0103     cfg |= val;
0104 
0105     return spinand_set_cfg(spinand, cfg);
0106 }
0107 
0108 /**
0109  * spinand_select_target() - Select a specific NAND target/die
0110  * @spinand: the spinand device
0111  * @target: the target/die to select
0112  *
0113  * Select a new target/die. If the chip has only one die, this function is a NOOP.
0114  *
0115  * Return: 0 on success, a negative error code otherwise.
0116  */
0117 int spinand_select_target(struct spinand_device *spinand, unsigned int target)
0118 {
0119     struct nand_device *nand = spinand_to_nand(spinand);
0120     int ret;
0121 
0122     if (WARN_ON(target >= nand->memorg.ntargets))
0123         return -EINVAL;
0124 
0125     if (spinand->cur_target == target)
0126         return 0;
0127 
0128     if (nand->memorg.ntargets == 1) {
0129         spinand->cur_target = target;
0130         return 0;
0131     }
0132 
0133     ret = spinand->select_target(spinand, target);
0134     if (ret)
0135         return ret;
0136 
0137     spinand->cur_target = target;
0138     return 0;
0139 }
0140 
0141 static int spinand_read_cfg(struct spinand_device *spinand)
0142 {
0143     struct nand_device *nand = spinand_to_nand(spinand);
0144     unsigned int target;
0145     int ret;
0146 
0147     for (target = 0; target < nand->memorg.ntargets; target++) {
0148         ret = spinand_select_target(spinand, target);
0149         if (ret)
0150             return ret;
0151 
0152         /*
0153          * We use spinand_read_reg_op() instead of spinand_get_cfg()
0154          * here to bypass the config cache.
0155          */
0156         ret = spinand_read_reg_op(spinand, REG_CFG,
0157                       &spinand->cfg_cache[target]);
0158         if (ret)
0159             return ret;
0160     }
0161 
0162     return 0;
0163 }
0164 
0165 static int spinand_init_cfg_cache(struct spinand_device *spinand)
0166 {
0167     struct nand_device *nand = spinand_to_nand(spinand);
0168     struct device *dev = &spinand->spimem->spi->dev;
0169 
0170     spinand->cfg_cache = devm_kcalloc(dev,
0171                       nand->memorg.ntargets,
0172                       sizeof(*spinand->cfg_cache),
0173                       GFP_KERNEL);
0174     if (!spinand->cfg_cache)
0175         return -ENOMEM;
0176 
0177     return 0;
0178 }
0179 
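/*
 * Set the quad-enable bit only when the chip actually has one
 * (SPINAND_HAS_QE_BIT) and at least one of the selected cache op templates
 * uses a x4 data phase.
 */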
0180 static int spinand_init_quad_enable(struct spinand_device *spinand)
0181 {
0182     bool enable = false;
0183 
0184     if (!(spinand->flags & SPINAND_HAS_QE_BIT))
0185         return 0;
0186 
0187     if (spinand->op_templates.read_cache->data.buswidth == 4 ||
0188         spinand->op_templates.write_cache->data.buswidth == 4 ||
0189         spinand->op_templates.update_cache->data.buswidth == 4)
0190         enable = true;
0191 
0192     return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
0193                    enable ? CFG_QUAD_ENABLE : 0);
0194 }
0195 
0196 static int spinand_ecc_enable(struct spinand_device *spinand,
0197                   bool enable)
0198 {
0199     return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
0200                    enable ? CFG_ECC_ENABLE : 0);
0201 }
0202 
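/*
 * Generic decoding of the ECC status field. Chips that can report a precise
 * number of corrected bitflips provide their own ->get_status() hook and
 * bypass this switch.
 */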
0203 static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
0204 {
0205     struct nand_device *nand = spinand_to_nand(spinand);
0206 
0207     if (spinand->eccinfo.get_status)
0208         return spinand->eccinfo.get_status(spinand, status);
0209 
0210     switch (status & STATUS_ECC_MASK) {
0211     case STATUS_ECC_NO_BITFLIPS:
0212         return 0;
0213 
0214     case STATUS_ECC_HAS_BITFLIPS:
0215         /*
0216          * We have no way to know exactly how many bitflips have been
0217          * fixed, so let's return the maximum possible value so that
0218          * wear-leveling layers move the data immediately.
0219          */
0220         return nanddev_get_ecc_conf(nand)->strength;
0221 
0222     case STATUS_ECC_UNCOR_ERROR:
0223         return -EBADMSG;
0224 
0225     default:
0226         break;
0227     }
0228 
0229     return -EINVAL;
0230 }
0231 
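/*
 * Fallback OOB layout used when the chip description provides none: the
 * first two OOB bytes are reserved for the bad block marker and the
 * remaining 62 bytes (a 64-byte OOB area is assumed here) are exposed as
 * free for the upper layers.
 */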
0232 static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
0233                        struct mtd_oob_region *region)
0234 {
0235     return -ERANGE;
0236 }
0237 
0238 static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
0239                     struct mtd_oob_region *region)
0240 {
0241     if (section)
0242         return -ERANGE;
0243 
0244     /* Reserve 2 bytes for the BBM. */
0245     region->offset = 2;
0246     region->length = 62;
0247 
0248     return 0;
0249 }
0250 
0251 static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
0252     .ecc = spinand_noecc_ooblayout_ecc,
0253     .free = spinand_noecc_ooblayout_free,
0254 };
0255 
0256 static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
0257 {
0258     struct spinand_device *spinand = nand_to_spinand(nand);
0259     struct mtd_info *mtd = nanddev_to_mtd(nand);
0260     struct spinand_ondie_ecc_conf *engine_conf;
0261 
0262     nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
0263     nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
0264     nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;
0265 
0266     engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
0267     if (!engine_conf)
0268         return -ENOMEM;
0269 
0270     nand->ecc.ctx.priv = engine_conf;
0271 
0272     if (spinand->eccinfo.ooblayout)
0273         mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
0274     else
0275         mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
0276 
0277     return 0;
0278 }
0279 
0280 static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
0281 {
0282     kfree(nand->ecc.ctx.priv);
0283 }
0284 
0285 static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
0286                         struct nand_page_io_req *req)
0287 {
0288     struct spinand_device *spinand = nand_to_spinand(nand);
0289     bool enable = (req->mode != MTD_OPS_RAW);
0290 
0291     memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
0292 
0293     /* Only enable or disable the engine */
0294     return spinand_ecc_enable(spinand, enable);
0295 }
0296 
0297 static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
0298                        struct nand_page_io_req *req)
0299 {
0300     struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
0301     struct spinand_device *spinand = nand_to_spinand(nand);
0302     struct mtd_info *mtd = spinand_to_mtd(spinand);
0303     int ret;
0304 
0305     if (req->mode == MTD_OPS_RAW)
0306         return 0;
0307 
0308     /* Nothing to do when finishing a page write */
0309     if (req->type == NAND_PAGE_WRITE)
0310         return 0;
0311 
0312     /* Finish a page read: check the status, report errors/bitflips */
0313     ret = spinand_check_ecc_status(spinand, engine_conf->status);
0314     if (ret == -EBADMSG)
0315         mtd->ecc_stats.failed++;
0316     else if (ret > 0)
0317         mtd->ecc_stats.corrected += ret;
0318 
0319     return ret;
0320 }
0321 
0322 static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
0323     .init_ctx = spinand_ondie_ecc_init_ctx,
0324     .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
0325     .prepare_io_req = spinand_ondie_ecc_prepare_io_req,
0326     .finish_io_req = spinand_ondie_ecc_finish_io_req,
0327 };
0328 
0329 static struct nand_ecc_engine spinand_ondie_ecc_engine = {
0330     .ops = &spinand_ondie_ecc_engine_ops,
0331 };
0332 
0333 static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status)
0334 {
0335     struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
0336 
0337     if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE &&
0338         engine_conf)
0339         engine_conf->status = status;
0340 }
0341 
0342 static int spinand_write_enable_op(struct spinand_device *spinand)
0343 {
0344     struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
0345 
0346     return spi_mem_exec_op(spinand->spimem, &op);
0347 }
0348 
0349 static int spinand_load_page_op(struct spinand_device *spinand,
0350                 const struct nand_page_io_req *req)
0351 {
0352     struct nand_device *nand = spinand_to_nand(spinand);
0353     unsigned int row = nanddev_pos_to_row(nand, &req->pos);
0354     struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
0355 
0356     return spi_mem_exec_op(spinand->spimem, &op);
0357 }
0358 
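/*
 * Transfer the chip cache content to the caller's buffers through the
 * plane's read dirmap: whole page and/or OOB areas are always fetched, then
 * only the requested data/OOB ranges are copied out.
 */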
0359 static int spinand_read_from_cache_op(struct spinand_device *spinand,
0360                       const struct nand_page_io_req *req)
0361 {
0362     struct nand_device *nand = spinand_to_nand(spinand);
0363     struct mtd_info *mtd = spinand_to_mtd(spinand);
0364     struct spi_mem_dirmap_desc *rdesc;
0365     unsigned int nbytes = 0;
0366     void *buf = NULL;
0367     u16 column = 0;
0368     ssize_t ret;
0369 
0370     if (req->datalen) {
0371         buf = spinand->databuf;
0372         nbytes = nanddev_page_size(nand);
0373         column = 0;
0374     }
0375 
0376     if (req->ooblen) {
0377         nbytes += nanddev_per_page_oobsize(nand);
0378         if (!buf) {
0379             buf = spinand->oobbuf;
0380             column = nanddev_page_size(nand);
0381         }
0382     }
0383 
0384     if (req->mode == MTD_OPS_RAW)
0385         rdesc = spinand->dirmaps[req->pos.plane].rdesc;
0386     else
0387         rdesc = spinand->dirmaps[req->pos.plane].rdesc_ecc;
0388 
0389     while (nbytes) {
0390         ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
0391         if (ret < 0)
0392             return ret;
0393 
0394         if (!ret || ret > nbytes)
0395             return -EIO;
0396 
0397         nbytes -= ret;
0398         column += ret;
0399         buf += ret;
0400     }
0401 
0402     if (req->datalen)
0403         memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
0404                req->datalen);
0405 
0406     if (req->ooblen) {
0407         if (req->mode == MTD_OPS_AUTO_OOB)
0408             mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
0409                             spinand->oobbuf,
0410                             req->ooboffs,
0411                             req->ooblen);
0412         else
0413             memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
0414                    req->ooblen);
0415     }
0416 
0417     return 0;
0418 }
0419 
0420 static int spinand_write_to_cache_op(struct spinand_device *spinand,
0421                      const struct nand_page_io_req *req)
0422 {
0423     struct nand_device *nand = spinand_to_nand(spinand);
0424     struct mtd_info *mtd = spinand_to_mtd(spinand);
0425     struct spi_mem_dirmap_desc *wdesc;
0426     unsigned int nbytes, column = 0;
0427     void *buf = spinand->databuf;
0428     ssize_t ret;
0429 
0430     /*
0431      * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
0432      * the cache content to 0xFF (depends on vendor implementation), so we
0433      * must fill the page cache entirely even if we only want to program
0434      * the data portion of the page, otherwise we might corrupt the BBM or
0435      * user data previously programmed in the OOB area.
0436      *
0437      * Only reset the data buffer manually, the OOB buffer is prepared by
0438      * ECC engines ->prepare_io_req() callback.
0439      */
0440     nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
0441     memset(spinand->databuf, 0xff, nanddev_page_size(nand));
0442 
0443     if (req->datalen)
0444         memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
0445                req->datalen);
0446 
0447     if (req->ooblen) {
0448         if (req->mode == MTD_OPS_AUTO_OOB)
0449             mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
0450                             spinand->oobbuf,
0451                             req->ooboffs,
0452                             req->ooblen);
0453         else
0454             memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
0455                    req->ooblen);
0456     }
0457 
0458     if (req->mode == MTD_OPS_RAW)
0459         wdesc = spinand->dirmaps[req->pos.plane].wdesc;
0460     else
0461         wdesc = spinand->dirmaps[req->pos.plane].wdesc_ecc;
0462 
0463     while (nbytes) {
0464         ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
0465         if (ret < 0)
0466             return ret;
0467 
0468         if (!ret || ret > nbytes)
0469             return -EIO;
0470 
0471         nbytes -= ret;
0472         column += ret;
0473         buf += ret;
0474     }
0475 
0476     return 0;
0477 }
0478 
0479 static int spinand_program_op(struct spinand_device *spinand,
0480                   const struct nand_page_io_req *req)
0481 {
0482     struct nand_device *nand = spinand_to_nand(spinand);
0483     unsigned int row = nanddev_pos_to_row(nand, &req->pos);
0484     struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
0485 
0486     return spi_mem_exec_op(spinand->spimem, &op);
0487 }
0488 
0489 static int spinand_erase_op(struct spinand_device *spinand,
0490                 const struct nand_pos *pos)
0491 {
0492     struct nand_device *nand = spinand_to_nand(spinand);
0493     unsigned int row = nanddev_pos_to_row(nand, pos);
0494     struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
0495 
0496     return spi_mem_exec_op(spinand->spimem, &op);
0497 }
0498 
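/*
 * Poll the status register until the BUSY bit clears or the timeout
 * expires, optionally returning the last status value through @s.
 */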
0499 static int spinand_wait(struct spinand_device *spinand,
0500             unsigned long initial_delay_us,
0501             unsigned long poll_delay_us,
0502             u8 *s)
0503 {
0504     struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
0505                               spinand->scratchbuf);
0506     u8 status;
0507     int ret;
0508 
0509     ret = spi_mem_poll_status(spinand->spimem, &op, STATUS_BUSY, 0,
0510                   initial_delay_us,
0511                   poll_delay_us,
0512                   SPINAND_WAITRDY_TIMEOUT_MS);
0513     if (ret)
0514         return ret;
0515 
0516     status = *spinand->scratchbuf;
0517     if (!(status & STATUS_BUSY))
0518         goto out;
0519 
0520     /*
0521      * Extra read, just in case the BUSY bit has changed
0522      * since our last check
0523      */
0524     ret = spinand_read_status(spinand, &status);
0525     if (ret)
0526         return ret;
0527 
0528 out:
0529     if (s)
0530         *s = status;
0531 
0532     return status & STATUS_BUSY ? -ETIMEDOUT : 0;
0533 }
0534 
0535 static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
0536                   u8 ndummy, u8 *buf)
0537 {
0538     struct spi_mem_op op = SPINAND_READID_OP(
0539         naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
0540     int ret;
0541 
0542     ret = spi_mem_exec_op(spinand->spimem, &op);
0543     if (!ret)
0544         memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
0545 
0546     return ret;
0547 }
0548 
0549 static int spinand_reset_op(struct spinand_device *spinand)
0550 {
0551     struct spi_mem_op op = SPINAND_RESET_OP;
0552     int ret;
0553 
0554     ret = spi_mem_exec_op(spinand->spimem, &op);
0555     if (ret)
0556         return ret;
0557 
0558     return spinand_wait(spinand,
0559                 SPINAND_RESET_INITIAL_DELAY_US,
0560                 SPINAND_RESET_POLL_DELAY_US,
0561                 NULL);
0562 }
0563 
0564 static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
0565 {
0566     return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
0567 }
0568 
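/*
 * Page read sequence: prepare the ECC engine, issue PAGE READ to load the
 * page into the chip cache, wait for the chip to become ready, record the
 * on-die ECC status, transfer the cache content, then let the ECC engine
 * report bitflips or uncorrectable errors.
 */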
0569 static int spinand_read_page(struct spinand_device *spinand,
0570                  const struct nand_page_io_req *req)
0571 {
0572     struct nand_device *nand = spinand_to_nand(spinand);
0573     u8 status;
0574     int ret;
0575 
0576     ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
0577     if (ret)
0578         return ret;
0579 
0580     ret = spinand_load_page_op(spinand, req);
0581     if (ret)
0582         return ret;
0583 
0584     ret = spinand_wait(spinand,
0585                SPINAND_READ_INITIAL_DELAY_US,
0586                SPINAND_READ_POLL_DELAY_US,
0587                &status);
0588     if (ret < 0)
0589         return ret;
0590 
0591     spinand_ondie_ecc_save_status(nand, status);
0592 
0593     ret = spinand_read_from_cache_op(spinand, req);
0594     if (ret)
0595         return ret;
0596 
0597     return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
0598 }
0599 
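/*
 * Page write sequence: prepare the ECC engine, send WRITE ENABLE, load the
 * data into the chip cache (PROGRAM LOAD), issue PROGRAM EXECUTE, then wait
 * for completion and check STATUS_PROG_FAILED.
 */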
0600 static int spinand_write_page(struct spinand_device *spinand,
0601                   const struct nand_page_io_req *req)
0602 {
0603     struct nand_device *nand = spinand_to_nand(spinand);
0604     u8 status;
0605     int ret;
0606 
0607     ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req);
0608     if (ret)
0609         return ret;
0610 
0611     ret = spinand_write_enable_op(spinand);
0612     if (ret)
0613         return ret;
0614 
0615     ret = spinand_write_to_cache_op(spinand, req);
0616     if (ret)
0617         return ret;
0618 
0619     ret = spinand_program_op(spinand, req);
0620     if (ret)
0621         return ret;
0622 
0623     ret = spinand_wait(spinand,
0624                SPINAND_WRITE_INITIAL_DELAY_US,
0625                SPINAND_WRITE_POLL_DELAY_US,
0626                &status);
0627     if (!ret && (status & STATUS_PROG_FAILED))
0628         return -EIO;
0629 
0630     return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
0631 }
0632 
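/*
 * MTD read path: ECC is bypassed (raw mode) when the caller requested it or
 * when the chip provides no OOB layout. On success the maximum number of
 * bitflips seen over all pages is returned, -EBADMSG if any page was
 * uncorrectable.
 */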
0633 static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
0634                 struct mtd_oob_ops *ops)
0635 {
0636     struct spinand_device *spinand = mtd_to_spinand(mtd);
0637     struct nand_device *nand = mtd_to_nanddev(mtd);
0638     unsigned int max_bitflips = 0;
0639     struct nand_io_iter iter;
0640     bool disable_ecc = false;
0641     bool ecc_failed = false;
0642     int ret = 0;
0643 
0644     if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
0645         disable_ecc = true;
0646 
0647     mutex_lock(&spinand->lock);
0648 
0649     nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
0650         if (disable_ecc)
0651             iter.req.mode = MTD_OPS_RAW;
0652 
0653         ret = spinand_select_target(spinand, iter.req.pos.target);
0654         if (ret)
0655             break;
0656 
0657         ret = spinand_read_page(spinand, &iter.req);
0658         if (ret < 0 && ret != -EBADMSG)
0659             break;
0660 
0661         if (ret == -EBADMSG)
0662             ecc_failed = true;
0663         else
0664             max_bitflips = max_t(unsigned int, max_bitflips, ret);
0665 
0666         ret = 0;
0667         ops->retlen += iter.req.datalen;
0668         ops->oobretlen += iter.req.ooblen;
0669     }
0670 
0671     mutex_unlock(&spinand->lock);
0672 
0673     if (ecc_failed && !ret)
0674         ret = -EBADMSG;
0675 
0676     return ret ? ret : max_bitflips;
0677 }
0678 
0679 static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
0680                  struct mtd_oob_ops *ops)
0681 {
0682     struct spinand_device *spinand = mtd_to_spinand(mtd);
0683     struct nand_device *nand = mtd_to_nanddev(mtd);
0684     struct nand_io_iter iter;
0685     bool disable_ecc = false;
0686     int ret = 0;
0687 
0688     if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
0689         disable_ecc = true;
0690 
0691     mutex_lock(&spinand->lock);
0692 
0693     nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
0694         if (disable_ecc)
0695             iter.req.mode = MTD_OPS_RAW;
0696 
0697         ret = spinand_select_target(spinand, iter.req.pos.target);
0698         if (ret)
0699             break;
0700 
0701         ret = spinand_write_page(spinand, &iter.req);
0702         if (ret)
0703             break;
0704 
0705         ops->retlen += iter.req.datalen;
0706         ops->oobretlen += iter.req.ooblen;
0707     }
0708 
0709     mutex_unlock(&spinand->lock);
0710 
0711     return ret;
0712 }
0713 
0714 static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
0715 {
0716     struct spinand_device *spinand = nand_to_spinand(nand);
0717     u8 marker[2] = { };
0718     struct nand_page_io_req req = {
0719         .pos = *pos,
0720         .ooblen = sizeof(marker),
0721         .ooboffs = 0,
0722         .oobbuf.in = marker,
0723         .mode = MTD_OPS_RAW,
0724     };
0725 
0726     spinand_select_target(spinand, pos->target);
0727     spinand_read_page(spinand, &req);
0728     if (marker[0] != 0xff || marker[1] != 0xff)
0729         return true;
0730 
0731     return false;
0732 }
0733 
0734 static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
0735 {
0736     struct nand_device *nand = mtd_to_nanddev(mtd);
0737     struct spinand_device *spinand = nand_to_spinand(nand);
0738     struct nand_pos pos;
0739     int ret;
0740 
0741     nanddev_offs_to_pos(nand, offs, &pos);
0742     mutex_lock(&spinand->lock);
0743     ret = nanddev_isbad(nand, &pos);
0744     mutex_unlock(&spinand->lock);
0745 
0746     return ret;
0747 }
0748 
0749 static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
0750 {
0751     struct spinand_device *spinand = nand_to_spinand(nand);
0752     u8 marker[2] = { };
0753     struct nand_page_io_req req = {
0754         .pos = *pos,
0755         .ooboffs = 0,
0756         .ooblen = sizeof(marker),
0757         .oobbuf.out = marker,
0758         .mode = MTD_OPS_RAW,
0759     };
0760     int ret;
0761 
0762     ret = spinand_select_target(spinand, pos->target);
0763     if (ret)
0764         return ret;
0765 
0766     ret = spinand_write_enable_op(spinand);
0767     if (ret)
0768         return ret;
0769 
0770     return spinand_write_page(spinand, &req);
0771 }
0772 
0773 static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
0774 {
0775     struct nand_device *nand = mtd_to_nanddev(mtd);
0776     struct spinand_device *spinand = nand_to_spinand(nand);
0777     struct nand_pos pos;
0778     int ret;
0779 
0780     nanddev_offs_to_pos(nand, offs, &pos);
0781     mutex_lock(&spinand->lock);
0782     ret = nanddev_markbad(nand, &pos);
0783     mutex_unlock(&spinand->lock);
0784 
0785     return ret;
0786 }
0787 
0788 static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
0789 {
0790     struct spinand_device *spinand = nand_to_spinand(nand);
0791     u8 status;
0792     int ret;
0793 
0794     ret = spinand_select_target(spinand, pos->target);
0795     if (ret)
0796         return ret;
0797 
0798     ret = spinand_write_enable_op(spinand);
0799     if (ret)
0800         return ret;
0801 
0802     ret = spinand_erase_op(spinand, pos);
0803     if (ret)
0804         return ret;
0805 
0806     ret = spinand_wait(spinand,
0807                SPINAND_ERASE_INITIAL_DELAY_US,
0808                SPINAND_ERASE_POLL_DELAY_US,
0809                &status);
0810 
0811     if (!ret && (status & STATUS_ERASE_FAILED))
0812         ret = -EIO;
0813 
0814     return ret;
0815 }
0816 
0817 static int spinand_mtd_erase(struct mtd_info *mtd,
0818                  struct erase_info *einfo)
0819 {
0820     struct spinand_device *spinand = mtd_to_spinand(mtd);
0821     int ret;
0822 
0823     mutex_lock(&spinand->lock);
0824     ret = nanddev_mtd_erase(mtd, einfo);
0825     mutex_unlock(&spinand->lock);
0826 
0827     return ret;
0828 }
0829 
0830 static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
0831 {
0832     struct spinand_device *spinand = mtd_to_spinand(mtd);
0833     struct nand_device *nand = mtd_to_nanddev(mtd);
0834     struct nand_pos pos;
0835     int ret;
0836 
0837     nanddev_offs_to_pos(nand, offs, &pos);
0838     mutex_lock(&spinand->lock);
0839     ret = nanddev_isreserved(nand, &pos);
0840     mutex_unlock(&spinand->lock);
0841 
0842     return ret;
0843 }
0844 
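/*
 * Create the spi-mem direct mappings used for cache reads/writes on one
 * plane. A separate ECC-enabled pair of descriptors is only created when
 * the ECC engine is pipelined in the SPI controller; otherwise the plain
 * descriptors are reused.
 */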
0845 static int spinand_create_dirmap(struct spinand_device *spinand,
0846                  unsigned int plane)
0847 {
0848     struct nand_device *nand = spinand_to_nand(spinand);
0849     struct spi_mem_dirmap_info info = {
0850         .length = nanddev_page_size(nand) +
0851               nanddev_per_page_oobsize(nand),
0852     };
0853     struct spi_mem_dirmap_desc *desc;
0854 
0855     /* The plane number is passed in the MSBs, just above the column address */
0856     info.offset = plane << fls(nand->memorg.pagesize);
0857 
0858     info.op_tmpl = *spinand->op_templates.update_cache;
0859     desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
0860                       spinand->spimem, &info);
0861     if (IS_ERR(desc))
0862         return PTR_ERR(desc);
0863 
0864     spinand->dirmaps[plane].wdesc = desc;
0865 
0866     info.op_tmpl = *spinand->op_templates.read_cache;
0867     desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
0868                       spinand->spimem, &info);
0869     if (IS_ERR(desc))
0870         return PTR_ERR(desc);
0871 
0872     spinand->dirmaps[plane].rdesc = desc;
0873 
0874     if (nand->ecc.engine->integration != NAND_ECC_ENGINE_INTEGRATION_PIPELINED) {
0875         spinand->dirmaps[plane].wdesc_ecc = spinand->dirmaps[plane].wdesc;
0876         spinand->dirmaps[plane].rdesc_ecc = spinand->dirmaps[plane].rdesc;
0877 
0878         return 0;
0879     }
0880 
0881     info.op_tmpl = *spinand->op_templates.update_cache;
0882     info.op_tmpl.data.ecc = true;
0883     desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
0884                       spinand->spimem, &info);
0885     if (IS_ERR(desc))
0886         return PTR_ERR(desc);
0887 
0888     spinand->dirmaps[plane].wdesc_ecc = desc;
0889 
0890     info.op_tmpl = *spinand->op_templates.read_cache;
0891     info.op_tmpl.data.ecc = true;
0892     desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
0893                       spinand->spimem, &info);
0894     if (IS_ERR(desc))
0895         return PTR_ERR(desc);
0896 
0897     spinand->dirmaps[plane].rdesc_ecc = desc;
0898 
0899     return 0;
0900 }
0901 
0902 static int spinand_create_dirmaps(struct spinand_device *spinand)
0903 {
0904     struct nand_device *nand = spinand_to_nand(spinand);
0905     int i, ret;
0906 
0907     spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
0908                     sizeof(*spinand->dirmaps) *
0909                     nand->memorg.planes_per_lun,
0910                     GFP_KERNEL);
0911     if (!spinand->dirmaps)
0912         return -ENOMEM;
0913 
0914     for (i = 0; i < nand->memorg.planes_per_lun; i++) {
0915         ret = spinand_create_dirmap(spinand, i);
0916         if (ret)
0917             return ret;
0918     }
0919 
0920     return 0;
0921 }
0922 
0923 static const struct nand_ops spinand_ops = {
0924     .erase = spinand_erase,
0925     .markbad = spinand_markbad,
0926     .isbad = spinand_isbad,
0927 };
0928 
0929 static const struct spinand_manufacturer *spinand_manufacturers[] = {
0930     &ato_spinand_manufacturer,
0931     &gigadevice_spinand_manufacturer,
0932     &macronix_spinand_manufacturer,
0933     &micron_spinand_manufacturer,
0934     &paragon_spinand_manufacturer,
0935     &toshiba_spinand_manufacturer,
0936     &winbond_spinand_manufacturer,
0937     &xtx_spinand_manufacturer,
0938 };
0939 
0940 static int spinand_manufacturer_match(struct spinand_device *spinand,
0941                       enum spinand_readid_method rdid_method)
0942 {
0943     u8 *id = spinand->id.data;
0944     unsigned int i;
0945     int ret;
0946 
0947     for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
0948         const struct spinand_manufacturer *manufacturer =
0949             spinand_manufacturers[i];
0950 
0951         if (id[0] != manufacturer->id)
0952             continue;
0953 
0954         ret = spinand_match_and_init(spinand,
0955                          manufacturer->chips,
0956                          manufacturer->nchips,
0957                          rdid_method);
0958         if (ret < 0)
0959             continue;
0960 
0961         spinand->manufacturer = manufacturer;
0962         return 0;
0963     }
0964     return -ENOTSUPP;
0965 }
0966 
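/*
 * Chips implement READ ID in slightly different ways, so try the three
 * variants in turn (opcode only, opcode + address byte, opcode + dummy
 * byte) and match the returned bytes against the manufacturer tables
 * registered for that method.
 */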
0967 static int spinand_id_detect(struct spinand_device *spinand)
0968 {
0969     u8 *id = spinand->id.data;
0970     int ret;
0971 
0972     ret = spinand_read_id_op(spinand, 0, 0, id);
0973     if (ret)
0974         return ret;
0975     ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
0976     if (!ret)
0977         return 0;
0978 
0979     ret = spinand_read_id_op(spinand, 1, 0, id);
0980     if (ret)
0981         return ret;
0982     ret = spinand_manufacturer_match(spinand,
0983                      SPINAND_READID_METHOD_OPCODE_ADDR);
0984     if (!ret)
0985         return 0;
0986 
0987     ret = spinand_read_id_op(spinand, 0, 1, id);
0988     if (ret)
0989         return ret;
0990     ret = spinand_manufacturer_match(spinand,
0991                      SPINAND_READID_METHOD_OPCODE_DUMMY);
0992 
0993     return ret;
0994 }
0995 
0996 static int spinand_manufacturer_init(struct spinand_device *spinand)
0997 {
0998     if (spinand->manufacturer->ops->init)
0999         return spinand->manufacturer->ops->init(spinand);
1000 
1001     return 0;
1002 }
1003 
1004 static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
1005 {
1006     /* Release manufacturer private data */
1007     if (spinand->manufacturer->ops->cleanup)
1008         return spinand->manufacturer->ops->cleanup(spinand);
1009 }
1010 
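/*
 * Pick the first op variant the controller supports and that can transfer a
 * full page + OOB, possibly split into several operations by
 * spi_mem_adjust_op_size().
 */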
1011 static const struct spi_mem_op *
1012 spinand_select_op_variant(struct spinand_device *spinand,
1013               const struct spinand_op_variants *variants)
1014 {
1015     struct nand_device *nand = spinand_to_nand(spinand);
1016     unsigned int i;
1017 
1018     for (i = 0; i < variants->nops; i++) {
1019         struct spi_mem_op op = variants->ops[i];
1020         unsigned int nbytes;
1021         int ret;
1022 
1023         nbytes = nanddev_per_page_oobsize(nand) +
1024              nanddev_page_size(nand);
1025 
1026         while (nbytes) {
1027             op.data.nbytes = nbytes;
1028             ret = spi_mem_adjust_op_size(spinand->spimem, &op);
1029             if (ret)
1030                 break;
1031 
1032             if (!spi_mem_supports_op(spinand->spimem, &op))
1033                 break;
1034 
1035             nbytes -= op.data.nbytes;
1036         }
1037 
1038         if (!nbytes)
1039             return &variants->ops[i];
1040     }
1041 
1042     return NULL;
1043 }
1044 
1045 /**
1046  * spinand_match_and_init() - Try to find a match between a device ID and an
1047  *                entry in a spinand_info table
1048  * @spinand: SPI NAND object
1049  * @table: SPI NAND device description table
1050  * @table_size: size of the device description table
1051  * @rdid_method: read id method to match
1052  *
1053  * Match between a device ID retrieved through the READ_ID command and an
1054  * entry in the SPI NAND description table. If a match is found, the spinand
1055  * object will be initialized with information provided by the matching
1056  * spinand_info entry.
1057  *
1058  * Return: 0 on success, a negative error code otherwise.
1059  */
1060 int spinand_match_and_init(struct spinand_device *spinand,
1061                const struct spinand_info *table,
1062                unsigned int table_size,
1063                enum spinand_readid_method rdid_method)
1064 {
1065     u8 *id = spinand->id.data;
1066     struct nand_device *nand = spinand_to_nand(spinand);
1067     unsigned int i;
1068 
1069     for (i = 0; i < table_size; i++) {
1070         const struct spinand_info *info = &table[i];
1071         const struct spi_mem_op *op;
1072 
1073         if (rdid_method != info->devid.method)
1074             continue;
1075 
1076         if (memcmp(id + 1, info->devid.id, info->devid.len))
1077             continue;
1078 
1079         nand->memorg = table[i].memorg;
1080         nanddev_set_ecc_requirements(nand, &table[i].eccreq);
1081         spinand->eccinfo = table[i].eccinfo;
1082         spinand->flags = table[i].flags;
1083         spinand->id.len = 1 + table[i].devid.len;
1084         spinand->select_target = table[i].select_target;
1085 
1086         op = spinand_select_op_variant(spinand,
1087                            info->op_variants.read_cache);
1088         if (!op)
1089             return -ENOTSUPP;
1090 
1091         spinand->op_templates.read_cache = op;
1092 
1093         op = spinand_select_op_variant(spinand,
1094                            info->op_variants.write_cache);
1095         if (!op)
1096             return -ENOTSUPP;
1097 
1098         spinand->op_templates.write_cache = op;
1099 
1100         op = spinand_select_op_variant(spinand,
1101                            info->op_variants.update_cache);
1102         spinand->op_templates.update_cache = op;
1103 
1104         return 0;
1105     }
1106 
1107     return -ENOTSUPP;
1108 }
1109 
1110 static int spinand_detect(struct spinand_device *spinand)
1111 {
1112     struct device *dev = &spinand->spimem->spi->dev;
1113     struct nand_device *nand = spinand_to_nand(spinand);
1114     int ret;
1115 
1116     ret = spinand_reset_op(spinand);
1117     if (ret)
1118         return ret;
1119 
1120     ret = spinand_id_detect(spinand);
1121     if (ret) {
1122         dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
1123             spinand->id.data);
1124         return ret;
1125     }
1126 
1127     if (nand->memorg.ntargets > 1 && !spinand->select_target) {
1128         dev_err(dev,
1129             "SPI NANDs with more than one die must implement ->select_target()\n");
1130         return -EINVAL;
1131     }
1132 
1133     dev_info(&spinand->spimem->spi->dev,
1134          "%s SPI NAND was found.\n", spinand->manufacturer->name);
1135     dev_info(&spinand->spimem->spi->dev,
1136          "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
1137          nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
1138          nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
1139 
1140     return 0;
1141 }
1142 
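/*
 * Common chip setup shared by probe and resume: refresh the config cache,
 * program the quad-enable bit, leave OTP mode, run the manufacturer init
 * hook and unlock all blocks on every die.
 */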
1143 static int spinand_init_flash(struct spinand_device *spinand)
1144 {
1145     struct device *dev = &spinand->spimem->spi->dev;
1146     struct nand_device *nand = spinand_to_nand(spinand);
1147     int ret, i;
1148 
1149     ret = spinand_read_cfg(spinand);
1150     if (ret)
1151         return ret;
1152 
1153     ret = spinand_init_quad_enable(spinand);
1154     if (ret)
1155         return ret;
1156 
1157     ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
1158     if (ret)
1159         return ret;
1160 
1161     ret = spinand_manufacturer_init(spinand);
1162     if (ret) {
1163         dev_err(dev,
1164         "Failed to initialize the SPI NAND chip (err = %d)\n",
1165         ret);
1166         return ret;
1167     }
1168 
1169     /* After power up, all blocks are locked, so unlock them here. */
1170     for (i = 0; i < nand->memorg.ntargets; i++) {
1171         ret = spinand_select_target(spinand, i);
1172         if (ret)
1173             break;
1174 
1175         ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
1176         if (ret)
1177             break;
1178     }
1179 
1180     if (ret)
1181         spinand_manufacturer_cleanup(spinand);
1182 
1183     return ret;
1184 }
1185 
1186 static void spinand_mtd_resume(struct mtd_info *mtd)
1187 {
1188     struct spinand_device *spinand = mtd_to_spinand(mtd);
1189     int ret;
1190 
1191     ret = spinand_reset_op(spinand);
1192     if (ret)
1193         return;
1194 
1195     ret = spinand_init_flash(spinand);
1196     if (ret)
1197         return;
1198 
1199     spinand_ecc_enable(spinand, false);
1200 }
1201 
1202 static int spinand_init(struct spinand_device *spinand)
1203 {
1204     struct device *dev = &spinand->spimem->spi->dev;
1205     struct mtd_info *mtd = spinand_to_mtd(spinand);
1206     struct nand_device *nand = mtd_to_nanddev(mtd);
1207     int ret;
1208 
1209     /*
1210      * We need a scratch buffer because the spi_mem interface requires that
1211      * the buffer passed in spi_mem_op->data.buf be DMA-able.
1212      */
1213     spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
1214     if (!spinand->scratchbuf)
1215         return -ENOMEM;
1216 
1217     ret = spinand_detect(spinand);
1218     if (ret)
1219         goto err_free_bufs;
1220 
1221     /*
1222      * Use kzalloc() instead of devm_kzalloc() here, because some drivers
1223      * may use this buffer for DMA access.
1224      * Memory allocated by devm_ does not guarantee DMA-safe alignment.
1225      */
1226     spinand->databuf = kzalloc(nanddev_page_size(nand) +
1227                    nanddev_per_page_oobsize(nand),
1228                    GFP_KERNEL);
1229     if (!spinand->databuf) {
1230         ret = -ENOMEM;
1231         goto err_free_bufs;
1232     }
1233 
1234     spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
1235 
1236     ret = spinand_init_cfg_cache(spinand);
1237     if (ret)
1238         goto err_free_bufs;
1239 
1240     ret = spinand_init_flash(spinand);
1241     if (ret)
1242         goto err_free_bufs;
1243 
1244     ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
1245     if (ret)
1246         goto err_manuf_cleanup;
1247 
1248     /* SPI-NAND default ECC engine is on-die */
1249     nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
1250     nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;
1251 
1252     spinand_ecc_enable(spinand, false);
1253     ret = nanddev_ecc_engine_init(nand);
1254     if (ret)
1255         goto err_cleanup_nanddev;
1256 
1257     mtd->_read_oob = spinand_mtd_read;
1258     mtd->_write_oob = spinand_mtd_write;
1259     mtd->_block_isbad = spinand_mtd_block_isbad;
1260     mtd->_block_markbad = spinand_mtd_block_markbad;
1261     mtd->_block_isreserved = spinand_mtd_block_isreserved;
1262     mtd->_erase = spinand_mtd_erase;
1263     mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
1264     mtd->_resume = spinand_mtd_resume;
1265 
1266     if (nand->ecc.engine) {
1267         ret = mtd_ooblayout_count_freebytes(mtd);
1268         if (ret < 0)
1269             goto err_cleanup_ecc_engine;
1270     }
1271 
1272     mtd->oobavail = ret;
1273 
1274     /* Propagate ECC information to mtd_info */
1275     mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
1276     mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
1277 
1278     ret = spinand_create_dirmaps(spinand);
1279     if (ret) {
1280         dev_err(dev,
1281             "Failed to create direct mappings for read/write operations (err = %d)\n",
1282             ret);
1283         goto err_cleanup_ecc_engine;
1284     }
1285 
1286     return 0;
1287 
1288 err_cleanup_ecc_engine:
1289     nanddev_ecc_engine_cleanup(nand);
1290 
1291 err_cleanup_nanddev:
1292     nanddev_cleanup(nand);
1293 
1294 err_manuf_cleanup:
1295     spinand_manufacturer_cleanup(spinand);
1296 
1297 err_free_bufs:
1298     kfree(spinand->databuf);
1299     kfree(spinand->scratchbuf);
1300     return ret;
1301 }
1302 
1303 static void spinand_cleanup(struct spinand_device *spinand)
1304 {
1305     struct nand_device *nand = spinand_to_nand(spinand);
1306 
1307     nanddev_cleanup(nand);
1308     spinand_manufacturer_cleanup(spinand);
1309     kfree(spinand->databuf);
1310     kfree(spinand->scratchbuf);
1311 }
1312 
1313 static int spinand_probe(struct spi_mem *mem)
1314 {
1315     struct spinand_device *spinand;
1316     struct mtd_info *mtd;
1317     int ret;
1318 
1319     spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
1320                    GFP_KERNEL);
1321     if (!spinand)
1322         return -ENOMEM;
1323 
1324     spinand->spimem = mem;
1325     spi_mem_set_drvdata(mem, spinand);
1326     spinand_set_of_node(spinand, mem->spi->dev.of_node);
1327     mutex_init(&spinand->lock);
1328     mtd = spinand_to_mtd(spinand);
1329     mtd->dev.parent = &mem->spi->dev;
1330 
1331     ret = spinand_init(spinand);
1332     if (ret)
1333         return ret;
1334 
1335     ret = mtd_device_register(mtd, NULL, 0);
1336     if (ret)
1337         goto err_spinand_cleanup;
1338 
1339     return 0;
1340 
1341 err_spinand_cleanup:
1342     spinand_cleanup(spinand);
1343 
1344     return ret;
1345 }
1346 
1347 static int spinand_remove(struct spi_mem *mem)
1348 {
1349     struct spinand_device *spinand;
1350     struct mtd_info *mtd;
1351     int ret;
1352 
1353     spinand = spi_mem_get_drvdata(mem);
1354     mtd = spinand_to_mtd(spinand);
1355 
1356     ret = mtd_device_unregister(mtd);
1357     if (ret)
1358         return ret;
1359 
1360     spinand_cleanup(spinand);
1361 
1362     return 0;
1363 }
1364 
1365 static const struct spi_device_id spinand_ids[] = {
1366     { .name = "spi-nand" },
1367     { /* sentinel */ },
1368 };
1369 MODULE_DEVICE_TABLE(spi, spinand_ids);
1370 
1371 #ifdef CONFIG_OF
1372 static const struct of_device_id spinand_of_ids[] = {
1373     { .compatible = "spi-nand" },
1374     { /* sentinel */ },
1375 };
1376 MODULE_DEVICE_TABLE(of, spinand_of_ids);
1377 #endif
1378 
1379 static struct spi_mem_driver spinand_drv = {
1380     .spidrv = {
1381         .id_table = spinand_ids,
1382         .driver = {
1383             .name = "spi-nand",
1384             .of_match_table = of_match_ptr(spinand_of_ids),
1385         },
1386     },
1387     .probe = spinand_probe,
1388     .remove = spinand_remove,
1389 };
1390 module_spi_mem_driver(spinand_drv);
1391 
1392 MODULE_DESCRIPTION("SPI NAND framework");
1393 MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
1394 MODULE_LICENSE("GPL v2");