0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  *  Copyright 2017 - Free Electrons
0004  *
0005  *  Authors:
0006  *  Boris Brezillon <boris.brezillon@free-electrons.com>
0007  *  Peter Pan <peterpandong@micron.com>
0008  */
0009 
0010 #ifndef __LINUX_MTD_NAND_H
0011 #define __LINUX_MTD_NAND_H
0012 
0013 #include <linux/mtd/mtd.h>
0014 
0015 struct nand_device;
0016 
0017 /**
0018  * struct nand_memory_organization - Memory organization structure
0019  * @bits_per_cell: number of bits per NAND cell
0020  * @pagesize: page size
0021  * @oobsize: OOB area size
0022  * @pages_per_eraseblock: number of pages per eraseblock
0023  * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
0024  * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
0025  * @planes_per_lun: number of planes per LUN
0026  * @luns_per_target: number of LUNs per target (target is a synonym for die)
0027  * @ntargets: total number of targets exposed by the NAND device
0028  */
0029 struct nand_memory_organization {
0030     unsigned int bits_per_cell;
0031     unsigned int pagesize;
0032     unsigned int oobsize;
0033     unsigned int pages_per_eraseblock;
0034     unsigned int eraseblocks_per_lun;
0035     unsigned int max_bad_eraseblocks_per_lun;
0036     unsigned int planes_per_lun;
0037     unsigned int luns_per_target;
0038     unsigned int ntargets;
0039 };
0040 
0041 #define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt)   \
0042     {                           \
0043         .bits_per_cell = (bpc),             \
0044         .pagesize = (ps),               \
0045         .oobsize = (os),                \
0046         .pages_per_eraseblock = (ppe),          \
0047         .eraseblocks_per_lun = (epl),           \
0048         .max_bad_eraseblocks_per_lun = (mbb),       \
0049         .planes_per_lun = (ppl),            \
0050         .luns_per_target = (lpt),           \
0051         .ntargets = (nt),               \
0052     }
0053 
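/*
 * Example (editorial illustration, not part of the original header): a
 * hypothetical SLC chip with 2KiB pages, 64 OOB bytes, 64 pages per
 * eraseblock, 1024 eraseblocks per LUN, at most 20 bad eraseblocks per LUN,
 * one plane, one LUN per target and a single target could be described as:
 *
 *	NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1)
 */
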
0054 /**
0055  * struct nand_row_converter - Information needed to convert an absolute offset
0056  *                 into a row address
0057  * @lun_addr_shift: position of the LUN identifier in the row address
0058  * @eraseblock_addr_shift: position of the eraseblock identifier in the row
0059  *             address
0060  */
0061 struct nand_row_converter {
0062     unsigned int lun_addr_shift;
0063     unsigned int eraseblock_addr_shift;
0064 };
0065 
0066 /**
0067  * struct nand_pos - NAND position object
0068  * @target: the NAND target/die
0069  * @lun: the LUN identifier
0070  * @plane: the plane within the LUN
0071  * @eraseblock: the eraseblock within the LUN
0072  * @page: the page within the LUN
0073  *
0074  * This information is usually used by specific sub-layers to select the
0075  * appropriate target/die and generate a row address to pass to the device.
0076  */
0077 struct nand_pos {
0078     unsigned int target;
0079     unsigned int lun;
0080     unsigned int plane;
0081     unsigned int eraseblock;
0082     unsigned int page;
0083 };
0084 
0085 /**
0086  * enum nand_page_io_req_type - Direction of an I/O request
0087  * @NAND_PAGE_READ: from the chip, to the controller
0088  * @NAND_PAGE_WRITE: from the controller, to the chip
0089  */
0090 enum nand_page_io_req_type {
0091     NAND_PAGE_READ = 0,
0092     NAND_PAGE_WRITE,
0093 };
0094 
0095 /**
0096  * struct nand_page_io_req - NAND I/O request object
0097  * @type: the type of page I/O: read or write
0098  * @pos: the position this I/O request is targeting
0099  * @dataoffs: the offset within the page
0100  * @datalen: number of data bytes to read from/write to this page
0101  * @databuf: buffer to store data in or get data from
0102  * @ooboffs: the OOB offset within the page
0103  * @ooblen: the number of OOB bytes to read from/write to this page
0104  * @oobbuf: buffer to store OOB data in or get OOB data from
0105  * @mode: one of the %MTD_OPS_XXX modes
0106  *
0107  * This object is used to pass per-page I/O requests to NAND sub-layers. This
0108  * way all useful information is already formatted consistently and the
0109  * specific NAND layers can focus on translating this information into
0110  * specific commands/operations.
0111  */
0112 struct nand_page_io_req {
0113     enum nand_page_io_req_type type;
0114     struct nand_pos pos;
0115     unsigned int dataoffs;
0116     unsigned int datalen;
0117     union {
0118         const void *out;
0119         void *in;
0120     } databuf;
0121     unsigned int ooboffs;
0122     unsigned int ooblen;
0123     union {
0124         const void *out;
0125         void *in;
0126     } oobbuf;
0127     int mode;
0128 };
0129 
0130 const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
0131 const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void);
0132 const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void);
0133 
0134 /**
0135  * enum nand_ecc_engine_type - NAND ECC engine type
0136  * @NAND_ECC_ENGINE_TYPE_INVALID: Invalid value
0137  * @NAND_ECC_ENGINE_TYPE_NONE: No ECC correction
0138  * @NAND_ECC_ENGINE_TYPE_SOFT: Software ECC correction
0139  * @NAND_ECC_ENGINE_TYPE_ON_HOST: On host hardware ECC correction
0140  * @NAND_ECC_ENGINE_TYPE_ON_DIE: On chip hardware ECC correction
0141  */
0142 enum nand_ecc_engine_type {
0143     NAND_ECC_ENGINE_TYPE_INVALID,
0144     NAND_ECC_ENGINE_TYPE_NONE,
0145     NAND_ECC_ENGINE_TYPE_SOFT,
0146     NAND_ECC_ENGINE_TYPE_ON_HOST,
0147     NAND_ECC_ENGINE_TYPE_ON_DIE,
0148 };
0149 
0150 /**
0151  * enum nand_ecc_placement - NAND ECC bytes placement
0152  * @NAND_ECC_PLACEMENT_UNKNOWN: The actual position of the ECC bytes is unknown
0153  * @NAND_ECC_PLACEMENT_OOB: The ECC bytes are located in the OOB area
0154  * @NAND_ECC_PLACEMENT_INTERLEAVED: Syndrome layout, there are ECC bytes
0155  *                                  interleaved with regular data in the main
0156  *                                  area
0157  */
0158 enum nand_ecc_placement {
0159     NAND_ECC_PLACEMENT_UNKNOWN,
0160     NAND_ECC_PLACEMENT_OOB,
0161     NAND_ECC_PLACEMENT_INTERLEAVED,
0162 };
0163 
0164 /**
0165  * enum nand_ecc_algo - NAND ECC algorithm
0166  * @NAND_ECC_ALGO_UNKNOWN: Unknown algorithm
0167  * @NAND_ECC_ALGO_HAMMING: Hamming algorithm
0168  * @NAND_ECC_ALGO_BCH: Bose-Chaudhuri-Hocquenghem algorithm
0169  * @NAND_ECC_ALGO_RS: Reed-Solomon algorithm
0170  */
0171 enum nand_ecc_algo {
0172     NAND_ECC_ALGO_UNKNOWN,
0173     NAND_ECC_ALGO_HAMMING,
0174     NAND_ECC_ALGO_BCH,
0175     NAND_ECC_ALGO_RS,
0176 };
0177 
0178 /**
0179  * struct nand_ecc_props - NAND ECC properties
0180  * @engine_type: ECC engine type
0181  * @placement: OOB placement (if relevant)
0182  * @algo: ECC algorithm (if relevant)
0183  * @strength: ECC strength
0184  * @step_size: Number of bytes per step
0185  * @flags: Misc properties
0186  */
0187 struct nand_ecc_props {
0188     enum nand_ecc_engine_type engine_type;
0189     enum nand_ecc_placement placement;
0190     enum nand_ecc_algo algo;
0191     unsigned int strength;
0192     unsigned int step_size;
0193     unsigned int flags;
0194 };
0195 
0196 #define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
0197 
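/*
 * Example (editorial illustration): a chip requiring 8 bits of correction
 * per 512-byte step would advertise its needs as NAND_ECCREQ(8, 512).
 */
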
0198 /* NAND ECC misc flags */
0199 #define NAND_ECC_MAXIMIZE_STRENGTH BIT(0)
0200 
0201 /**
0202  * struct nand_bbt - bad block table object
0203  * @cache: in memory BBT cache
0204  */
0205 struct nand_bbt {
0206     unsigned long *cache;
0207 };
0208 
0209 /**
0210  * struct nand_ops - NAND operations
0211  * @erase: erase a specific block. No need to check if the block is bad before
0212  *     erasing, this has been taken care of by the generic NAND layer
0213  * @markbad: mark a specific block bad. No need to check if the block is
0214  *       already marked bad, this has been taken care of by the generic
0215  *       NAND layer. This method should just write the BBM (Bad Block
0216  *       Marker) so that future calls to struct_nand_ops->isbad() return
0217  *       true
0218  * @isbad: check whether a block is bad or not. This method should just read
0219  *     the BBM and return whether the block is bad or not based on what it
0220  *     reads
0221  *
0222  * These are all low level operations that should be implemented by specialized
0223  * NAND layers (SPI NAND, raw NAND, ...).
0224  */
0225 struct nand_ops {
0226     int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
0227     int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
0228     bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
0229 };
0230 
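/*
 * Example (editorial sketch of how a specialized layer might fill these
 * hooks; the foo_* names are assumptions, not an existing API):
 *
 *	static int foo_erase(struct nand_device *nand, const struct nand_pos *pos)
 *	{
 *		return foo_send_erase_cmd(nand, nanddev_pos_to_row(nand, pos));
 *	}
 *
 *	static const struct nand_ops foo_nand_ops = {
 *		.erase = foo_erase,
 *		.markbad = foo_markbad,
 *		.isbad = foo_isbad,
 *	};
 */
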
0231 /**
0232  * struct nand_ecc_context - Context for the ECC engine
0233  * @conf: basic ECC engine parameters
0234  * @nsteps: number of ECC steps
0235  * @total: total number of bytes used for storing ECC codes, this is used by
0236  *         generic OOB layouts
0237  * @priv: ECC engine driver private data
0238  */
0239 struct nand_ecc_context {
0240     struct nand_ecc_props conf;
0241     unsigned int nsteps;
0242     unsigned int total;
0243     void *priv;
0244 };
0245 
0246 /**
0247  * struct nand_ecc_engine_ops - ECC engine operations
0248  * @init_ctx: given a desired user configuration for the pointed NAND device,
0249  *            requests the ECC engine driver to setup a configuration with
0250  *            values it supports.
0251  * @cleanup_ctx: clean the context initialized by @init_ctx.
0252  * @prepare_io_req: is called before reading/writing a page to prepare the I/O
0253  *                  request to be performed with ECC correction.
0254  * @finish_io_req: is called after reading/writing a page to terminate the I/O
0255  *                 request and ensure proper ECC correction.
0256  */
0257 struct nand_ecc_engine_ops {
0258     int (*init_ctx)(struct nand_device *nand);
0259     void (*cleanup_ctx)(struct nand_device *nand);
0260     int (*prepare_io_req)(struct nand_device *nand,
0261                   struct nand_page_io_req *req);
0262     int (*finish_io_req)(struct nand_device *nand,
0263                  struct nand_page_io_req *req);
0264 };
0265 
0266 /**
0267  * enum nand_ecc_engine_integration - How the NAND ECC engine is integrated
0268  * @NAND_ECC_ENGINE_INTEGRATION_INVALID: Invalid value
0269  * @NAND_ECC_ENGINE_INTEGRATION_PIPELINED: Pipelined engine, performs on-the-fly
0270  *                                         correction, does not need to copy
0271  *                                         data around
0272  * @NAND_ECC_ENGINE_INTEGRATION_EXTERNAL: External engine, needs to bring the
0273  *                                        data into its own area before use
0274  */
0275 enum nand_ecc_engine_integration {
0276     NAND_ECC_ENGINE_INTEGRATION_INVALID,
0277     NAND_ECC_ENGINE_INTEGRATION_PIPELINED,
0278     NAND_ECC_ENGINE_INTEGRATION_EXTERNAL,
0279 };
0280 
0281 /**
0282  * struct nand_ecc_engine - ECC engine abstraction for NAND devices
0283  * @dev: Host device
0284  * @node: Private field for registration time
0285  * @ops: ECC engine operations
0286  * @integration: How the engine is integrated with the host
0287  *               (only relevant on %NAND_ECC_ENGINE_TYPE_ON_HOST engines)
0288  * @priv: Private data
0289  */
0290 struct nand_ecc_engine {
0291     struct device *dev;
0292     struct list_head node;
0293     struct nand_ecc_engine_ops *ops;
0294     enum nand_ecc_engine_integration integration;
0295     void *priv;
0296 };
0297 
0298 void of_get_nand_ecc_user_config(struct nand_device *nand);
0299 int nand_ecc_init_ctx(struct nand_device *nand);
0300 void nand_ecc_cleanup_ctx(struct nand_device *nand);
0301 int nand_ecc_prepare_io_req(struct nand_device *nand,
0302                 struct nand_page_io_req *req);
0303 int nand_ecc_finish_io_req(struct nand_device *nand,
0304                struct nand_page_io_req *req);
0305 bool nand_ecc_is_strong_enough(struct nand_device *nand);
0306 
0307 #if IS_REACHABLE(CONFIG_MTD_NAND_CORE)
0308 int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine);
0309 int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine);
0310 #else
0311 static inline int
0312 nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
0313 {
0314     return -ENOTSUPP;
0315 }
0316 static inline int
0317 nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
0318 {
0319     return -ENOTSUPP;
0320 }
0321 #endif
0322 
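/*
 * Example (editorial sketch, assuming a hypothetical "foo" controller
 * driver): an on-host hardware ECC engine is described by a
 * struct nand_ecc_engine carrying its operations, then registered so that
 * NAND cores can retrieve it later:
 *
 *	static struct nand_ecc_engine_ops foo_ecc_engine_ops = {
 *		.init_ctx = foo_ecc_init_ctx,
 *		.cleanup_ctx = foo_ecc_cleanup_ctx,
 *		.prepare_io_req = foo_ecc_prepare_io_req,
 *		.finish_io_req = foo_ecc_finish_io_req,
 *	};
 *
 *	foo->engine.dev = &pdev->dev;
 *	foo->engine.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
 *	foo->engine.ops = &foo_ecc_engine_ops;
 *	return nand_ecc_register_on_host_hw_engine(&foo->engine);
 */
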
0323 struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand);
0324 struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand);
0325 struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand);
0326 void nand_ecc_put_on_host_hw_engine(struct nand_device *nand);
0327 struct device *nand_ecc_get_engine_dev(struct device *host);
0328 
0329 #if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
0330 struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void);
0331 #else
0332 static inline struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void)
0333 {
0334     return NULL;
0335 }
0336 #endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */
0337 
0338 #if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
0339 struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
0340 #else
0341 static inline struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void)
0342 {
0343     return NULL;
0344 }
0345 #endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
0346 
0347 /**
0348  * struct nand_ecc_req_tweak_ctx - Help for automatically tweaking requests
0349  * @orig_req: Pointer to the original IO request
0350  * @nand: Related NAND device, to have access to its memory organization
0351  * @page_buffer_size: Real size of the page buffer to use (can be set by the
0352  *                    user before the tweaking mechanism initialization)
0353  * @oob_buffer_size: Real size of the OOB buffer to use (can be set by the
0354  *                   user before the tweaking mechanism initialization)
0355  * @spare_databuf: Data bounce buffer
0356  * @spare_oobbuf: OOB bounce buffer
0357  * @bounce_data: Flag indicating a data bounce buffer is used
0358  * @bounce_oob: Flag indicating an OOB bounce buffer is used
0359  */
0360 struct nand_ecc_req_tweak_ctx {
0361     struct nand_page_io_req orig_req;
0362     struct nand_device *nand;
0363     unsigned int page_buffer_size;
0364     unsigned int oob_buffer_size;
0365     void *spare_databuf;
0366     void *spare_oobbuf;
0367     bool bounce_data;
0368     bool bounce_oob;
0369 };
0370 
0371 int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
0372                    struct nand_device *nand);
0373 void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx);
0374 void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
0375             struct nand_page_io_req *req);
0376 void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
0377               struct nand_page_io_req *req);
0378 
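/*
 * Example (editorial sketch): an external ECC engine driver would typically
 * call nand_ecc_init_req_tweaking() from its ->init_ctx() hook, then wrap
 * each I/O request around its bounce buffers:
 *
 *	->prepare_io_req():	nand_ecc_tweak_req(&ctx->req_ctx, req);
 *	->finish_io_req():	nand_ecc_restore_req(&ctx->req_ctx, req);
 *
 * where ctx->req_ctx is a struct nand_ecc_req_tweak_ctx stored in the
 * engine's private data (the ctx layout is an assumption).
 */
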
0379 /**
0380  * struct nand_ecc - Information relative to the ECC
0381  * @defaults: Default values, depend on the underlying subsystem
0382  * @requirements: ECC requirements from the NAND chip perspective
0383  * @user_conf: User desires in terms of ECC parameters
0384  * @ctx: ECC context for the ECC engine, derived from the device @requirements,
0385  *       the @user_conf and the @defaults
0386  * @ondie_engine: On-die ECC engine reference, if any
0387  * @engine: ECC engine actually bound
0388  */
0389 struct nand_ecc {
0390     struct nand_ecc_props defaults;
0391     struct nand_ecc_props requirements;
0392     struct nand_ecc_props user_conf;
0393     struct nand_ecc_context ctx;
0394     struct nand_ecc_engine *ondie_engine;
0395     struct nand_ecc_engine *engine;
0396 };
0397 
0398 /**
0399  * struct nand_device - NAND device
0400  * @mtd: MTD instance attached to the NAND device
0401  * @memorg: memory layout
0402  * @ecc: NAND ECC object attached to the NAND device
0403  * @rowconv: position to row address converter
0404  * @bbt: bad block table info
0405  * @ops: NAND operations attached to the NAND device
0406  *
0407  * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
0408  * should declare their own NAND object embedding a nand_device struct (that's
0409  * how inheritance is done).
0410  * struct_nand_device->memorg and struct_nand_device->ecc.requirements should
0411  * be filled at device detection time to reflect the NAND device
0412  * capabilities/requirements. Once this is done nanddev_init() can be called.
0413  * It will take care of converting NAND information into MTD ones, which means
0414  * the specialized NAND layers should never manually tweak
0415  * struct_nand_device->mtd except for the ->_read/write() hooks.
0416  */
0417 struct nand_device {
0418     struct mtd_info mtd;
0419     struct nand_memory_organization memorg;
0420     struct nand_ecc ecc;
0421     struct nand_row_converter rowconv;
0422     struct nand_bbt bbt;
0423     const struct nand_ops *ops;
0424 };
0425 
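/*
 * Example (editorial sketch of the inheritance pattern described above; the
 * foo_* names are assumptions, not an existing API):
 *
 *	struct foo_nand {
 *		struct nand_device base;
 *		void __iomem *regs;
 *	};
 *
 *	static int foo_nand_detect(struct foo_nand *foo)
 *	{
 *		struct nand_device *nand = &foo->base;
 *
 *		nand->memorg = (struct nand_memory_organization)
 *			NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1);
 *		nand->ecc.requirements = (struct nand_ecc_props)
 *			NAND_ECCREQ(8, 512);
 *
 *		return nanddev_init(nand, &foo_nand_ops, THIS_MODULE);
 *	}
 */
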
0426 /**
0427  * struct nand_io_iter - NAND I/O iterator
0428  * @req: current I/O request
0429  * @oobbytes_per_page: maximum number of OOB bytes per page
0430  * @dataleft: remaining number of data bytes to read/write
0431  * @oobleft: remaining number of OOB bytes to read/write
0432  *
0433  * Can be used by specialized NAND layers to iterate over all pages covered
0434  * by an MTD I/O request, which should greatly simplify the boilerplate
0435  * code needed to read/write data from/to a NAND device.
0436  */
0437 struct nand_io_iter {
0438     struct nand_page_io_req req;
0439     unsigned int oobbytes_per_page;
0440     unsigned int dataleft;
0441     unsigned int oobleft;
0442 };
0443 
0444 /**
0445  * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
0446  * @mtd: MTD instance
0447  *
0448  * Return: the NAND device embedding @mtd.
0449  */
0450 static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
0451 {
0452     return container_of(mtd, struct nand_device, mtd);
0453 }
0454 
0455 /**
0456  * nanddev_to_mtd() - Get the MTD device attached to a NAND device
0457  * @nand: NAND device
0458  *
0459  * Return: the MTD device embedded in @nand.
0460  */
0461 static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
0462 {
0463     return &nand->mtd;
0464 }
0465 
0466 /**
0467  * nanddev_bits_per_cell() - Get the number of bits per cell
0468  * @nand: NAND device
0469  *
0470  * Return: the number of bits per cell.
0471  */
0472 static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
0473 {
0474     return nand->memorg.bits_per_cell;
0475 }
0476 
0477 /**
0478  * nanddev_page_size() - Get NAND page size
0479  * @nand: NAND device
0480  *
0481  * Return: the page size.
0482  */
0483 static inline size_t nanddev_page_size(const struct nand_device *nand)
0484 {
0485     return nand->memorg.pagesize;
0486 }
0487 
0488 /**
0489  * nanddev_per_page_oobsize() - Get NAND OOB size
0490  * @nand: NAND device
0491  *
0492  * Return: the OOB size.
0493  */
0494 static inline unsigned int
0495 nanddev_per_page_oobsize(const struct nand_device *nand)
0496 {
0497     return nand->memorg.oobsize;
0498 }
0499 
0500 /**
0501  * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
0502  * @nand: NAND device
0503  *
0504  * Return: the number of pages per eraseblock.
0505  */
0506 static inline unsigned int
0507 nanddev_pages_per_eraseblock(const struct nand_device *nand)
0508 {
0509     return nand->memorg.pages_per_eraseblock;
0510 }
0511 
0512 /**
0513  * nanddev_pages_per_target() - Get the number of pages per target
0514  * @nand: NAND device
0515  *
0516  * Return: the number of pages per target.
0517  */
0518 static inline unsigned int
0519 nanddev_pages_per_target(const struct nand_device *nand)
0520 {
0521     return nand->memorg.pages_per_eraseblock *
0522            nand->memorg.eraseblocks_per_lun *
0523            nand->memorg.luns_per_target;
0524 }
0525 
0526 /**
0527  * nanddev_eraseblock_size() - Get NAND erase block size
0528  * @nand: NAND device
0529  *
0530  * Return: the eraseblock size.
0531  */
0532 static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
0533 {
0534     return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
0535 }
0536 
0537 /**
0538  * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
0539  * @nand: NAND device
0540  *
0541  * Return: the number of eraseblocks per LUN.
0542  */
0543 static inline unsigned int
0544 nanddev_eraseblocks_per_lun(const struct nand_device *nand)
0545 {
0546     return nand->memorg.eraseblocks_per_lun;
0547 }
0548 
0549 /**
0550  * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
0551  * @nand: NAND device
0552  *
0553  * Return: the number of eraseblocks per target.
0554  */
0555 static inline unsigned int
0556 nanddev_eraseblocks_per_target(const struct nand_device *nand)
0557 {
0558     return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
0559 }
0560 
0561 /**
0562  * nanddev_target_size() - Get the total size provided by a single target/die
0563  * @nand: NAND device
0564  *
0565  * Return: the total size exposed by a single target/die in bytes.
0566  */
0567 static inline u64 nanddev_target_size(const struct nand_device *nand)
0568 {
0569     return (u64)nand->memorg.luns_per_target *
0570            nand->memorg.eraseblocks_per_lun *
0571            nand->memorg.pages_per_eraseblock *
0572            nand->memorg.pagesize;
0573 }
0574 
0575 /**
0576  * nanddev_ntargets() - Get the total number of targets
0577  * @nand: NAND device
0578  *
0579  * Return: the number of targets/dies exposed by @nand.
0580  */
0581 static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
0582 {
0583     return nand->memorg.ntargets;
0584 }
0585 
0586 /**
0587  * nanddev_neraseblocks() - Get the total number of eraseblocks
0588  * @nand: NAND device
0589  *
0590  * Return: the total number of eraseblocks exposed by @nand.
0591  */
0592 static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
0593 {
0594     return nand->memorg.ntargets * nand->memorg.luns_per_target *
0595            nand->memorg.eraseblocks_per_lun;
0596 }
0597 
0598 /**
0599  * nanddev_size() - Get NAND size
0600  * @nand: NAND device
0601  *
0602  * Return: the total size (in bytes) exposed by @nand.
0603  */
0604 static inline u64 nanddev_size(const struct nand_device *nand)
0605 {
0606     return nanddev_target_size(nand) * nanddev_ntargets(nand);
0607 }
0608 
0609 /**
0610  * nanddev_get_memorg() - Extract memory organization info from a NAND device
0611  * @nand: NAND device
0612  *
0613  * This can be used by the upper layer to fill the memorg info before calling
0614  * nanddev_init().
0615  *
0616  * Return: the memorg object embedded in the NAND device.
0617  */
0618 static inline struct nand_memory_organization *
0619 nanddev_get_memorg(struct nand_device *nand)
0620 {
0621     return &nand->memorg;
0622 }
0623 
0624 /**
0625  * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device
0626  * @nand: NAND device
0627  */
0628 static inline const struct nand_ecc_props *
0629 nanddev_get_ecc_conf(struct nand_device *nand)
0630 {
0631     return &nand->ecc.ctx.conf;
0632 }
0633 
0634 /**
0635  * nanddev_get_ecc_nsteps() - Extract the number of ECC steps
0636  * @nand: NAND device
0637  */
0638 static inline unsigned int
0639 nanddev_get_ecc_nsteps(struct nand_device *nand)
0640 {
0641     return nand->ecc.ctx.nsteps;
0642 }
0643 
0644 /**
0645  * nanddev_get_ecc_bytes_per_step() - Extract the number of ECC bytes per step
0646  * @nand: NAND device
0647  */
0648 static inline unsigned int
0649 nanddev_get_ecc_bytes_per_step(struct nand_device *nand)
0650 {
0651     return nand->ecc.ctx.total / nand->ecc.ctx.nsteps;
0652 }
0653 
0654 /**
0655  * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND
0656  *                                  device
0657  * @nand: NAND device
0658  */
0659 static inline const struct nand_ecc_props *
0660 nanddev_get_ecc_requirements(struct nand_device *nand)
0661 {
0662     return &nand->ecc.requirements;
0663 }
0664 
0665 /**
0666  * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND
0667  *                                  device
0668  * @nand: NAND device
0669  * @reqs: Requirements
0670  */
0671 static inline void
0672 nanddev_set_ecc_requirements(struct nand_device *nand,
0673                  const struct nand_ecc_props *reqs)
0674 {
0675     nand->ecc.requirements = *reqs;
0676 }
0677 
0678 int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
0679          struct module *owner);
0680 void nanddev_cleanup(struct nand_device *nand);
0681 
0682 /**
0683  * nanddev_register() - Register a NAND device
0684  * @nand: NAND device
0685  *
0686  * Register a NAND device.
0687  * This function is just a wrapper around mtd_device_register()
0688  * registering the MTD device embedded in @nand.
0689  *
0690  * Return: 0 in case of success, a negative error code otherwise.
0691  */
0692 static inline int nanddev_register(struct nand_device *nand)
0693 {
0694     return mtd_device_register(&nand->mtd, NULL, 0);
0695 }
0696 
0697 /**
0698  * nanddev_unregister() - Unregister a NAND device
0699  * @nand: NAND device
0700  *
0701  * Unregister a NAND device.
0702  * This function is just a wrapper around mtd_device_unregister()
0703  * unregistering the MTD device embedded in @nand.
0704  *
0705  * Return: 0 in case of success, a negative error code otherwise.
0706  */
0707 static inline int nanddev_unregister(struct nand_device *nand)
0708 {
0709     return mtd_device_unregister(&nand->mtd);
0710 }
0711 
0712 /**
0713  * nanddev_set_of_node() - Attach a DT node to a NAND device
0714  * @nand: NAND device
0715  * @np: DT node
0716  *
0717  * Attach a DT node to a NAND device.
0718  */
0719 static inline void nanddev_set_of_node(struct nand_device *nand,
0720                        struct device_node *np)
0721 {
0722     mtd_set_of_node(&nand->mtd, np);
0723 }
0724 
0725 /**
0726  * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
0727  * @nand: NAND device
0728  *
0729  * Return: the DT node attached to @nand.
0730  */
0731 static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
0732 {
0733     return mtd_get_of_node(&nand->mtd);
0734 }
0735 
0736 /**
0737  * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
0738  * @nand: NAND device
0739  * @offs: absolute NAND offset (usually passed by the MTD layer)
0740  * @pos: a NAND position object to fill in
0741  *
0742  * Converts @offs into a nand_pos representation.
0743  *
0744  * Return: the offset within the NAND page pointed to by @pos.
0745  */
0746 static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
0747                            loff_t offs,
0748                            struct nand_pos *pos)
0749 {
0750     unsigned int pageoffs;
0751     u64 tmp = offs;
0752 
0753     pageoffs = do_div(tmp, nand->memorg.pagesize);
0754     pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
0755     pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
0756     pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
0757     pos->lun = do_div(tmp, nand->memorg.luns_per_target);
0758     pos->target = tmp;
0759 
0760     return pageoffs;
0761 }
0762 
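/*
 * Worked example (editorial, with illustrative geometry): for a 2048-byte
 * page, 64 pages per eraseblock, 1024 eraseblocks per LUN, one plane and one
 * LUN per target, an offset of 5MiB + 3 * 2048 + 16 resolves to target 0,
 * LUN 0, eraseblock 40 (5MiB / 128KiB), page 3, and the function returns the
 * in-page offset 16.
 */
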
0763 /**
0764  * nanddev_pos_cmp() - Compare two NAND positions
0765  * @a: First NAND position
0766  * @b: Second NAND position
0767  *
0768  * Compares two NAND positions.
0769  *
0770  * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
0771  */
0772 static inline int nanddev_pos_cmp(const struct nand_pos *a,
0773                   const struct nand_pos *b)
0774 {
0775     if (a->target != b->target)
0776         return a->target < b->target ? -1 : 1;
0777 
0778     if (a->lun != b->lun)
0779         return a->lun < b->lun ? -1 : 1;
0780 
0781     if (a->eraseblock != b->eraseblock)
0782         return a->eraseblock < b->eraseblock ? -1 : 1;
0783 
0784     if (a->page != b->page)
0785         return a->page < b->page ? -1 : 1;
0786 
0787     return 0;
0788 }
0789 
0790 /**
0791  * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
0792  * @nand: NAND device
0793  * @pos: the NAND position to convert
0794  *
0795  * Converts @pos NAND position into an absolute offset.
0796  *
0797  * Return: the absolute offset. Note that @pos points to the beginning of a
0798  *     page; if one wants to point to a specific offset within this page,
0799  *     the returned offset has to be adjusted manually.
0800  */
0801 static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
0802                      const struct nand_pos *pos)
0803 {
0804     unsigned int npages;
0805 
0806     npages = pos->page +
0807          ((pos->eraseblock +
0808            (pos->lun +
0809             (pos->target * nand->memorg.luns_per_target)) *
0810            nand->memorg.eraseblocks_per_lun) *
0811           nand->memorg.pages_per_eraseblock);
0812 
0813     return (loff_t)npages * nand->memorg.pagesize;
0814 }
0815 
0816 /**
0817  * nanddev_pos_to_row() - Extract a row address from a NAND position
0818  * @nand: NAND device
0819  * @pos: the position to convert
0820  *
0821  * Converts a NAND position into a row address that can then be passed to the
0822  * device.
0823  *
0824  * Return: the row address extracted from @pos.
0825  */
0826 static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
0827                           const struct nand_pos *pos)
0828 {
0829     return (pos->lun << nand->rowconv.lun_addr_shift) |
0830            (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
0831            pos->page;
0832 }
0833 
0834 /**
0835  * nanddev_pos_next_target() - Move a position to the next target/die
0836  * @nand: NAND device
0837  * @pos: the position to update
0838  *
0839  * Updates @pos to point to the start of the next target/die. Useful when you
0840  * want to iterate over all targets/dies of a NAND device.
0841  */
0842 static inline void nanddev_pos_next_target(struct nand_device *nand,
0843                        struct nand_pos *pos)
0844 {
0845     pos->page = 0;
0846     pos->plane = 0;
0847     pos->eraseblock = 0;
0848     pos->lun = 0;
0849     pos->target++;
0850 }
0851 
0852 /**
0853  * nanddev_pos_next_lun() - Move a position to the next LUN
0854  * @nand: NAND device
0855  * @pos: the position to update
0856  *
0857  * Updates @pos to point to the start of the next LUN. Useful when you want to
0858  * iterate over all LUNs of a NAND device.
0859  */
0860 static inline void nanddev_pos_next_lun(struct nand_device *nand,
0861                     struct nand_pos *pos)
0862 {
0863     if (pos->lun >= nand->memorg.luns_per_target - 1)
0864         return nanddev_pos_next_target(nand, pos);
0865 
0866     pos->lun++;
0867     pos->page = 0;
0868     pos->plane = 0;
0869     pos->eraseblock = 0;
0870 }
0871 
0872 /**
0873  * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
0874  * @nand: NAND device
0875  * @pos: the position to update
0876  *
0877  * Updates @pos to point to the start of the next eraseblock. Useful when you
0878  * want to iterate over all eraseblocks of a NAND device.
0879  */
0880 static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
0881                            struct nand_pos *pos)
0882 {
0883     if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
0884         return nanddev_pos_next_lun(nand, pos);
0885 
0886     pos->eraseblock++;
0887     pos->page = 0;
0888     pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
0889 }
0890 
0891 /**
0892  * nanddev_pos_next_page() - Move a position to the next page
0893  * @nand: NAND device
0894  * @pos: the position to update
0895  *
0896  * Updates @pos to point to the start of the next page. Useful when you want to
0897  * iterate over all pages of a NAND device.
0898  */
0899 static inline void nanddev_pos_next_page(struct nand_device *nand,
0900                      struct nand_pos *pos)
0901 {
0902     if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
0903         return nanddev_pos_next_eraseblock(nand, pos);
0904 
0905     pos->page++;
0906 }
0907 
0908 /**
0909  * nanddev_io_iter_init - Initialize a NAND I/O iterator
0910  * @nand: NAND device
0911  * @offs: absolute offset
0912  * @req: MTD request
0913  * @iter: NAND I/O iterator
0914  *
0915  * Initializes a NAND iterator based on the information passed by the MTD
0916  * layer.
0917  */
0918 static inline void nanddev_io_iter_init(struct nand_device *nand,
0919                     enum nand_page_io_req_type reqtype,
0920                     loff_t offs, struct mtd_oob_ops *req,
0921                     struct nand_io_iter *iter)
0922 {
0923     struct mtd_info *mtd = nanddev_to_mtd(nand);
0924 
0925     iter->req.type = reqtype;
0926     iter->req.mode = req->mode;
0927     iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
0928     iter->req.ooboffs = req->ooboffs;
0929     iter->oobbytes_per_page = mtd_oobavail(mtd, req);
0930     iter->dataleft = req->len;
0931     iter->oobleft = req->ooblen;
0932     iter->req.databuf.in = req->datbuf;
0933     iter->req.datalen = min_t(unsigned int,
0934                   nand->memorg.pagesize - iter->req.dataoffs,
0935                   iter->dataleft);
0936     iter->req.oobbuf.in = req->oobbuf;
0937     iter->req.ooblen = min_t(unsigned int,
0938                  iter->oobbytes_per_page - iter->req.ooboffs,
0939                  iter->oobleft);
0940 }
0941 
0942 /**
0943  * nanddev_io_iter_next_page - Move to the next page
0944  * @nand: NAND device
0945  * @iter: NAND I/O iterator
0946  *
0947  * Updates the @iter to point to the next page.
0948  */
0949 static inline void nanddev_io_iter_next_page(struct nand_device *nand,
0950                          struct nand_io_iter *iter)
0951 {
0952     nanddev_pos_next_page(nand, &iter->req.pos);
0953     iter->dataleft -= iter->req.datalen;
0954     iter->req.databuf.in += iter->req.datalen;
0955     iter->oobleft -= iter->req.ooblen;
0956     iter->req.oobbuf.in += iter->req.ooblen;
0957     iter->req.dataoffs = 0;
0958     iter->req.ooboffs = 0;
0959     iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
0960                   iter->dataleft);
0961     iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
0962                  iter->oobleft);
0963 }
0964 
0965 /**
0966  * nanddev_io_iter_end - Check whether iteration should end
0967  * @nand: NAND device
0968  * @iter: NAND I/O iterator
0969  *
0970  * Check whether @iter has reached the end of the NAND portion it was asked to
0971  * iterate on or not.
0972  *
0973  * Return: true if @iter has reached the end of the iteration request, false
0974  *     otherwise.
0975  */
0976 static inline bool nanddev_io_iter_end(struct nand_device *nand,
0977                        const struct nand_io_iter *iter)
0978 {
0979     if (iter->dataleft || iter->oobleft)
0980         return false;
0981 
0982     return true;
0983 }
0984 
0985 /**
0986  * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD I/O
0987  *             request
0988  * @nand: NAND device
0989  * @start: start address to read/write from
0990  * @req: MTD I/O request
0991  * @iter: NAND I/O iterator
0992  *
0993  * Should be used to iterate over pages that are contained in an MTD request.
0994  */
0995 #define nanddev_io_for_each_page(nand, type, start, req, iter)      \
0996     for (nanddev_io_iter_init(nand, type, start, req, iter);    \
0997          !nanddev_io_iter_end(nand, iter);              \
0998          nanddev_io_iter_next_page(nand, iter))
0999 
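/*
 * Example (editorial sketch, loosely modeled on how a specialized layer's
 * mtd ->_read_oob() path could use the iterator; foo_read_page() is an
 * assumption):
 *
 *	struct nand_io_iter iter;
 *	int ret = 0;
 *
 *	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
 *		ret = foo_read_page(nand, &iter.req);
 *		if (ret)
 *			break;
 *	}
 */
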
1000 bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
1001 bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
1002 int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
1003 int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
1004 
1005 /* ECC related functions */
1006 int nanddev_ecc_engine_init(struct nand_device *nand);
1007 void nanddev_ecc_engine_cleanup(struct nand_device *nand);
1008 
1009 static inline void *nand_to_ecc_ctx(struct nand_device *nand)
1010 {
1011     return nand->ecc.ctx.priv;
1012 }
1013 
1014 /* BBT related functions */
1015 enum nand_bbt_block_status {
1016     NAND_BBT_BLOCK_STATUS_UNKNOWN,
1017     NAND_BBT_BLOCK_GOOD,
1018     NAND_BBT_BLOCK_WORN,
1019     NAND_BBT_BLOCK_RESERVED,
1020     NAND_BBT_BLOCK_FACTORY_BAD,
1021     NAND_BBT_BLOCK_NUM_STATUS,
1022 };
1023 
1024 int nanddev_bbt_init(struct nand_device *nand);
1025 void nanddev_bbt_cleanup(struct nand_device *nand);
1026 int nanddev_bbt_update(struct nand_device *nand);
1027 int nanddev_bbt_get_block_status(const struct nand_device *nand,
1028                  unsigned int entry);
1029 int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
1030                  enum nand_bbt_block_status status);
1031 int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
1032 
1033 /**
1034  * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
1035  * @nand: NAND device
1036  * @pos: the NAND position we want to get BBT entry for
1037  *
1038  * Return the BBT entry used to store information about the eraseblock pointed
1039  * to by @pos.
1040  *
1041  * Return: the BBT entry storing information about the eraseblock at @pos.
1042  */
1043 static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
1044                             const struct nand_pos *pos)
1045 {
1046     return pos->eraseblock +
1047            ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
1048         nand->memorg.eraseblocks_per_lun);
1049 }
1050 
1051 /**
1052  * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
1053  * @nand: NAND device
1054  *
1055  * Return: true if the BBT has been initialized, false otherwise.
1056  */
1057 static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
1058 {
1059     return !!nand->bbt.cache;
1060 }
1061 
1062 /* MTD -> NAND helper functions. */
1063 int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
1064 int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len);
1065 
1066 #endif /* __LINUX_MTD_NAND_H */