/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Copyright 2017 - Free Electrons
 *
 *  Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#ifndef __LINUX_MTD_NAND_H
#define __LINUX_MTD_NAND_H

#include <linux/mtd/mtd.h>

struct nand_device;

/**
 * struct nand_memory_organization - Memory organization structure
 * @bits_per_cell: number of bits per NAND cell
 * @pagesize: page size
 * @oobsize: OOB area size
 * @pages_per_eraseblock: number of pages per eraseblock
 * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
 * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
 * @planes_per_lun: number of planes per LUN
 * @luns_per_target: number of LUNs per target (target is a synonym for die)
 * @ntargets: total number of targets exposed by the NAND device
 */
struct nand_memory_organization {
	unsigned int bits_per_cell;
	unsigned int pagesize;
	unsigned int oobsize;
	unsigned int pages_per_eraseblock;
	unsigned int eraseblocks_per_lun;
	unsigned int max_bad_eraseblocks_per_lun;
	unsigned int planes_per_lun;
	unsigned int luns_per_target;
	unsigned int ntargets;
};

#define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt)	\
	{							\
		.bits_per_cell = (bpc),				\
		.pagesize = (ps),				\
		.oobsize = (os),				\
		.pages_per_eraseblock = (ppe),			\
		.eraseblocks_per_lun = (epl),			\
		.max_bad_eraseblocks_per_lun = (mbb),		\
		.planes_per_lun = (ppl),			\
		.luns_per_target = (lpt),			\
		.ntargets = (nt),				\
	}
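
/*
 * Usage sketch (illustrative values, not taken from a real chip table): a
 * memorg initializer for a hypothetical 1Gbit SLC die with 2KiB pages, 64
 * bytes of OOB per page, 64 pages per eraseblock, 1024 eraseblocks per LUN,
 * at most 20 bad eraseblocks per LUN, one plane, one LUN and one die:
 *
 *	static const struct nand_memory_organization example_memorg =
 *		NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1);
 */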

/**
 * struct nand_row_converter - Information to convert an absolute offset into a
 *			       row address
 * @lun_addr_shift: position of the LUN identifier in the row address
 * @eraseblock_addr_shift: position of the eraseblock identifier in the row
 *			   address
 */
struct nand_row_converter {
	unsigned int lun_addr_shift;
	unsigned int eraseblock_addr_shift;
};

/**
 * struct nand_pos - NAND position object
 * @target: the NAND target/die
 * @lun: the LUN identifier
 * @plane: the plane within the LUN
 * @eraseblock: the eraseblock within the LUN
 * @page: the page within the LUN
 *
 * This information is typically used by specific sub-layers to select the
 * appropriate target/die and generate a row address to pass to the device.
 */
struct nand_pos {
	unsigned int target;
	unsigned int lun;
	unsigned int plane;
	unsigned int eraseblock;
	unsigned int page;
};

/**
 * enum nand_page_io_req_type - Direction of an I/O request
 * @NAND_PAGE_READ: from the chip, to the controller
 * @NAND_PAGE_WRITE: from the controller, to the chip
 */
enum nand_page_io_req_type {
	NAND_PAGE_READ = 0,
	NAND_PAGE_WRITE,
};

/**
 * struct nand_page_io_req - NAND I/O request object
 * @type: the type of page I/O: read or write
 * @pos: the position this I/O request is targeting
 * @dataoffs: the offset within the page
 * @datalen: the number of data bytes to read from/write to this page
 * @databuf: buffer to store data in or get data from
 * @ooboffs: the OOB offset within the page
 * @ooblen: the number of OOB bytes to read from/write to this page
 * @oobbuf: buffer to store OOB data in or get OOB data from
 * @mode: one of the %MTD_OPS_XXX modes
 *
 * This object is used to pass per-page I/O requests to NAND sub-layers. This
 * way all useful information is already formatted and specific NAND layers
 * can focus on translating it into specific commands/operations.
 */
struct nand_page_io_req {
	enum nand_page_io_req_type type;
	struct nand_pos pos;
	unsigned int dataoffs;
	unsigned int datalen;
	union {
		const void *out;
		void *in;
	} databuf;
	unsigned int ooboffs;
	unsigned int ooblen;
	union {
		const void *out;
		void *in;
	} oobbuf;
	int mode;
};

const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void);
const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void);

/**
 * enum nand_ecc_engine_type - NAND ECC engine type
 * @NAND_ECC_ENGINE_TYPE_INVALID: Invalid value
 * @NAND_ECC_ENGINE_TYPE_NONE: No ECC correction
 * @NAND_ECC_ENGINE_TYPE_SOFT: Software ECC correction
 * @NAND_ECC_ENGINE_TYPE_ON_HOST: On host hardware ECC correction
 * @NAND_ECC_ENGINE_TYPE_ON_DIE: On chip hardware ECC correction
 */
enum nand_ecc_engine_type {
	NAND_ECC_ENGINE_TYPE_INVALID,
	NAND_ECC_ENGINE_TYPE_NONE,
	NAND_ECC_ENGINE_TYPE_SOFT,
	NAND_ECC_ENGINE_TYPE_ON_HOST,
	NAND_ECC_ENGINE_TYPE_ON_DIE,
};

/**
 * enum nand_ecc_placement - NAND ECC bytes placement
 * @NAND_ECC_PLACEMENT_UNKNOWN: The actual position of the ECC bytes is unknown
 * @NAND_ECC_PLACEMENT_OOB: The ECC bytes are located in the OOB area
 * @NAND_ECC_PLACEMENT_INTERLEAVED: Syndrome layout, there are ECC bytes
 *				    interleaved with regular data in the main
 *				    area
 */
enum nand_ecc_placement {
	NAND_ECC_PLACEMENT_UNKNOWN,
	NAND_ECC_PLACEMENT_OOB,
	NAND_ECC_PLACEMENT_INTERLEAVED,
};

/**
 * enum nand_ecc_algo - NAND ECC algorithm
 * @NAND_ECC_ALGO_UNKNOWN: Unknown algorithm
 * @NAND_ECC_ALGO_HAMMING: Hamming algorithm
 * @NAND_ECC_ALGO_BCH: Bose-Chaudhuri-Hocquenghem algorithm
 * @NAND_ECC_ALGO_RS: Reed-Solomon algorithm
 */
enum nand_ecc_algo {
	NAND_ECC_ALGO_UNKNOWN,
	NAND_ECC_ALGO_HAMMING,
	NAND_ECC_ALGO_BCH,
	NAND_ECC_ALGO_RS,
};

/**
 * struct nand_ecc_props - NAND ECC properties
 * @engine_type: ECC engine type
 * @placement: OOB placement (if relevant)
 * @algo: ECC algorithm (if relevant)
 * @strength: ECC strength
 * @step_size: Number of bytes per step
 * @flags: Misc properties
 */
struct nand_ecc_props {
	enum nand_ecc_engine_type engine_type;
	enum nand_ecc_placement placement;
	enum nand_ecc_algo algo;
	unsigned int strength;
	unsigned int step_size;
	unsigned int flags;
};

#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }

/* NAND ECC misc flags */
#define NAND_ECC_MAXIMIZE_STRENGTH BIT(0)

/**
 * struct nand_bbt - bad block table object
 * @cache: in memory BBT cache
 */
struct nand_bbt {
	unsigned long *cache;
};

/**
 * struct nand_ops - NAND operations
 * @erase: erase a specific block. No need to check if the block is bad before
 *	   erasing, this has been taken care of by the generic NAND layer
 * @markbad: mark a specific block bad. No need to check if the block is
 *	     already marked bad, this has been taken care of by the generic
 *	     NAND layer. This method should just write the BBM (Bad Block
 *	     Marker) so that future calls to ->isbad() return true
 * @isbad: check whether a block is bad
 *
 * These are all low level operations that should be implemented by specialized
 * NAND layers (SPI NAND, raw NAND, ...).
 */
struct nand_ops {
	int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
	int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
	bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
};
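
/*
 * Implementation sketch: a specialized layer (SPI NAND, raw NAND, ...) wires
 * its low-level primitives into a static nand_ops instance and passes it to
 * nanddev_init(). All names prefixed with "mychip_" are hypothetical:
 *
 *	static int mychip_erase(struct nand_device *nand,
 *				const struct nand_pos *pos)
 *	{
 *		unsigned int row = nanddev_pos_to_row(nand, pos);
 *
 *		// Issue the controller-specific erase command for this row.
 *		return mychip_send_erase_cmd(nand, row);
 *	}
 *
 *	static const struct nand_ops mychip_ops = {
 *		.erase = mychip_erase,
 *		.markbad = mychip_markbad,
 *		.isbad = mychip_isbad,
 *	};
 */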

/**
 * struct nand_ecc_context - Context for the ECC engine
 * @conf: basic ECC engine parameters
 * @nsteps: number of ECC steps
 * @total: total number of bytes used for storing ECC codes, this is used by
 *	   generic OOB layouts
 * @priv: ECC engine driver private data
 */
struct nand_ecc_context {
	struct nand_ecc_props conf;
	unsigned int nsteps;
	unsigned int total;
	void *priv;
};

/**
 * struct nand_ecc_engine_ops - ECC engine operations
 * @init_ctx: given a desired user configuration for the pointed NAND device,
 *	      requests the ECC engine driver to setup a configuration with
 *	      values it supports
 * @cleanup_ctx: clean the context initialized by @init_ctx
 * @prepare_io_req: is called before reading/writing a page to enable the ECC
 *		    engine to take care of the actual ECC correction
 * @finish_io_req: is called after reading/writing a page to terminate the I/O
 *		   request and ensure proper ECC correction
 */
struct nand_ecc_engine_ops {
	int (*init_ctx)(struct nand_device *nand);
	void (*cleanup_ctx)(struct nand_device *nand);
	int (*prepare_io_req)(struct nand_device *nand,
			      struct nand_page_io_req *req);
	int (*finish_io_req)(struct nand_device *nand,
			     struct nand_page_io_req *req);
};

/**
 * enum nand_ecc_engine_integration - How the NAND ECC engine is integrated
 * @NAND_ECC_ENGINE_INTEGRATION_INVALID: Invalid value
 * @NAND_ECC_ENGINE_INTEGRATION_PIPELINED: Pipelined engine, performs on-the-fly
 *					   correction, does not need a bounce
 *					   buffer
 * @NAND_ECC_ENGINE_INTEGRATION_EXTERNAL: External engine, needs a bounce buffer
 */
enum nand_ecc_engine_integration {
	NAND_ECC_ENGINE_INTEGRATION_INVALID,
	NAND_ECC_ENGINE_INTEGRATION_PIPELINED,
	NAND_ECC_ENGINE_INTEGRATION_EXTERNAL,
};

/**
 * struct nand_ecc_engine - ECC engine abstraction for NAND flashes
 * @dev: Host device
 * @node: Private field for registration time
 * @ops: ECC engine operations
 * @integration: How the engine is integrated with the host
 *		 (only relevant on %NAND_ECC_ENGINE_TYPE_ON_HOST engines)
 * @priv: Private data
 */
struct nand_ecc_engine {
	struct device *dev;
	struct list_head node;
	struct nand_ecc_engine_ops *ops;
	enum nand_ecc_engine_integration integration;
	void *priv;
};

void of_get_nand_ecc_user_config(struct nand_device *nand);
int nand_ecc_init_ctx(struct nand_device *nand);
void nand_ecc_cleanup_ctx(struct nand_device *nand);
int nand_ecc_prepare_io_req(struct nand_device *nand,
			    struct nand_page_io_req *req);
int nand_ecc_finish_io_req(struct nand_device *nand,
			   struct nand_page_io_req *req);
bool nand_ecc_is_strong_enough(struct nand_device *nand);

#if IS_REACHABLE(CONFIG_MTD_NAND_CORE)
int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine);
int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine);
#else
static inline int
nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
{
	return -ENOTSUPP;
}
static inline int
nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
{
	return -ENOTSUPP;
}
#endif
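
/*
 * Registration sketch (hypothetical "myecc_" driver): an on-host hardware ECC
 * engine driver fills a nand_ecc_engine and registers it so that NAND
 * controller drivers can retrieve it later through
 * nand_ecc_get_on_host_hw_engine():
 *
 *	static struct nand_ecc_engine_ops myecc_engine_ops = {
 *		.init_ctx = myecc_init_ctx,
 *		.cleanup_ctx = myecc_cleanup_ctx,
 *		.prepare_io_req = myecc_prepare_io_req,
 *		.finish_io_req = myecc_finish_io_req,
 *	};
 *
 *	myecc->engine.dev = &pdev->dev;
 *	myecc->engine.ops = &myecc_engine_ops;
 *	myecc->engine.integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL;
 *	ret = nand_ecc_register_on_host_hw_engine(&myecc->engine);
 */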

struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand);
struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand);
struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand);
void nand_ecc_put_on_host_hw_engine(struct nand_device *nand);
struct device *nand_ecc_get_engine_dev(struct device *host);

#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void);
#else
static inline struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void)
{
	return NULL;
}
#endif

#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
#else
static inline struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void)
{
	return NULL;
}
#endif

/**
 * struct nand_ecc_req_tweak_ctx - Help for automatically tweaking requests
 * @orig_req: Copy of the original I/O request
 * @nand: Related NAND device, to have access to its memory organization
 * @page_buffer_size: Real size of the page buffer to use (can be set by the
 *		      user before the tweaking mechanism initialization)
 * @oob_buffer_size: Real size of the OOB buffer to use (can be set by the
 *		     user before the tweaking mechanism initialization)
 * @spare_databuf: Data bounce buffer
 * @spare_oobbuf: OOB bounce buffer
 * @bounce_data: Flag indicating a data bounce buffer is used
 * @bounce_oob: Flag indicating an OOB bounce buffer is used
 */
struct nand_ecc_req_tweak_ctx {
	struct nand_page_io_req orig_req;
	struct nand_device *nand;
	unsigned int page_buffer_size;
	unsigned int oob_buffer_size;
	void *spare_databuf;
	void *spare_oobbuf;
	bool bounce_data;
	bool bounce_oob;
};

int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
			       struct nand_device *nand);
void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx);
void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
			struct nand_page_io_req *req);
void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
			  struct nand_page_io_req *req);

/**
 * struct nand_ecc - Information relative to the ECC
 * @defaults: Default values, depend on the underlying subsystem
 * @requirements: ECC requirements from the NAND chip perspective
 * @user_conf: User desires in terms of ECC parameters
 * @ctx: ECC context for the ECC engine, derived from the device @requirements,
 *	 the @user_conf and the @defaults
 * @ondie_engine: On-die ECC engine reference, if any
 * @engine: ECC engine actually bound
 */
struct nand_ecc {
	struct nand_ecc_props defaults;
	struct nand_ecc_props requirements;
	struct nand_ecc_props user_conf;
	struct nand_ecc_context ctx;
	struct nand_ecc_engine *ondie_engine;
	struct nand_ecc_engine *engine;
};

/**
 * struct nand_device - NAND device
 * @mtd: MTD instance attached to the NAND device
 * @memorg: memory layout
 * @ecc: NAND ECC object attached to the NAND device
 * @rowconv: position to row address converter
 * @bbt: bad block table info
 * @ops: NAND operations attached to the NAND device
 *
 * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
 * should declare their own NAND object embedding a nand_device struct (that's
 * how inheritance is done).
 * The @memorg and @ecc.requirements fields should be filled at device
 * detection time to reflect the NAND device capabilities/requirements. Once
 * this is done nanddev_init() can be called. It will take care of converting
 * NAND information into MTD ones, which means the specialized NAND layers
 * should never manually tweak @mtd except for the ->_read/_write() hooks.
 */
struct nand_device {
	struct mtd_info mtd;
	struct nand_memory_organization memorg;
	struct nand_ecc ecc;
	struct nand_row_converter rowconv;
	struct nand_bbt bbt;
	const struct nand_ops *ops;
};
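
/*
 * Inheritance sketch (hypothetical "mynand_" names): a specialized layer
 * embeds struct nand_device in its own object and recovers it with
 * container_of(), mirroring what mtd_to_nanddev() does one level below:
 *
 *	struct mynand_chip {
 *		struct nand_device base;
 *		void __iomem *regs;
 *	};
 *
 *	static inline struct mynand_chip *to_mynand(struct nand_device *nand)
 *	{
 *		return container_of(nand, struct mynand_chip, base);
 *	}
 */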

/**
 * struct nand_io_iter - NAND I/O iterator
 * @req: current I/O request
 * @oobbytes_per_page: maximum number of OOB bytes per page
 * @dataleft: remaining number of data bytes to read/write
 * @oobleft: remaining number of OOB bytes to read/write
 *
 * Can be used by specialized NAND layers to iterate over all pages covered
 * by an MTD I/O request, which should greatly simplify the boilerplate code
 * needed to read/write data from/to a NAND device.
 */
struct nand_io_iter {
	struct nand_page_io_req req;
	unsigned int oobbytes_per_page;
	unsigned int dataleft;
	unsigned int oobleft;
};

/**
 * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
 * @mtd: MTD instance
 *
 * Return: the NAND device embedding @mtd.
 */
static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
{
	return container_of(mtd, struct nand_device, mtd);
}

/**
 * nanddev_to_mtd() - Get the MTD device attached to a NAND object
 * @nand: NAND object
 *
 * Return: the MTD device embedded in @nand.
 */
static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
{
	return &nand->mtd;
}

/**
 * nanddev_bits_per_cell() - Get the number of bits per cell
 * @nand: NAND device
 *
 * Return: the number of bits per cell.
 */
static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
{
	return nand->memorg.bits_per_cell;
}

/**
 * nanddev_page_size() - Get NAND page size
 * @nand: NAND device
 *
 * Return: the page size.
 */
static inline size_t nanddev_page_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize;
}

/**
 * nanddev_per_page_oobsize() - Get NAND OOB size
 * @nand: NAND device
 *
 * Return: the OOB size.
 */
static inline unsigned int
nanddev_per_page_oobsize(const struct nand_device *nand)
{
	return nand->memorg.oobsize;
}

/**
 * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
 * @nand: NAND device
 *
 * Return: the number of pages per eraseblock.
 */
static inline unsigned int
nanddev_pages_per_eraseblock(const struct nand_device *nand)
{
	return nand->memorg.pages_per_eraseblock;
}

/**
 * nanddev_pages_per_target() - Get the number of pages per target
 * @nand: NAND device
 *
 * Return: the number of pages per target.
 */
static inline unsigned int
nanddev_pages_per_target(const struct nand_device *nand)
{
	return nand->memorg.pages_per_eraseblock *
	       nand->memorg.eraseblocks_per_lun *
	       nand->memorg.luns_per_target;
}

/**
 * nanddev_eraseblock_size() - Get NAND eraseblock size
 * @nand: NAND device
 *
 * Return: the eraseblock size.
 */
static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
}

/**
 * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
 * @nand: NAND device
 *
 * Return: the number of eraseblocks per LUN.
 */
static inline unsigned int
nanddev_eraseblocks_per_lun(const struct nand_device *nand)
{
	return nand->memorg.eraseblocks_per_lun;
}

/**
 * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
 * @nand: NAND device
 *
 * Return: the number of eraseblocks per target.
 */
static inline unsigned int
nanddev_eraseblocks_per_target(const struct nand_device *nand)
{
	return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
}

/**
 * nanddev_target_size() - Get the total size provided by a single target/die
 * @nand: NAND device
 *
 * Return: the total size exposed by a single target/die in bytes.
 */
static inline u64 nanddev_target_size(const struct nand_device *nand)
{
	return (u64)nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun *
	       nand->memorg.pages_per_eraseblock *
	       nand->memorg.pagesize;
}

/**
 * nanddev_ntargets() - Get the number of targets/dies
 * @nand: NAND device
 *
 * Return: the number of targets/dies exposed by @nand.
 */
static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
{
	return nand->memorg.ntargets;
}

/**
 * nanddev_neraseblocks() - Get the total number of eraseblocks
 * @nand: NAND device
 *
 * Return: the total number of eraseblocks exposed by @nand.
 */
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
	return nand->memorg.ntargets * nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun;
}

/**
 * nanddev_size() - Get NAND size
 * @nand: NAND device
 *
 * Return: the total size (in bytes) exposed by @nand.
 */
static inline u64 nanddev_size(const struct nand_device *nand)
{
	return nanddev_target_size(nand) * nanddev_ntargets(nand);
}

/**
 * nanddev_get_memorg() - Extract memory organization info from a NAND device
 * @nand: NAND device
 *
 * This can be used by the upper layer to fill the memorg info before calling
 * nanddev_init().
 *
 * Return: the memorg object embedded in the NAND device.
 */
static inline struct nand_memory_organization *
nanddev_get_memorg(struct nand_device *nand)
{
	return &nand->memorg;
}

/**
 * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device
 * @nand: NAND device
 */
static inline const struct nand_ecc_props *
nanddev_get_ecc_conf(struct nand_device *nand)
{
	return &nand->ecc.ctx.conf;
}

/**
 * nanddev_get_ecc_nsteps() - Extract the number of ECC steps
 * @nand: NAND device
 */
static inline unsigned int
nanddev_get_ecc_nsteps(struct nand_device *nand)
{
	return nand->ecc.ctx.nsteps;
}

/**
 * nanddev_get_ecc_bytes_per_step() - Extract the number of ECC bytes per step
 * @nand: NAND device
 */
static inline unsigned int
nanddev_get_ecc_bytes_per_step(struct nand_device *nand)
{
	return nand->ecc.ctx.total / nand->ecc.ctx.nsteps;
}

/**
 * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND
 *				    device
 * @nand: NAND device
 */
static inline const struct nand_ecc_props *
nanddev_get_ecc_requirements(struct nand_device *nand)
{
	return &nand->ecc.requirements;
}

/**
 * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND
 *				    device
 * @nand: NAND device
 * @reqs: Requirements
 */
static inline void
nanddev_set_ecc_requirements(struct nand_device *nand,
			     const struct nand_ecc_props *reqs)
{
	nand->ecc.requirements = *reqs;
}

int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
		 struct module *owner);
void nanddev_cleanup(struct nand_device *nand);
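
/*
 * Bring-up sketch (hypothetical values and ops, reusing the examples above):
 * fill the memory organization and the chip's ECC requirements at detection
 * time, then hand the device to the generic layer:
 *
 *	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
 *	struct nand_ecc_props reqs = NAND_ECCREQ(8, 512);
 *	int ret;
 *
 *	*memorg = (struct nand_memory_organization)
 *		  NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1);
 *	nanddev_set_ecc_requirements(nand, &reqs);
 *
 *	ret = nanddev_init(nand, &mychip_ops, THIS_MODULE);
 *	if (!ret)
 *		ret = nanddev_register(nand);
 */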

/**
 * nanddev_register() - Register a NAND device
 * @nand: NAND device
 *
 * Registers the MTD device embedded in @nand. This is just a wrapper around
 * mtd_device_register().
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
static inline int nanddev_register(struct nand_device *nand)
{
	return mtd_device_register(&nand->mtd, NULL, 0);
}

/**
 * nanddev_unregister() - Unregister a NAND device
 * @nand: NAND device
 *
 * Unregisters the MTD device embedded in @nand. This is just a wrapper around
 * mtd_device_unregister().
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
static inline int nanddev_unregister(struct nand_device *nand)
{
	return mtd_device_unregister(&nand->mtd);
}

/**
 * nanddev_set_of_node() - Attach a DT node to a NAND device
 * @nand: NAND device
 * @np: DT node
 *
 * Attaches a DT node to a NAND device.
 */
static inline void nanddev_set_of_node(struct nand_device *nand,
				       struct device_node *np)
{
	mtd_set_of_node(&nand->mtd, np);
}

/**
 * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
 * @nand: NAND device
 *
 * Return: the DT node attached to @nand.
 */
static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
{
	return mtd_get_of_node(&nand->mtd);
}

/**
 * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
 * @nand: NAND device
 * @offs: absolute NAND offset (usually passed by the MTD layer)
 * @pos: a NAND position object to fill in
 *
 * Converts @offs into a nand_pos representation.
 *
 * Return: the offset within the page pointed to by @pos.
 */
static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
					       loff_t offs,
					       struct nand_pos *pos)
{
	unsigned int pageoffs;
	u64 tmp = offs;

	pageoffs = do_div(tmp, nand->memorg.pagesize);
	pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
	pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
	pos->lun = do_div(tmp, nand->memorg.luns_per_target);
	pos->target = tmp;

	return pageoffs;
}
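
/*
 * Worked example, assuming the hypothetical memorg above (2048-byte pages,
 * 64 pages per eraseblock, one LUN, one target): offs = 0x42000 is page
 * 0x42000 / 2048 = 132 of the device, so @pos ends up with page = 132 % 64
 * = 4, eraseblock = 132 / 64 = 2, lun = 0 and target = 0, and the returned
 * in-page offset is 0.
 */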

/**
 * nanddev_pos_cmp() - Compare two NAND positions
 * @a: First NAND position
 * @b: Second NAND position
 *
 * Compares two NAND positions.
 *
 * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
 */
static inline int nanddev_pos_cmp(const struct nand_pos *a,
				  const struct nand_pos *b)
{
	if (a->target != b->target)
		return a->target < b->target ? -1 : 1;

	if (a->lun != b->lun)
		return a->lun < b->lun ? -1 : 1;

	if (a->eraseblock != b->eraseblock)
		return a->eraseblock < b->eraseblock ? -1 : 1;

	if (a->page != b->page)
		return a->page < b->page ? -1 : 1;

	return 0;
}

/**
 * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
 * @nand: NAND device
 * @pos: the NAND position to convert
 *
 * Converts @pos NAND position into an absolute offset.
 *
 * Return: the absolute offset. Note that @pos points to the beginning of a
 *	   page, if one wants to point to a specific offset within this page
 *	   the returned offset has to be adjusted manually.
 */
static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
					 const struct nand_pos *pos)
{
	unsigned int npages;

	npages = pos->page +
		 ((pos->eraseblock +
		   (pos->lun +
		    (pos->target * nand->memorg.luns_per_target)) *
		   nand->memorg.eraseblocks_per_lun) *
		  nand->memorg.pages_per_eraseblock);

	return (loff_t)npages * nand->memorg.pagesize;
}

/**
 * nanddev_pos_to_row() - Extract a row address from a NAND position
 * @nand: NAND device
 * @pos: the position to convert
 *
 * Converts a NAND position into a row address that can then be passed to the
 * device.
 *
 * Return: the row address extracted from @pos.
 */
static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
					      const struct nand_pos *pos)
{
	return (pos->lun << nand->rowconv.lun_addr_shift) |
	       (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
	       pos->page;
}
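
/*
 * Illustration: the shifts come from @nand->rowconv, which nanddev_init()
 * derives from the memory organization. Assuming 64 pages per eraseblock,
 * the eraseblock identifier starts at bit 6, so eraseblock 2, page 4 in
 * LUN 0 yields the row address (2 << 6) | 4 = 0x84.
 */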

/**
 * nanddev_pos_next_target() - Move a position to the next target/die
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next target/die. Useful when you
 * want to iterate over all targets/dies of a NAND device.
 */
static inline void nanddev_pos_next_target(struct nand_device *nand,
					   struct nand_pos *pos)
{
	pos->page = 0;
	pos->plane = 0;
	pos->eraseblock = 0;
	pos->lun = 0;
	pos->target++;
}

/**
 * nanddev_pos_next_lun() - Move a position to the next LUN
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next LUN. Useful when you want to
 * iterate over all LUNs of a NAND device.
 */
static inline void nanddev_pos_next_lun(struct nand_device *nand,
					struct nand_pos *pos)
{
	if (pos->lun >= nand->memorg.luns_per_target - 1)
		return nanddev_pos_next_target(nand, pos);

	pos->lun++;
	pos->page = 0;
	pos->plane = 0;
	pos->eraseblock = 0;
}

/**
 * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next eraseblock. Useful when you
 * want to iterate over all eraseblocks of a NAND device.
 */
static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
					       struct nand_pos *pos)
{
	if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
		return nanddev_pos_next_lun(nand, pos);

	pos->eraseblock++;
	pos->page = 0;
	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
}

/**
 * nanddev_pos_next_page() - Move a position to the next page
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next page. Useful when you want to
 * iterate over all pages of a NAND device.
 */
static inline void nanddev_pos_next_page(struct nand_device *nand,
					 struct nand_pos *pos)
{
	if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
		return nanddev_pos_next_eraseblock(nand, pos);

	pos->page++;
}

/**
 * nanddev_io_iter_init - Initialize a NAND I/O iterator
 * @nand: NAND device
 * @reqtype: I/O request type (read or write)
 * @offs: absolute offset
 * @req: MTD request
 * @iter: NAND I/O iterator
 *
 * Initializes a NAND iterator based on the information passed by the MTD
 * layer.
 */
static inline void nanddev_io_iter_init(struct nand_device *nand,
					enum nand_page_io_req_type reqtype,
					loff_t offs, struct mtd_oob_ops *req,
					struct nand_io_iter *iter)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);

	iter->req.type = reqtype;
	iter->req.mode = req->mode;
	iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
	iter->req.ooboffs = req->ooboffs;
	iter->oobbytes_per_page = mtd_oobavail(mtd, req);
	iter->dataleft = req->len;
	iter->oobleft = req->ooblen;
	iter->req.databuf.in = req->datbuf;
	iter->req.datalen = min_t(unsigned int,
				  nand->memorg.pagesize - iter->req.dataoffs,
				  iter->dataleft);
	iter->req.oobbuf.in = req->oobbuf;
	iter->req.ooblen = min_t(unsigned int,
				 iter->oobbytes_per_page - iter->req.ooboffs,
				 iter->oobleft);
}

/**
 * nanddev_io_iter_next_page - Move to the next page
 * @nand: NAND device
 * @iter: NAND I/O iterator
 *
 * Updates the @iter to point to the next page.
 */
static inline void nanddev_io_iter_next_page(struct nand_device *nand,
					     struct nand_io_iter *iter)
{
	nanddev_pos_next_page(nand, &iter->req.pos);
	iter->dataleft -= iter->req.datalen;
	iter->req.databuf.in += iter->req.datalen;
	iter->oobleft -= iter->req.ooblen;
	iter->req.oobbuf.in += iter->req.ooblen;
	iter->req.dataoffs = 0;
	iter->req.ooboffs = 0;
	iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
				  iter->dataleft);
	iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
				 iter->oobleft);
}

/**
 * nanddev_io_iter_end - Should end iteration or not
 * @nand: NAND device
 * @iter: NAND I/O iterator
 *
 * Check whether @iter has reached the end of the NAND portion it was asked to
 * iterate on or not.
 *
 * Return: true if @iter has reached the end of the iteration request, false
 *	   otherwise.
 */
static inline bool nanddev_io_iter_end(struct nand_device *nand,
				       const struct nand_io_iter *iter)
{
	if (iter->dataleft || iter->oobleft)
		return false;

	return true;
}

/**
 * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD
 *			      I/O request
 * @nand: NAND device
 * @type: I/O request type (read or write)
 * @start: start address to read/write from
 * @req: MTD I/O request
 * @iter: NAND I/O iterator
 *
 * Should be used for iterating over pages that are contained in an MTD
 * request.
 */
#define nanddev_io_for_each_page(nand, type, start, req, iter)		\
	for (nanddev_io_iter_init(nand, type, start, req, iter);	\
	     !nanddev_io_iter_end(nand, iter);				\
	     nanddev_io_iter_next_page(nand, iter))
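
/*
 * Usage sketch, loosely modeled on how specialized layers consume this
 * iterator (the mychip_read_page() helper is hypothetical):
 *
 *	static int mychip_mtd_read_oob(struct mtd_info *mtd, loff_t from,
 *				       struct mtd_oob_ops *ops)
 *	{
 *		struct nand_device *nand = mtd_to_nanddev(mtd);
 *		struct nand_io_iter iter;
 *		int ret = 0;
 *
 *		nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops,
 *					 &iter) {
 *			ret = mychip_read_page(nand, &iter.req);
 *			if (ret)
 *				break;
 *
 *			ops->retlen += iter.req.datalen;
 *			ops->oobretlen += iter.req.ooblen;
 *		}
 *
 *		return ret;
 *	}
 */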

bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);

/* ECC related functions */
int nanddev_ecc_engine_init(struct nand_device *nand);
void nanddev_ecc_engine_cleanup(struct nand_device *nand);

static inline void *nand_to_ecc_ctx(struct nand_device *nand)
{
	return nand->ecc.ctx.priv;
}

/* BBT related functions */
enum nand_bbt_block_status {
	NAND_BBT_BLOCK_STATUS_UNKNOWN,
	NAND_BBT_BLOCK_GOOD,
	NAND_BBT_BLOCK_WORN,
	NAND_BBT_BLOCK_RESERVED,
	NAND_BBT_BLOCK_FACTORY_BAD,
	NAND_BBT_BLOCK_NUM_STATUS,
};

int nanddev_bbt_init(struct nand_device *nand);
void nanddev_bbt_cleanup(struct nand_device *nand);
int nanddev_bbt_update(struct nand_device *nand);
int nanddev_bbt_get_block_status(const struct nand_device *nand,
				 unsigned int entry);
int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
				 enum nand_bbt_block_status status);
int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);

/**
 * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
 * @nand: NAND device
 * @pos: the NAND position we want to get the BBT entry for
 *
 * Return the BBT entry used to store information about the eraseblock pointed
 * to by @pos.
 *
 * Return: the BBT entry storing information about the eraseblock pointed to
 *	   by @pos.
 */
static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
						    const struct nand_pos *pos)
{
	return pos->eraseblock +
	       ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
		nand->memorg.eraseblocks_per_lun);
}
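
/*
 * Usage sketch: querying the BBT for the block at a given position goes
 * through the flat entry index:
 *
 *	unsigned int entry = nanddev_bbt_pos_to_entry(nand, &pos);
 *	int status = nanddev_bbt_get_block_status(nand, entry);
 *
 *	bad = status == NAND_BBT_BLOCK_WORN ||
 *	      status == NAND_BBT_BLOCK_FACTORY_BAD;
 */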

/**
 * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
 * @nand: NAND device
 *
 * Return: true if the BBT has been initialized, false otherwise.
 */
static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
{
	return !!nand->bbt.cache;
}

/* MTD -> NAND helper functions */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len);

#endif /* __LINUX_MTD_NAND_H */