// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006      Red Hat UK Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

struct backing_dev_info *mtd_bdi;

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);

    return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);

    if (mtd)
        mtd_resume(mtd);
    return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
    .name = "mtd",
    .owner = THIS_MODULE,
    .pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
    return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);
    dev_t index = MTD_DEVT(mtd->index);

    /* remove /dev/mtdXro node */
    device_destroy(&mtd_class, index + 1);
}

#define MTD_DEVICE_ATTR_RO(name) \
static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)

#define MTD_DEVICE_ATTR_RW(name) \
static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)

static ssize_t mtd_type_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);
    char *type;

    switch (mtd->type) {
    case MTD_ABSENT:
        type = "absent";
        break;
    case MTD_RAM:
        type = "ram";
        break;
    case MTD_ROM:
        type = "rom";
        break;
    case MTD_NORFLASH:
        type = "nor";
        break;
    case MTD_NANDFLASH:
        type = "nand";
        break;
    case MTD_DATAFLASH:
        type = "dataflash";
        break;
    case MTD_UBIVOLUME:
        type = "ubi";
        break;
    case MTD_MLCNANDFLASH:
        type = "mlc-nand";
        break;
    default:
        type = "unknown";
    }

    return sysfs_emit(buf, "%s\n", type);
}
MTD_DEVICE_ATTR_RO(type);

static ssize_t mtd_flags_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);

    return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
}
MTD_DEVICE_ATTR_RO(flags);

static ssize_t mtd_size_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);

    return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
}
MTD_DEVICE_ATTR_RO(size);

static ssize_t mtd_erasesize_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);

    return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
}
MTD_DEVICE_ATTR_RO(erasesize);

static ssize_t mtd_writesize_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);

    return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
}
MTD_DEVICE_ATTR_RO(writesize);

static ssize_t mtd_subpagesize_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);
    unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

    return sysfs_emit(buf, "%u\n", subpagesize);
}
MTD_DEVICE_ATTR_RO(subpagesize);

static ssize_t mtd_oobsize_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);

    return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
}
MTD_DEVICE_ATTR_RO(oobsize);

static ssize_t mtd_oobavail_show(struct device *dev,
                 struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);

    return sysfs_emit(buf, "%u\n", mtd->oobavail);
}
MTD_DEVICE_ATTR_RO(oobavail);

static ssize_t mtd_numeraseregions_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);

    return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
}
MTD_DEVICE_ATTR_RO(numeraseregions);

static ssize_t mtd_name_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);

    return sysfs_emit(buf, "%s\n", mtd->name);
}
MTD_DEVICE_ATTR_RO(name);

static ssize_t mtd_ecc_strength_show(struct device *dev,
                     struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);

    return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
}
MTD_DEVICE_ATTR_RO(ecc_strength);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
                      struct device_attribute *attr,
                      char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);

    return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
                       struct device_attribute *attr,
                       const char *buf, size_t count)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);
    unsigned int bitflip_threshold;
    int retval;

    retval = kstrtouint(buf, 0, &bitflip_threshold);
    if (retval)
        return retval;

    mtd->bitflip_threshold = bitflip_threshold;
    return count;
}
MTD_DEVICE_ATTR_RW(bitflip_threshold);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);

    return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
}
MTD_DEVICE_ATTR_RO(ecc_step_size);

static ssize_t mtd_corrected_bits_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);
    struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

    return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
}
MTD_DEVICE_ATTR_RO(corrected_bits); /* ecc stats corrected */

static ssize_t mtd_ecc_failures_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);
    struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

    return sysfs_emit(buf, "%u\n", ecc_stats->failed);
}
MTD_DEVICE_ATTR_RO(ecc_failures);   /* ecc stats errors */

static ssize_t mtd_bad_blocks_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);
    struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

    return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
}
MTD_DEVICE_ATTR_RO(bad_blocks);

static ssize_t mtd_bbt_blocks_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    struct mtd_info *mtd = dev_get_drvdata(dev);
    struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

    return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
}
MTD_DEVICE_ATTR_RO(bbt_blocks);

static struct attribute *mtd_attrs[] = {
    &dev_attr_type.attr,
    &dev_attr_flags.attr,
    &dev_attr_size.attr,
    &dev_attr_erasesize.attr,
    &dev_attr_writesize.attr,
    &dev_attr_subpagesize.attr,
    &dev_attr_oobsize.attr,
    &dev_attr_oobavail.attr,
    &dev_attr_numeraseregions.attr,
    &dev_attr_name.attr,
    &dev_attr_ecc_strength.attr,
    &dev_attr_ecc_step_size.attr,
    &dev_attr_corrected_bits.attr,
    &dev_attr_ecc_failures.attr,
    &dev_attr_bad_blocks.attr,
    &dev_attr_bbt_blocks.attr,
    &dev_attr_bitflip_threshold.attr,
    NULL,
};
ATTRIBUTE_GROUPS(mtd);

static const struct device_type mtd_devtype = {
    .name       = "mtd",
    .groups     = mtd_groups,
    .release    = mtd_release,
};

static bool mtd_expert_analysis_mode;

#ifdef CONFIG_DEBUG_FS
bool mtd_check_expert_analysis_mode(void)
{
    const char *mtd_expert_analysis_warning =
        "Bad block checks have been entirely disabled.\n"
        "This is only reserved for post-mortem forensics and debug purposes.\n"
        "Never enable this mode if you do not know what you are doing!\n";

    return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
}
EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
#endif

static struct dentry *dfs_dir_mtd;

static void mtd_debugfs_populate(struct mtd_info *mtd)
{
    struct device *dev = &mtd->dev;

    if (IS_ERR_OR_NULL(dfs_dir_mtd))
        return;

    mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
}

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
    switch (mtd->type) {
    case MTD_RAM:
        return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
            NOMMU_MAP_READ | NOMMU_MAP_WRITE;
    case MTD_ROM:
        return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
            NOMMU_MAP_READ;
    default:
        return NOMMU_MAP_COPY;
    }
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
                   void *cmd)
{
    struct mtd_info *mtd;

    mtd = container_of(n, struct mtd_info, reboot_notifier);
    mtd->_reboot(mtd);

    return NOTIFY_DONE;
}

/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve pairing information associated to the wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to @wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *  info.group = i;
 *  mtd_pairing_info_to_wunit(mtd, &info);
 *  ...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
                  struct mtd_pairing_info *info)
{
    struct mtd_info *master = mtd_get_master(mtd);
    int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

    if (wunit < 0 || wunit >= npairs)
        return -EINVAL;

    if (master->pairing && master->pairing->get_info)
        return master->pairing->get_info(master, wunit, info);

    info->group = 0;
    info->pair = wunit;

    return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
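
/*
 * A minimal sketch of the lookup described above: given an absolute offset,
 * walk every write unit paired with it. The example_* helper is hypothetical
 * and kept under #if 0, so it is never built.
 */
#if 0
static void example_print_paired_wunits(struct mtd_info *mtd, loff_t offs)
{
    struct mtd_pairing_info info;
    int i, wunit;

    /* Translate the absolute offset into a write unit... */
    wunit = mtd_offset_to_wunit(mtd, offs);
    if (mtd_wunit_to_pairing_info(mtd, wunit, &info))
        return;

    /* ...then keep the pair fixed and iterate over the groups. */
    for (i = 0; i < mtd_pairing_groups(mtd); i++) {
        info.group = i;
        wunit = mtd_pairing_info_to_wunit(mtd, &info);
        if (wunit >= 0)
            pr_info("wunit %d belongs to pair %d\n", wunit, info.pair);
    }
}
#endif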

/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a positive number representing the wunit associated to the info
 * struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * doc).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *  wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *  mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *        mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
                  const struct mtd_pairing_info *info)
{
    struct mtd_info *master = mtd_get_master(mtd);
    int ngroups = mtd_pairing_groups(master);
    int npairs = mtd_wunit_per_eb(master) / ngroups;

    if (!info || info->pair < 0 || info->pair >= npairs ||
        info->group < 0 || info->group >= ngroups)
        return -EINVAL;

    if (master->pairing && master->pairing->get_wunit)
        return master->pairing->get_wunit(master, info);

    return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);

/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups.
 *
 * This number is usually equal to the number of bits exposed by a single
 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
 * to iterate over all pages of a given pair.
 */
int mtd_pairing_groups(struct mtd_info *mtd)
{
    struct mtd_info *master = mtd_get_master(mtd);

    if (!master->pairing || !master->pairing->ngroups)
        return 1;

    return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);

static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
                  void *val, size_t bytes)
{
    struct mtd_info *mtd = priv;
    size_t retlen;
    int err;

    err = mtd_read(mtd, offset, bytes, &retlen, val);
    if (err && err != -EUCLEAN)
        return err;

    return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_add(struct mtd_info *mtd)
{
    struct device_node *node = mtd_get_of_node(mtd);
    struct nvmem_config config = {};

    config.id = -1;
    config.dev = &mtd->dev;
    config.name = dev_name(&mtd->dev);
    config.owner = THIS_MODULE;
    config.reg_read = mtd_nvmem_reg_read;
    config.size = mtd->size;
    config.word_size = 1;
    config.stride = 1;
    config.read_only = true;
    config.root_only = true;
    config.ignore_wp = true;
    config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
    config.priv = mtd;

    mtd->nvmem = nvmem_register(&config);
    if (IS_ERR(mtd->nvmem)) {
        /* Just ignore if there is no NVMEM support in the kernel */
        if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
            mtd->nvmem = NULL;
        } else {
            dev_err(&mtd->dev, "Failed to register NVMEM device\n");
            return PTR_ERR(mtd->nvmem);
        }
    }

    return 0;
}

static void mtd_check_of_node(struct mtd_info *mtd)
{
    struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
    const char *pname, *prefix = "partition-";
    int plen, mtd_name_len, offset, prefix_len;
    struct mtd_info *parent;
    bool found = false;

    /* Check if MTD already has a device node */
    if (dev_of_node(&mtd->dev))
        return;

    /* Check if a partitions node exists */
    if (!mtd_is_partition(mtd))
        return;
    parent = mtd->parent;
    parent_dn = dev_of_node(&parent->dev);
    if (!parent_dn)
        return;

    partitions = of_get_child_by_name(parent_dn, "partitions");
    if (!partitions)
        goto exit_parent;

    prefix_len = strlen(prefix);
    mtd_name_len = strlen(mtd->name);

    /* Search if a partition is defined with the same name */
    for_each_child_of_node(partitions, mtd_dn) {
        offset = 0;

        /* Skip partition with no/wrong prefix */
        if (!of_node_name_prefix(mtd_dn, "partition-"))
            continue;

        /* Labels have priority. Check them first */
        if (of_property_read_string(mtd_dn, "label", &pname)) {
            of_property_read_string(mtd_dn, "name", &pname);
            offset = prefix_len;
        }

        plen = strlen(pname) - offset;
        if (plen == mtd_name_len &&
            !strncmp(mtd->name, pname + offset, plen)) {
            found = true;
            break;
        }
    }

    if (!found)
        goto exit_partitions;

    /* Set of_node only for nvmem */
    if (of_device_is_compatible(mtd_dn, "nvmem-cells"))
        mtd_set_of_node(mtd, mtd_dn);

exit_partitions:
    of_node_put(partitions);
exit_parent:
    of_node_put(parent_dn);
}

/**
 *  add_mtd_device - register an MTD device
 *  @mtd: pointer to new MTD device info structure
 *
 *  Add a device to the list of MTD devices present in the system, and
 *  notify each currently active MTD 'user' of its arrival. Returns
 *  zero on success or a negative error code on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
    struct device_node *np = mtd_get_of_node(mtd);
    struct mtd_info *master = mtd_get_master(mtd);
    struct mtd_notifier *not;
    int i, error, ofidx;

    /*
     * May occur, for instance, on buggy drivers which call
     * mtd_device_parse_register() multiple times on the same master MTD,
     * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
     */
    if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
        return -EEXIST;

    BUG_ON(mtd->writesize == 0);

    /*
     * MTD drivers should implement ->_{write,read}() or
     * ->_{write,read}_oob(), but not both.
     */
    if (WARN_ON((mtd->_write && mtd->_write_oob) ||
            (mtd->_read && mtd->_read_oob)))
        return -EINVAL;

    if (WARN_ON((!mtd->erasesize || !master->_erase) &&
            !(mtd->flags & MTD_NO_ERASE)))
        return -EINVAL;

    /*
     * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
     * master is an MLC NAND and has a proper pairing scheme defined.
     * We also reject masters that implement ->_writev() for now, because
     * NAND controller drivers don't implement this hook, and adding the
     * SLC -> MLC address/length conversion to this path is useless if we
     * don't have a user.
     */
    if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
        (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
         !master->pairing || master->_writev))
        return -EINVAL;

    mutex_lock(&mtd_table_mutex);

    ofidx = -1;
    if (np)
        ofidx = of_alias_get_id(np, "mtd");
    if (ofidx >= 0)
        i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
    else
        i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
    if (i < 0) {
        error = i;
        goto fail_locked;
    }

    mtd->index = i;
    mtd->usecount = 0;

    /* default value if not set by driver */
    if (mtd->bitflip_threshold == 0)
        mtd->bitflip_threshold = mtd->ecc_strength;

    if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
        int ngroups = mtd_pairing_groups(master);

        mtd->erasesize /= ngroups;
        mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
                mtd->erasesize;
    }

    if (is_power_of_2(mtd->erasesize))
        mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
    else
        mtd->erasesize_shift = 0;

    if (is_power_of_2(mtd->writesize))
        mtd->writesize_shift = ffs(mtd->writesize) - 1;
    else
        mtd->writesize_shift = 0;

    mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
    mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

    /* Some chips always power up locked. Unlock them now */
    if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
        error = mtd_unlock(mtd, 0, mtd->size);
        if (error && error != -EOPNOTSUPP)
            printk(KERN_WARNING
                   "%s: unlock failed, writes may not work\n",
                   mtd->name);
        /* Ignore unlock failures? */
        error = 0;
    }

    /* Caller should have set dev.parent to match the
     * physical device, if appropriate.
     */
    mtd->dev.type = &mtd_devtype;
    mtd->dev.class = &mtd_class;
    mtd->dev.devt = MTD_DEVT(i);
    dev_set_name(&mtd->dev, "mtd%d", i);
    dev_set_drvdata(&mtd->dev, mtd);
    mtd_check_of_node(mtd);
    of_node_get(mtd_get_of_node(mtd));
    error = device_register(&mtd->dev);
    if (error)
        goto fail_added;

    /* Add the nvmem provider */
    error = mtd_nvmem_add(mtd);
    if (error)
        goto fail_nvmem_add;

    mtd_debugfs_populate(mtd);

    device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
              "mtd%dro", i);

    pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
    /* No need to get a refcount on the module containing
       the notifier, since we hold the mtd_table_mutex */
    list_for_each_entry(not, &mtd_notifiers, list)
        not->add(mtd);

    mutex_unlock(&mtd_table_mutex);
    /* We _know_ we aren't being removed, because
       our caller is still holding us here. So none
       of this try_ nonsense, and no bitching about it
       either. :) */
    __module_get(THIS_MODULE);
    return 0;

fail_nvmem_add:
    device_unregister(&mtd->dev);
fail_added:
    of_node_put(mtd_get_of_node(mtd));
    idr_remove(&mtd_idr, i);
fail_locked:
    mutex_unlock(&mtd_table_mutex);
    return error;
}

/**
 *  del_mtd_device - unregister an MTD device
 *  @mtd: pointer to MTD device info structure
 *
 *  Remove a device from the list of MTD devices present in the system,
 *  and notify each currently active MTD 'user' of its departure.
 *  Returns zero on success or a negative error code on failure: -ENODEV
 *  if the device is not present in the list, or -EBUSY if it is still
 *  in use.
 */

int del_mtd_device(struct mtd_info *mtd)
{
    int ret;
    struct mtd_notifier *not;

    mutex_lock(&mtd_table_mutex);

    if (idr_find(&mtd_idr, mtd->index) != mtd) {
        ret = -ENODEV;
        goto out_error;
    }

    /* No need to get a refcount on the module containing
       the notifier, since we hold the mtd_table_mutex */
    list_for_each_entry(not, &mtd_notifiers, list)
        not->remove(mtd);

    if (mtd->usecount) {
        printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
               mtd->index, mtd->name, mtd->usecount);
        ret = -EBUSY;
    } else {
        debugfs_remove_recursive(mtd->dbg.dfs_dir);

        /* Try to remove the NVMEM provider */
        nvmem_unregister(mtd->nvmem);

        device_unregister(&mtd->dev);

        /* Clear dev so mtd can be safely re-registered later if desired */
        memset(&mtd->dev, 0, sizeof(mtd->dev));

        idr_remove(&mtd_idr, mtd->index);
        of_node_put(mtd_get_of_node(mtd));

        module_put(THIS_MODULE);
        ret = 0;
    }

out_error:
    mutex_unlock(&mtd_table_mutex);
    return ret;
}

/*
 * Set a few defaults based on the parent devices, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
    if (mtd->dev.parent) {
        if (!mtd->owner && mtd->dev.parent->driver)
            mtd->owner = mtd->dev.parent->driver->owner;
        if (!mtd->name)
            mtd->name = dev_name(mtd->dev.parent);
    } else {
        pr_debug("mtd device won't show a device symlink in sysfs\n");
    }

    INIT_LIST_HEAD(&mtd->partitions);
    mutex_init(&mtd->master.partitions_lock);
    mutex_init(&mtd->master.chrdev_lock);
}

static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
{
    struct otp_info *info;
    ssize_t size = 0;
    unsigned int i;
    size_t retlen;
    int ret;

    info = kmalloc(PAGE_SIZE, GFP_KERNEL);
    if (!info)
        return -ENOMEM;

    if (is_user)
        ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
    else
        ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
    if (ret)
        goto err;

    for (i = 0; i < retlen / sizeof(*info); i++)
        size += info[i].length;

    kfree(info);
    return size;

err:
    kfree(info);

    /* ENODATA means there is no OTP region. */
    return ret == -ENODATA ? 0 : ret;
}

static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
                           const char *compatible,
                           int size,
                           nvmem_reg_read_t reg_read)
{
    struct nvmem_device *nvmem = NULL;
    struct nvmem_config config = {};
    struct device_node *np;

    /* DT binding is optional */
    np = of_get_compatible_child(mtd->dev.of_node, compatible);

    /* OTP nvmem will be registered on the physical device */
    config.dev = mtd->dev.parent;
    config.name = kasprintf(GFP_KERNEL, "%s-%s", dev_name(&mtd->dev), compatible);
    config.id = NVMEM_DEVID_NONE;
    config.owner = THIS_MODULE;
    config.type = NVMEM_TYPE_OTP;
    config.root_only = true;
    config.ignore_wp = true;
    config.reg_read = reg_read;
    config.size = size;
    config.of_node = np;
    config.priv = mtd;

    nvmem = nvmem_register(&config);
    /* Just ignore if there is no NVMEM support in the kernel */
    if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
        nvmem = NULL;

    of_node_put(np);
    kfree(config.name);

    return nvmem;
}

static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
                       void *val, size_t bytes)
{
    struct mtd_info *mtd = priv;
    size_t retlen;
    int ret;

    ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
    if (ret)
        return ret;

    return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
                       void *val, size_t bytes)
{
    struct mtd_info *mtd = priv;
    size_t retlen;
    int ret;

    ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
    if (ret)
        return ret;

    return retlen == bytes ? 0 : -EIO;
}

static int mtd_otp_nvmem_add(struct mtd_info *mtd)
{
    struct nvmem_device *nvmem;
    ssize_t size;
    int err;

    if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
        size = mtd_otp_size(mtd, true);
        if (size < 0)
            return size;

        if (size > 0) {
            nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
                               mtd_nvmem_user_otp_reg_read);
            if (IS_ERR(nvmem)) {
                dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
                return PTR_ERR(nvmem);
            }
            mtd->otp_user_nvmem = nvmem;
        }
    }

    if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
        size = mtd_otp_size(mtd, false);
        if (size < 0) {
            err = size;
            goto err;
        }

        if (size > 0) {
            nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
                               mtd_nvmem_fact_otp_reg_read);
            if (IS_ERR(nvmem)) {
                dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
                err = PTR_ERR(nvmem);
                goto err;
            }
            mtd->otp_factory_nvmem = nvmem;
        }
    }

    return 0;

err:
    nvmem_unregister(mtd->otp_user_nvmem);
    return err;
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then it tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found, this function tries to fall back to the information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
                  struct mtd_part_parser_data *parser_data,
                  const struct mtd_partition *parts,
                  int nr_parts)
{
    int ret;

    mtd_set_dev_defaults(mtd);

    if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
        ret = add_mtd_device(mtd);
        if (ret)
            return ret;
    }

    /* Prefer parsed partitions over driver-provided fallback */
    ret = parse_mtd_partitions(mtd, types, parser_data);
    if (ret == -EPROBE_DEFER)
        goto out;

    if (ret > 0)
        ret = 0;
    else if (nr_parts)
        ret = add_mtd_partitions(mtd, parts, nr_parts);
    else if (!device_is_registered(&mtd->dev))
        ret = add_mtd_device(mtd);
    else
        ret = 0;

    if (ret)
        goto out;

    /*
     * FIXME: some drivers unfortunately call this function more than once.
     * So we have to check if we've already assigned the reboot notifier.
     *
     * Generally, we can make multiple calls work for most cases, but it
     * does cause problems with parse_mtd_partitions() above (e.g.,
     * cmdlineparts will register partitions more than once).
     */
    WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
          "MTD already registered\n");
    if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
        mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
        register_reboot_notifier(&mtd->reboot_notifier);
    }

    ret = mtd_otp_nvmem_add(mtd);

out:
    if (ret && device_is_registered(&mtd->dev))
        del_mtd_device(mtd);

    return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
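
/*
 * A sketch of the driver pattern described in the kerneldoc above: register
 * with the default parsers and a static fallback table. The example_* names
 * are hypothetical, and the snippet is kept under #if 0, so it is never built.
 */
#if 0
static const struct mtd_partition example_fallback_parts[] = {
    { .name = "boot",   .offset = 0,                  .size = 0x80000 },
    { .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static int example_flash_register(struct mtd_info *mtd)
{
    /* NULL @types selects the default parsers; the static table is
     * only used when parsing finds nothing. */
    return mtd_device_parse_register(mtd, NULL, NULL,
                     example_fallback_parts,
                     ARRAY_SIZE(example_fallback_parts));
}
#endif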

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
    int err;

    if (master->_reboot) {
        unregister_reboot_notifier(&master->reboot_notifier);
        memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
    }

    nvmem_unregister(master->otp_user_nvmem);
    nvmem_unregister(master->otp_factory_nvmem);

    err = del_mtd_partitions(master);
    if (err)
        return err;

    if (!device_is_registered(&master->dev))
        return 0;

    return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 *  register_mtd_user - register a 'user' of MTD devices.
 *  @new: pointer to notifier info structure
 *
 *  Registers a pair of callback functions to be called upon addition
 *  or removal of MTD devices. Causes the 'add' callback to be immediately
 *  invoked for each MTD device currently present in the system.
 */
void register_mtd_user(struct mtd_notifier *new)
{
    struct mtd_info *mtd;

    mutex_lock(&mtd_table_mutex);

    list_add(&new->list, &mtd_notifiers);

    __module_get(THIS_MODULE);

    mtd_for_each_device(mtd)
        new->add(mtd);

    mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
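
/*
 * A minimal sketch of an MTD 'user' as described above: a pair of callbacks
 * wrapped in a struct mtd_notifier. The example_* names are hypothetical, and
 * the snippet is kept under #if 0, so it is never built.
 */
#if 0
static void example_mtd_added(struct mtd_info *mtd)
{
    pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
}

static void example_mtd_removed(struct mtd_info *mtd)
{
    pr_info("mtd%d is going away\n", mtd->index);
}

static struct mtd_notifier example_notifier = {
    .add    = example_mtd_added,
    .remove = example_mtd_removed,
};

/* register_mtd_user(&example_notifier) also replays .add for every
 * device already present; unregister_mtd_user() replays .remove the
 * same way. */
#endif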

/**
 *  unregister_mtd_user - unregister a 'user' of MTD devices.
 *  @old: pointer to notifier info structure
 *
 *  Removes a callback function pair from the list of 'users' to be
 *  notified upon addition or removal of MTD devices. Causes the
 *  'remove' callback to be immediately invoked for each MTD device
 *  currently present in the system.
 */
int unregister_mtd_user(struct mtd_notifier *old)
{
    struct mtd_info *mtd;

    mutex_lock(&mtd_table_mutex);

    module_put(THIS_MODULE);

    mtd_for_each_device(mtd)
        old->remove(mtd);

    list_del(&old->list);
    mutex_unlock(&mtd_table_mutex);
    return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 *  get_mtd_device - obtain a validated handle for an MTD device
 *  @mtd: last known address of the required MTD device
 *  @num: internal device number of the required MTD device
 *
 *  Given a number and NULL address, return the num'th entry in the device
 *  table, if any.  Given an address and num == -1, search the device table
 *  for a device with that address and return it if it's still present. Given
 *  both, return the num'th device only if its address matches. Return an
 *  error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
    struct mtd_info *ret = NULL, *other;
    int err = -ENODEV;

    mutex_lock(&mtd_table_mutex);

    if (num == -1) {
        mtd_for_each_device(other) {
            if (other == mtd) {
                ret = mtd;
                break;
            }
        }
    } else if (num >= 0) {
        ret = idr_find(&mtd_idr, num);
        if (mtd && mtd != ret)
            ret = NULL;
    }

    if (!ret) {
        ret = ERR_PTR(err);
        goto out;
    }

    err = __get_mtd_device(ret);
    if (err)
        ret = ERR_PTR(err);
out:
    mutex_unlock(&mtd_table_mutex);
    return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);
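
/*
 * A minimal sketch of the get/put discipline described above: look a device
 * up by number, use it, then drop the reference. The example_* helper and the
 * caller-supplied buffer are hypothetical; kept under #if 0, never built.
 */
#if 0
static int example_read_first_page(int num, u_char *buf)
{
    struct mtd_info *mtd = get_mtd_device(NULL, num);
    size_t retlen;
    int err;

    if (IS_ERR(mtd))
        return PTR_ERR(mtd);

    err = mtd_read(mtd, 0, mtd->writesize, &retlen, buf);

    /* Every successful get_mtd_device() must be paired with a put. */
    put_mtd_device(mtd);
    return err;
}
#endif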


int __get_mtd_device(struct mtd_info *mtd)
{
    struct mtd_info *master = mtd_get_master(mtd);
    int err;

    if (!try_module_get(master->owner))
        return -ENODEV;

    if (master->_get_device) {
        err = master->_get_device(mtd);

        if (err) {
            module_put(master->owner);
            return err;
        }
    }

    master->usecount++;

    while (mtd->parent) {
        mtd->usecount++;
        mtd = mtd->parent;
    }

    return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 *  get_mtd_device_nm - obtain a validated handle for an MTD device by
 *  device name
 *  @name: MTD device name to open
 *
 *  This function returns MTD device description structure in case of
 *  success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
    int err = -ENODEV;
    struct mtd_info *mtd = NULL, *other;

    mutex_lock(&mtd_table_mutex);

    mtd_for_each_device(other) {
        if (!strcmp(name, other->name)) {
            mtd = other;
            break;
        }
    }

    if (!mtd)
        goto out_unlock;

    err = __get_mtd_device(mtd);
    if (err)
        goto out_unlock;

    mutex_unlock(&mtd_table_mutex);
    return mtd;

out_unlock:
    mutex_unlock(&mtd_table_mutex);
    return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
    mutex_lock(&mtd_table_mutex);
    __put_mtd_device(mtd);
    mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
    struct mtd_info *master = mtd_get_master(mtd);

    while (mtd->parent) {
        --mtd->usecount;
        BUG_ON(mtd->usecount < 0);
        mtd = mtd->parent;
    }

    master->usecount--;

    if (master->_put_device)
        master->_put_device(master);

    module_put(master->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and to update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
    struct mtd_info *master = mtd_get_master(mtd);
    u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
    struct erase_info adjinstr;
    int ret;

    instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
    adjinstr = *instr;

    if (!mtd->erasesize || !master->_erase)
        return -ENOTSUPP;

    if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
        return -EINVAL;
    if (!(mtd->flags & MTD_WRITEABLE))
        return -EROFS;

    if (!instr->len)
        return 0;

    ledtrig_mtd_activity();

    if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
        adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
                master->erasesize;
        adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
                master->erasesize) -
                   adjinstr.addr;
    }

    adjinstr.addr += mst_ofs;

    ret = master->_erase(master, &adjinstr);

    if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
        instr->fail_addr = adjinstr.fail_addr - mst_ofs;
        if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
            instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
                             master);
            instr->fail_addr *= mtd->erasesize;
        }
    }

    return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
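
/*
 * A minimal sketch of the contract described above: erase the first block and
 * report instr.fail_addr when the driver filled it in. The example_* helper
 * is hypothetical and kept under #if 0, so it is never built.
 */
#if 0
static int example_erase_first_block(struct mtd_info *mtd)
{
    struct erase_info instr = {
        .addr = 0,
        .len  = mtd->erasesize,
    };
    int err;

    err = mtd_erase(mtd, &instr);
    if (err && instr.fail_addr != MTD_FAIL_ADDR_UNKNOWN)
        pr_err("erase failed at 0x%llx\n",
               (unsigned long long)instr.fail_addr);
    return err;
}
#endif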

/*
 * This stuff is for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
          void **virt, resource_size_t *phys)
{
    struct mtd_info *master = mtd_get_master(mtd);

    *retlen = 0;
    *virt = NULL;
    if (phys)
        *phys = 0;
    if (!master->_point)
        return -EOPNOTSUPP;
    if (from < 0 || from >= mtd->size || len > mtd->size - from)
        return -EINVAL;
    if (!len)
        return 0;

    from = mtd_get_master_ofs(mtd, from);
    return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
    struct mtd_info *master = mtd_get_master(mtd);

    if (!master->_unpoint)
        return -EOPNOTSUPP;
    if (from < 0 || from >= mtd->size || len > mtd->size - from)
        return -EINVAL;
    if (!len)
        return 0;
    return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
                    unsigned long offset, unsigned long flags)
{
    size_t retlen;
    void *virt;
    int ret;

    ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
    if (ret)
        return ret;
    if (retlen != len) {
        mtd_unpoint(mtd, offset, retlen);
        return -ENOSYS;
    }
    return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
                 const struct mtd_ecc_stats *old_stats)
{
    struct mtd_ecc_stats diff;

    if (master == mtd)
        return;

    diff = master->ecc_stats;
    diff.failed -= old_stats->failed;
    diff.corrected -= old_stats->corrected;

    while (mtd->parent) {
        mtd->ecc_stats.failed += diff.failed;
        mtd->ecc_stats.corrected += diff.corrected;
        mtd = mtd->parent;
    }
}

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
         u_char *buf)
{
    struct mtd_oob_ops ops = {
        .len = len,
        .datbuf = buf,
    };
    int ret;

    ret = mtd_read_oob(mtd, from, &ops);
    *retlen = ops.retlen;

    return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
          const u_char *buf)
{
    struct mtd_oob_ops ops = {
        .len = len,
        .datbuf = (u8 *)buf,
    };
    int ret;

    ret = mtd_write_oob(mtd, to, &ops);
    *retlen = ops.retlen;

    return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);

/*
 * In blackbox flight-recorder-like scenarios we want to make successful
 * writes in interrupt context. panic_write() is only intended to be called
 * when it is known the kernel is about to panic and we need the write to
 * succeed. Since the kernel is not going to be running for much longer, this
 * function can break locks and delay to ensure the write succeeds (but not
 * sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
            const u_char *buf)
{
    struct mtd_info *master = mtd_get_master(mtd);

    *retlen = 0;
    if (!master->_panic_write)
        return -EOPNOTSUPP;
    if (to < 0 || to >= mtd->size || len > mtd->size - to)
        return -EINVAL;
    if (!(mtd->flags & MTD_WRITEABLE))
        return -EROFS;
    if (!len)
        return 0;
    if (!master->oops_panic_write)
        master->oops_panic_write = true;

    return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
                    retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
                 struct mtd_oob_ops *ops)
{
    /*
     * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
     * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
     * this case.
     */
    if (!ops->datbuf)
        ops->len = 0;

    if (!ops->oobbuf)
        ops->ooblen = 0;

    if (offs < 0 || offs + ops->len > mtd->size)
        return -EINVAL;

    if (ops->ooblen) {
        size_t maxooblen;

        if (ops->ooboffs >= mtd_oobavail(mtd, ops))
            return -EINVAL;

        maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
                      mtd_div_by_ws(offs, mtd)) *
                 mtd_oobavail(mtd, ops)) - ops->ooboffs;
        if (ops->ooblen > maxooblen)
            return -EINVAL;
    }

    return 0;
}

static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
                struct mtd_oob_ops *ops)
{
    struct mtd_info *master = mtd_get_master(mtd);
    int ret;

    from = mtd_get_master_ofs(mtd, from);
    if (master->_read_oob)
        ret = master->_read_oob(master, from, ops);
    else
        ret = master->_read(master, from, ops->len, &ops->retlen,
                    ops->datbuf);

    return ret;
}

static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
                 struct mtd_oob_ops *ops)
{
    struct mtd_info *master = mtd_get_master(mtd);
    int ret;

    to = mtd_get_master_ofs(mtd, to);
    if (master->_write_oob)
        ret = master->_write_oob(master, to, ops);
    else
        ret = master->_write(master, to, ops->len, &ops->retlen,
                     ops->datbuf);

    return ret;
}

static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
                   struct mtd_oob_ops *ops)
{
    struct mtd_info *master = mtd_get_master(mtd);
    int ngroups = mtd_pairing_groups(master);
    int npairs = mtd_wunit_per_eb(master) / ngroups;
    struct mtd_oob_ops adjops = *ops;
    unsigned int wunit, oobavail;
    struct mtd_pairing_info info;
    int max_bitflips = 0;
    u32 ebofs, pageofs;
    loff_t base, pos;

    ebofs = mtd_mod_by_eb(start, mtd);
    base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
    info.group = 0;
    info.pair = mtd_div_by_ws(ebofs, mtd);
    pageofs = mtd_mod_by_ws(ebofs, mtd);
    oobavail = mtd_oobavail(mtd, ops);

    while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
        int ret;

        if (info.pair >= npairs) {
            info.pair = 0;
            base += master->erasesize;
        }

        wunit = mtd_pairing_info_to_wunit(master, &info);
        pos = mtd_wunit_to_offset(mtd, base, wunit);

        adjops.len = ops->len - ops->retlen;
        if (adjops.len > mtd->writesize - pageofs)
            adjops.len = mtd->writesize - pageofs;

        adjops.ooblen = ops->ooblen - ops->oobretlen;
        if (adjops.ooblen > oobavail - adjops.ooboffs)
            adjops.ooblen = oobavail - adjops.ooboffs;

        if (read) {
            ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
            if (ret > 0)
                max_bitflips = max(max_bitflips, ret);
        } else {
            ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
        }

        if (ret < 0)
            return ret;

        max_bitflips = max(max_bitflips, ret);
        ops->retlen += adjops.retlen;
        ops->oobretlen += adjops.oobretlen;
        adjops.datbuf += adjops.retlen;
        adjops.oobbuf += adjops.oobretlen;
        adjops.ooboffs = 0;
        pageofs = 0;
        info.pair++;
    }

    return max_bitflips;
}

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
    struct mtd_info *master = mtd_get_master(mtd);
    struct mtd_ecc_stats old_stats = master->ecc_stats;
    int ret_code;

    ops->retlen = ops->oobretlen = 0;

    ret_code = mtd_check_oob_ops(mtd, from, ops);
    if (ret_code)
        return ret_code;

    ledtrig_mtd_activity();

    /* Check the validity of a potential fallback on mtd->_read */
    if (!master->_read_oob && (!master->_read || ops->oobbuf))
        return -EOPNOTSUPP;

    if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
        ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
    else
        ret_code = mtd_read_oob_std(mtd, from, ops);

    mtd_update_ecc_stats(mtd, master, &old_stats);

    /*
     * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
     * similar to mtd->_read(), returning a non-negative integer
     * representing max bitflips. In other cases, mtd->_read_oob() may
     * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
     */
    if (unlikely(ret_code < 0))
        return ret_code;
    if (mtd->ecc_strength == 0)
        return 0;   /* device lacks ecc */
    return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);

int mtd_write_oob(struct mtd_info *mtd, loff_t to,
                struct mtd_oob_ops *ops)
{
    struct mtd_info *master = mtd_get_master(mtd);
    int ret;

    ops->retlen = ops->oobretlen = 0;

    if (!(mtd->flags & MTD_WRITEABLE))
        return -EROFS;

    ret = mtd_check_oob_ops(mtd, to, ops);
    if (ret)
        return ret;

    ledtrig_mtd_activity();

    /* Check the validity of a potential fallback on mtd->_write */
    if (!master->_write_oob && (!master->_write || ops->oobbuf))
        return -EOPNOTSUPP;

    if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
        return mtd_io_emulated_slc(mtd, to, false, ops);

    return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);

/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
 *       bytes stored in a single contiguous section, or one section
 *       per ECC chunk (and sometimes several sections for a single
 *       ECC chunk)
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *      information
 *
 * This function returns ECC section information in the OOB area. If you want
 * to get all the ECC bytes information, then you should call
 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
              struct mtd_oob_region *oobecc)
{
    struct mtd_info *master = mtd_get_master(mtd);

    memset(oobecc, 0, sizeof(*oobecc));

    if (!master || section < 0)
        return -EINVAL;

    if (!master->ooblayout || !master->ooblayout->ecc)
        return -ENOTSUPP;

    return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
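
/*
 * A minimal sketch of the iteration pattern suggested in the kerneldoc above:
 * walk the ECC sections until -ERANGE. The example_* helper is hypothetical
 * and kept under #if 0, so it is never built.
 */
#if 0
static void example_dump_ecc_layout(struct mtd_info *mtd)
{
    struct mtd_oob_region region;
    int section = 0;

    while (!mtd_ooblayout_ecc(mtd, section, &region)) {
        pr_info("ECC section %d: offset %u, length %u\n",
            section, region.offset, region.length);
        section++;
    }
}
#endif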
1708 
1709 /**
1710  * mtd_ooblayout_free - Get the OOB region definition of a specific free
1711  *          section
1712  * @mtd: MTD device structure
1713  * @section: Free section you are interested in. Depending on the layout
1714  *       you may have all the free bytes stored in a single contiguous
1715  *       section, or one section per ECC chunk plus an extra section
1716  *       for the remaining bytes (or other funky layout).
1717  * @oobfree: OOB region struct filled with the appropriate free position
1718  *       information
1719  *
1720  * This function returns the position of free bytes in the OOB area. If you want
1721  * to get all the free bytes information, then you should call
1722  * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
1723  *
1724  * Returns zero on success, a negative error code otherwise.
1725  */
1726 int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1727                struct mtd_oob_region *oobfree)
1728 {
1729     struct mtd_info *master = mtd_get_master(mtd);
1730 
1731     memset(oobfree, 0, sizeof(*oobfree));
1732 
1733     if (!master || section < 0)
1734         return -EINVAL;
1735 
1736     if (!master->ooblayout || !master->ooblayout->free)
1737         return -ENOTSUPP;
1738 
1739     return master->ooblayout->free(master, section, oobfree);
1740 }
1741 EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1742 
1743 /**
1744  * mtd_ooblayout_find_region - Find the region attached to a specific byte
1745  * @mtd: mtd info structure
1746  * @byte: the byte we are searching for
1747  * @sectionp: pointer where the section id will be stored
1748  * @oobregion: used to retrieve the ECC position
1749  * @iter: iterator function. Should be either mtd_ooblayout_free or
1750  *    mtd_ooblayout_ecc depending on the region type you're searching for
1751  *
1752  * This function returns the section id and oobregion information of a
1753  * specific byte. For example, if you want to know where the 4th ECC byte
1754  * is stored, you would use:
1755  *
1756  * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1757  *
1758  * Returns zero on success, a negative error code otherwise.
1759  */
1760 static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1761                 int *sectionp, struct mtd_oob_region *oobregion,
1762                 int (*iter)(struct mtd_info *,
1763                         int section,
1764                         struct mtd_oob_region *oobregion))
1765 {
1766     int pos = 0, ret, section = 0;
1767 
1768     memset(oobregion, 0, sizeof(*oobregion));
1769 
1770     while (1) {
1771         ret = iter(mtd, section, oobregion);
1772         if (ret)
1773             return ret;
1774 
1775         if (pos + oobregion->length > byte)
1776             break;
1777 
1778         pos += oobregion->length;
1779         section++;
1780     }
1781 
1782     /*
1783      * Adjust region info to make it start at the beginning of the
1784      * requested byte.
1785      */
1786     oobregion->offset += byte - pos;
1787     oobregion->length -= byte - pos;
1788     *sectionp = section;
1789 
1790     return 0;
1791 }
1792 
1793 /**
1794  * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1795  *                ECC byte
1796  * @mtd: mtd info structure
1797  * @eccbyte: the byte we are searching for
1798  * @section: pointer where the section id will be stored
1799  * @oobregion: OOB region information
1800  *
1801  * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1802  * byte.
1803  *
1804  * Returns zero on success, a negative error code otherwise.
1805  */
1806 int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1807                  int *section,
1808                  struct mtd_oob_region *oobregion)
1809 {
1810     return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1811                      mtd_ooblayout_ecc);
1812 }
1813 EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
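
/*
 * Example: locating the 4th ECC byte, as described above (an illustrative
 * sketch only):
 *
 *	struct mtd_oob_region oobregion;
 *	int section, ret;
 *
 *	ret = mtd_ooblayout_find_eccregion(mtd, 3, &section, &oobregion);
 *	if (!ret)
 *		pr_debug("byte 3 is at OOB offset %u in section %d\n",
 *			 oobregion.offset, section);
 */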
1814 
1815 /**
1816  * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1817  * @mtd: mtd info structure
1818  * @buf: destination buffer to store OOB bytes
1819  * @oobbuf: OOB buffer
1820  * @start: first byte to retrieve
1821  * @nbytes: number of bytes to retrieve
1822  * @iter: section iterator
1823  *
1824  * Extract bytes attached to a specific category (ECC or free)
1825  * from the OOB buffer and copy them into buf.
1826  *
1827  * Returns zero on success, a negative error code otherwise.
1828  */
1829 static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1830                 const u8 *oobbuf, int start, int nbytes,
1831                 int (*iter)(struct mtd_info *,
1832                         int section,
1833                         struct mtd_oob_region *oobregion))
1834 {
1835     struct mtd_oob_region oobregion;
1836     int section, ret;
1837 
1838     ret = mtd_ooblayout_find_region(mtd, start, &section,
1839                     &oobregion, iter);
1840 
1841     while (!ret) {
1842         int cnt;
1843 
1844         cnt = min_t(int, nbytes, oobregion.length);
1845         memcpy(buf, oobbuf + oobregion.offset, cnt);
1846         buf += cnt;
1847         nbytes -= cnt;
1848 
1849         if (!nbytes)
1850             break;
1851 
1852         ret = iter(mtd, ++section, &oobregion);
1853     }
1854 
1855     return ret;
1856 }
1857 
1858 /**
1859  * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1860  * @mtd: mtd info structure
1861  * @buf: source buffer to get OOB bytes from
1862  * @oobbuf: OOB buffer
1863  * @start: first OOB byte to set
1864  * @nbytes: number of OOB bytes to set
1865  * @iter: section iterator
1866  *
1867  * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1868  * is selected by passing the appropriate iterator.
1869  *
1870  * Returns zero on success, a negative error code otherwise.
1871  */
1872 static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1873                 u8 *oobbuf, int start, int nbytes,
1874                 int (*iter)(struct mtd_info *,
1875                         int section,
1876                         struct mtd_oob_region *oobregion))
1877 {
1878     struct mtd_oob_region oobregion;
1879     int section, ret;
1880 
1881     ret = mtd_ooblayout_find_region(mtd, start, &section,
1882                     &oobregion, iter);
1883 
1884     while (!ret) {
1885         int cnt;
1886 
1887         cnt = min_t(int, nbytes, oobregion.length);
1888         memcpy(oobbuf + oobregion.offset, buf, cnt);
1889         buf += cnt;
1890         nbytes -= cnt;
1891 
1892         if (!nbytes)
1893             break;
1894 
1895         ret = iter(mtd, ++section, &oobregion);
1896     }
1897 
1898     return ret;
1899 }
1900 
1901 /**
1902  * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
1903  * @mtd: mtd info structure
1904  * @iter: category iterator
1905  *
1906  * Count the number of bytes in a given category.
1907  *
1908  * Returns a positive value on success, a negative error code otherwise.
1909  */
1910 static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1911                 int (*iter)(struct mtd_info *,
1912                         int section,
1913                         struct mtd_oob_region *oobregion))
1914 {
1915     struct mtd_oob_region oobregion;
1916     int section = 0, ret, nbytes = 0;
1917 
1918     while (1) {
1919         ret = iter(mtd, section++, &oobregion);
1920         if (ret) {
1921             if (ret == -ERANGE)
1922                 ret = nbytes;
1923             break;
1924         }
1925 
1926         nbytes += oobregion.length;
1927     }
1928 
1929     return ret;
1930 }
1931 
1932 /**
1933  * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
1934  * @mtd: mtd info structure
1935  * @eccbuf: destination buffer to store ECC bytes
1936  * @oobbuf: OOB buffer
1937  * @start: first ECC byte to retrieve
1938  * @nbytes: number of ECC bytes to retrieve
1939  *
1940  * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
1941  *
1942  * Returns zero on success, a negative error code otherwise.
1943  */
1944 int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
1945                    const u8 *oobbuf, int start, int nbytes)
1946 {
1947     return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1948                        mtd_ooblayout_ecc);
1949 }
1950 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
1951 
1952 /**
1953  * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
1954  * @mtd: mtd info structure
1955  * @eccbuf: source buffer to get ECC bytes from
1956  * @oobbuf: OOB buffer
1957  * @start: first ECC byte to set
1958  * @nbytes: number of ECC bytes to set
1959  *
1960  * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
1961  *
1962  * Returns zero on success, a negative error code otherwise.
1963  */
1964 int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
1965                    u8 *oobbuf, int start, int nbytes)
1966 {
1967     return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1968                        mtd_ooblayout_ecc);
1969 }
1970 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1971 
1972 /**
1973  * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
1974  * @mtd: mtd info structure
1975  * @databuf: destination buffer to store data bytes
1976  * @oobbuf: OOB buffer
1977  * @start: first data byte to retrieve
1978  * @nbytes: number of data bytes to retrieve
1979  *
1980  * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
1981  *
1982  * Returns zero on success, a negative error code otherwise.
1983  */
1984 int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
1985                 const u8 *oobbuf, int start, int nbytes)
1986 {
1987     return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
1988                        mtd_ooblayout_free);
1989 }
1990 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1991 
1992 /**
1993  * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
1994  * @mtd: mtd info structure
1995  * @databuf: source buffer to get data bytes from
1996  * @oobbuf: OOB buffer
1997  * @start: first data byte to set
1998  * @nbytes: number of data bytes to set
1999  *
2000  * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
2001  *
2002  * Returns zero on success, a negative error code otherwise.
2003  */
2004 int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
2005                 u8 *oobbuf, int start, int nbytes)
2006 {
2007     return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
2008                        mtd_ooblayout_free);
2009 }
2010 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
2011 
2012 /**
2013  * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
2014  * @mtd: mtd info structure
2015  *
2016  * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
2017  *
2018  * Returns the number of free bytes on success, else a negative error code.
2019  */
2020 int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
2021 {
2022     return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
2023 }
2024 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
2025 
2026 /**
2027  * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
2028  * @mtd: mtd info structure
2029  *
2030  * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
2031  *
2032  * Returns the number of ECC bytes on success, else a negative error code.
2033  */
2034 int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
2035 {
2036     return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
2037 }
2038 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
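
/*
 * Example: sizing a buffer with mtd_ooblayout_count_eccbytes() and then
 * extracting the ECC bytes from a raw OOB dump (a sketch; 'oobbuf' is
 * assumed to hold mtd->oobsize bytes obtained with MTD_OPS_RAW):
 *
 *	int ecccnt, ret;
 *	u8 *eccbuf;
 *
 *	ecccnt = mtd_ooblayout_count_eccbytes(mtd);
 *	if (ecccnt < 0)
 *		return ecccnt;
 *	eccbuf = kmalloc(ecccnt, GFP_KERNEL);
 *	if (!eccbuf)
 *		return -ENOMEM;
 *	ret = mtd_ooblayout_get_eccbytes(mtd, eccbuf, oobbuf, 0, ecccnt);
 */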
2039 
2040 /*
2041  * Method to access the protection register area, present in some flash
2042  * devices. The user data is one-time programmable but the factory data is
2043  * read only.
2044  */
2045 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2046                struct otp_info *buf)
2047 {
2048     struct mtd_info *master = mtd_get_master(mtd);
2049 
2050     if (!master->_get_fact_prot_info)
2051         return -EOPNOTSUPP;
2052     if (!len)
2053         return 0;
2054     return master->_get_fact_prot_info(master, len, retlen, buf);
2055 }
2056 EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
2057 
2058 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2059                size_t *retlen, u_char *buf)
2060 {
2061     struct mtd_info *master = mtd_get_master(mtd);
2062 
2063     *retlen = 0;
2064     if (!master->_read_fact_prot_reg)
2065         return -EOPNOTSUPP;
2066     if (!len)
2067         return 0;
2068     return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2069 }
2070 EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2071 
2072 int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2073                struct otp_info *buf)
2074 {
2075     struct mtd_info *master = mtd_get_master(mtd);
2076 
2077     if (!master->_get_user_prot_info)
2078         return -EOPNOTSUPP;
2079     if (!len)
2080         return 0;
2081     return master->_get_user_prot_info(master, len, retlen, buf);
2082 }
2083 EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
2084 
2085 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2086                size_t *retlen, u_char *buf)
2087 {
2088     struct mtd_info *master = mtd_get_master(mtd);
2089 
2090     *retlen = 0;
2091     if (!master->_read_user_prot_reg)
2092         return -EOPNOTSUPP;
2093     if (!len)
2094         return 0;
2095     return master->_read_user_prot_reg(master, from, len, retlen, buf);
2096 }
2097 EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
2098 
2099 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2100                 size_t *retlen, const u_char *buf)
2101 {
2102     struct mtd_info *master = mtd_get_master(mtd);
2103     int ret;
2104 
2105     *retlen = 0;
2106     if (!master->_write_user_prot_reg)
2107         return -EOPNOTSUPP;
2108     if (!len)
2109         return 0;
2110     ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2111     if (ret)
2112         return ret;
2113 
2114     /*
2115      * If no data could be written at all, the OTP region is out of
2116      * space and we must return -ENOSPC.
2117      */
2118     return (*retlen) ? 0 : -ENOSPC;
2119 }
2120 EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
2121 
2122 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2123 {
2124     struct mtd_info *master = mtd_get_master(mtd);
2125 
2126     if (!master->_lock_user_prot_reg)
2127         return -EOPNOTSUPP;
2128     if (!len)
2129         return 0;
2130     return master->_lock_user_prot_reg(master, from, len);
2131 }
2132 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2133 
2134 int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2135 {
2136     struct mtd_info *master = mtd_get_master(mtd);
2137 
2138     if (!master->_erase_user_prot_reg)
2139         return -EOPNOTSUPP;
2140     if (!len)
2141         return 0;
2142     return master->_erase_user_prot_reg(master, from, len);
2143 }
2144 EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
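
/*
 * Example: probing and reading the user OTP area (a sketch; the fixed
 * info[] size is an assumption, real callers should retry with a larger
 * array when more regions exist):
 *
 *	struct otp_info info[8];
 *	size_t retlen;
 *	int ret;
 *
 *	ret = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info);
 *	if (ret)
 *		return ret;
 *	if (retlen >= sizeof(info[0]))
 *		ret = mtd_read_user_prot_reg(mtd, info[0].start,
 *					     info[0].length, &retlen, buf);
 */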
2145 
2146 /* Chip-supported device locking */
2147 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2148 {
2149     struct mtd_info *master = mtd_get_master(mtd);
2150 
2151     if (!master->_lock)
2152         return -EOPNOTSUPP;
2153     if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2154         return -EINVAL;
2155     if (!len)
2156         return 0;
2157 
2158     if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2159         ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2160         len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2161     }
2162 
2163     return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2164 }
2165 EXPORT_SYMBOL_GPL(mtd_lock);
2166 
2167 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2168 {
2169     struct mtd_info *master = mtd_get_master(mtd);
2170 
2171     if (!master->_unlock)
2172         return -EOPNOTSUPP;
2173     if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2174         return -EINVAL;
2175     if (!len)
2176         return 0;
2177 
2178     if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2179         ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2180         len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2181     }
2182 
2183     return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2184 }
2185 EXPORT_SYMBOL_GPL(mtd_unlock);
2186 
2187 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2188 {
2189     struct mtd_info *master = mtd_get_master(mtd);
2190 
2191     if (!master->_is_locked)
2192         return -EOPNOTSUPP;
2193     if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2194         return -EINVAL;
2195     if (!len)
2196         return 0;
2197 
2198     if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2199         ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2200         len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2201     }
2202 
2203     return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2204 }
2205 EXPORT_SYMBOL_GPL(mtd_is_locked);
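
/*
 * Example: the common unlock-before-write pattern (a sketch; some callers
 * treat -EOPNOTSUPP from mtd_unlock() as "no locking scheme, nothing to
 * do"):
 *
 *	ret = mtd_unlock(mtd, ofs, mtd->erasesize);
 *	if (ret && ret != -EOPNOTSUPP)
 *		return ret;
 *	ret = mtd_write(mtd, ofs, len, &retlen, buf);
 */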
2206 
2207 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2208 {
2209     struct mtd_info *master = mtd_get_master(mtd);
2210 
2211     if (ofs < 0 || ofs >= mtd->size)
2212         return -EINVAL;
2213     if (!master->_block_isreserved)
2214         return 0;
2215 
2216     if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2217         ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2218 
2219     return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2220 }
2221 EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2222 
2223 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2224 {
2225     struct mtd_info *master = mtd_get_master(mtd);
2226 
2227     if (ofs < 0 || ofs >= mtd->size)
2228         return -EINVAL;
2229     if (!master->_block_isbad)
2230         return 0;
2231 
2232     if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2233         ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2234 
2235     return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2236 }
2237 EXPORT_SYMBOL_GPL(mtd_block_isbad);
2238 
2239 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2240 {
2241     struct mtd_info *master = mtd_get_master(mtd);
2242     int ret;
2243 
2244     if (!master->_block_markbad)
2245         return -EOPNOTSUPP;
2246     if (ofs < 0 || ofs >= mtd->size)
2247         return -EINVAL;
2248     if (!(mtd->flags & MTD_WRITEABLE))
2249         return -EROFS;
2250 
2251     if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2252         ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2253 
2254     ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2255     if (ret)
2256         return ret;
2257 
2258     while (mtd->parent) {
2259         mtd->ecc_stats.badblocks++;
2260         mtd = mtd->parent;
2261     }
2262 
2263     return 0;
2264 }
2265 EXPORT_SYMBOL_GPL(mtd_block_markbad);
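
/*
 * Example: skipping bad eraseblocks while scanning a device (a sketch;
 * mtd_block_isbad() returns a positive value for a bad block, 0 for a
 * good one and a negative error code on failure):
 *
 *	loff_t ofs;
 *	int ret;
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *		ret = mtd_block_isbad(mtd, ofs);
 *		if (ret < 0)
 *			return ret;
 *		if (ret)
 *			continue;	// known-bad block, skip it
 *		// ... use the good eraseblock at 'ofs' ...
 *	}
 */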
2266 
2267 /*
2268  * default_mtd_writev - the default writev method
2269  * @mtd: mtd device description object pointer
2270  * @vecs: the vectors to write
2271  * @count: count of vectors in @vecs
2272  * @to: the MTD device offset to write to
2273  * @retlen: on exit contains the count of bytes written to the MTD device.
2274  *
2275  * This function returns zero in case of success and a negative error code in
2276  * case of failure.
2277  */
2278 static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2279                   unsigned long count, loff_t to, size_t *retlen)
2280 {
2281     unsigned long i;
2282     size_t totlen = 0, thislen;
2283     int ret = 0;
2284 
2285     for (i = 0; i < count; i++) {
2286         if (!vecs[i].iov_len)
2287             continue;
2288         ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2289                 vecs[i].iov_base);
2290         totlen += thislen;
2291         if (ret || thislen != vecs[i].iov_len)
2292             break;
2293         to += vecs[i].iov_len;
2294     }
2295     *retlen = totlen;
2296     return ret;
2297 }
2298 
2299 /*
2300  * mtd_writev - the vector-based MTD write method
2301  * @mtd: mtd device description object pointer
2302  * @vecs: the vectors to write
2303  * @count: count of vectors in @vecs
2304  * @to: the MTD device offset to write to
2305  * @retlen: on exit contains the count of bytes written to the MTD device.
2306  *
2307  * This function returns zero in case of success and a negative error code in
2308  * case of failure.
2309  */
2310 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2311            unsigned long count, loff_t to, size_t *retlen)
2312 {
2313     struct mtd_info *master = mtd_get_master(mtd);
2314 
2315     *retlen = 0;
2316     if (!(mtd->flags & MTD_WRITEABLE))
2317         return -EROFS;
2318 
2319     if (!master->_writev)
2320         return default_mtd_writev(mtd, vecs, count, to, retlen);
2321 
2322     return master->_writev(master, vecs, count,
2323                    mtd_get_master_ofs(mtd, to), retlen);
2324 }
2325 EXPORT_SYMBOL_GPL(mtd_writev);
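
/*
 * Example: gathering a header and a payload into a single write with
 * mtd_writev() (a sketch; 'hdr', 'data' and their lengths are assumed to
 * satisfy the device's write alignment constraints):
 *
 *	struct kvec vecs[2];
 *	size_t retlen;
 *	int ret;
 *
 *	vecs[0].iov_base = hdr;
 *	vecs[0].iov_len = hdr_len;
 *	vecs[1].iov_base = data;
 *	vecs[1].iov_len = data_len;
 *	ret = mtd_writev(mtd, vecs, 2, to, &retlen);
 *	if (!ret && retlen != hdr_len + data_len)
 *		ret = -EIO;	// short write, policy chosen by the caller
 */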
2326 
2327 /**
2328  * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2329  * @mtd: mtd device description object pointer
2330  * @size: a pointer to the ideal or maximum size of the allocation, points
2331  *        to the actual allocation size on success.
2332  *
2333  * This routine attempts to allocate a contiguous kernel buffer up to
2334  * the specified size, backing off the size of the request exponentially
2335  * until the request succeeds or until the allocation size falls below
2336  * the system page size. This attempts to make sure it does not adversely
2337  * impact system performance, so when allocating more than one page, we
2338  * ask the memory allocator to avoid re-trying, swapping, writing back
2339  * or performing I/O.
2340  *
2341  * Note, this function also makes sure that the allocated buffer is aligned to
2342  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2343  *
2344  * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
2345  * to handle smaller (i.e. degraded) buffer allocations under low- or
2346  * fragmented-memory situations where such reduced allocations, from a
2347  * requested ideal, are allowed.
2348  *
2349  * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2350  */
2351 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2352 {
2353     gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2354     size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2355     void *kbuf;
2356 
2357     *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2358 
2359     while (*size > min_alloc) {
2360         kbuf = kmalloc(*size, flags);
2361         if (kbuf)
2362             return kbuf;
2363 
2364         *size >>= 1;
2365         *size = ALIGN(*size, mtd->writesize);
2366     }
2367 
2368     /*
2369      * For the last resort allocation allow 'kmalloc()' to do all sorts of
2370      * things (write-back, dropping caches, etc) by using GFP_KERNEL.
2371      */
2372     return kmalloc(*size, GFP_KERNEL);
2373 }
2374 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
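
/*
 * Example: requesting a whole eraseblock but accepting a smaller,
 * writesize-aligned buffer under memory pressure (a sketch modelled on
 * the callers mentioned above):
 *
 *	size_t size = mtd->erasesize;
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// 'size' now holds the actual, possibly reduced, allocation size
 */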
2375 
2376 #ifdef CONFIG_PROC_FS
2377 
2378 /*====================================================================*/
2379 /* Support for /proc/mtd */
2380 
2381 static int mtd_proc_show(struct seq_file *m, void *v)
2382 {
2383     struct mtd_info *mtd;
2384 
2385     seq_puts(m, "dev:    size   erasesize  name\n");
2386     mutex_lock(&mtd_table_mutex);
2387     mtd_for_each_device(mtd) {
2388         seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2389                mtd->index, (unsigned long long)mtd->size,
2390                mtd->erasesize, mtd->name);
2391     }
2392     mutex_unlock(&mtd_table_mutex);
2393     return 0;
2394 }
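
/*
 * The resulting /proc/mtd output looks like this (device names and sizes
 * below are purely illustrative):
 *
 *	dev:    size   erasesize  name
 *	mtd0: 00100000 00020000 "bootloader"
 *	mtd1: 07f00000 00020000 "rootfs"
 */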
2395 #endif /* CONFIG_PROC_FS */
2396 
2397 /*====================================================================*/
2398 /* Init code */
2399 
2400 static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2401 {
2402     struct backing_dev_info *bdi;
2403     int ret;
2404 
2405     bdi = bdi_alloc(NUMA_NO_NODE);
2406     if (!bdi)
2407         return ERR_PTR(-ENOMEM);
2408     bdi->ra_pages = 0;
2409     bdi->io_pages = 0;
2410 
2411     /*
2412      * We append a '-0' suffix to the name to keep the name format we
2413      * used to have. Since this is called only once, the name is unique.
2414      */
2415     ret = bdi_register(bdi, "%.28s-0", name);
2416     if (ret)
2417         bdi_put(bdi);
2418 
2419     return ret ? ERR_PTR(ret) : bdi;
2420 }
2421 
2422 static struct proc_dir_entry *proc_mtd;
2423 
2424 static int __init init_mtd(void)
2425 {
2426     int ret;
2427 
2428     ret = class_register(&mtd_class);
2429     if (ret)
2430         goto err_reg;
2431 
2432     mtd_bdi = mtd_bdi_init("mtd");
2433     if (IS_ERR(mtd_bdi)) {
2434         ret = PTR_ERR(mtd_bdi);
2435         goto err_bdi;
2436     }
2437 
2438     proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2439 
2440     ret = init_mtdchar();
2441     if (ret)
2442         goto out_procfs;
2443 
2444     dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2445     debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
2446                 &mtd_expert_analysis_mode);
2447 
2448     return 0;
2449 
2450 out_procfs:
2451     if (proc_mtd)
2452         remove_proc_entry("mtd", NULL);
2453     bdi_put(mtd_bdi);
2454 err_bdi:
2455     class_unregister(&mtd_class);
2456 err_reg:
2457     pr_err("Error registering mtd class or bdi: %d\n", ret);
2458     return ret;
2459 }
2460 
2461 static void __exit cleanup_mtd(void)
2462 {
2463     debugfs_remove_recursive(dfs_dir_mtd);
2464     cleanup_mtdchar();
2465     if (proc_mtd)
2466         remove_proc_entry("mtd", NULL);
2467     class_unregister(&mtd_class);
2468     bdi_unregister(mtd_bdi);
2469     bdi_put(mtd_bdi);
2470     idr_destroy(&mtd_idr);
2471 }
2472 
2473 module_init(init_mtd);
2474 module_exit(cleanup_mtd);
2475 
2476 MODULE_LICENSE("GPL");
2477 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2478 MODULE_DESCRIPTION("Core MTD registration and access routines");