0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
0004  */
0005 
0006 #include <linux/device.h>
0007 #include <linux/fs.h>
0008 #include <linux/mm.h>
0009 #include <linux/err.h>
0010 #include <linux/init.h>
0011 #include <linux/kernel.h>
0012 #include <linux/module.h>
0013 #include <linux/slab.h>
0014 #include <linux/sched.h>
0015 #include <linux/mutex.h>
0016 #include <linux/backing-dev.h>
0017 #include <linux/compat.h>
0018 #include <linux/mount.h>
0019 #include <linux/blkpg.h>
0020 #include <linux/magic.h>
0021 #include <linux/major.h>
0022 #include <linux/mtd/mtd.h>
0023 #include <linux/mtd/partitions.h>
0024 #include <linux/mtd/map.h>
0025 
0026 #include <linux/uaccess.h>
0027 
0028 #include "mtdcore.h"
0029 
0030 /*
0031  * Data structure to hold the pointer to the mtd device as well
0032  * as the mode information for its various use cases.
0033  */
0034 struct mtd_file_info {
0035     struct mtd_info *mtd;
0036     enum mtd_file_modes mode;
0037 };
0038 
0039 static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
0040 {
0041     struct mtd_file_info *mfi = file->private_data;
0042     return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
0043 }
0044 
0045 static int mtdchar_open(struct inode *inode, struct file *file)
0046 {
0047     int minor = iminor(inode);
0048     int devnum = minor >> 1;
0049     int ret = 0;
0050     struct mtd_info *mtd;
0051     struct mtd_file_info *mfi;
0052 
0053     pr_debug("MTD_open\n");
0054 
0055     /* You can't open the RO devices RW */
0056     if ((file->f_mode & FMODE_WRITE) && (minor & 1))
0057         return -EACCES;
0058 
0059     mtd = get_mtd_device(NULL, devnum);
0060 
0061     if (IS_ERR(mtd))
0062         return PTR_ERR(mtd);
0063 
0064     if (mtd->type == MTD_ABSENT) {
0065         ret = -ENODEV;
0066         goto out1;
0067     }
0068 
0069     /* You can't open it RW if it's not a writeable device */
0070     if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
0071         ret = -EACCES;
0072         goto out1;
0073     }
0074 
0075     mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
0076     if (!mfi) {
0077         ret = -ENOMEM;
0078         goto out1;
0079     }
0080     mfi->mtd = mtd;
0081     file->private_data = mfi;
0082     return 0;
0083 
0084 out1:
0085     put_mtd_device(mtd);
0086     return ret;
0087 } /* mtdchar_open */
0088 
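The minor-number split in mtdchar_open() above reflects the char-device naming: each MTD device X is exposed as /dev/mtdX (even minor 2X, read-write) and /dev/mtdXro (odd minor 2X+1, read-only), which is why minor >> 1 recovers the device number and minor & 1 blocks FMODE_WRITE. A minimal userspace sketch of that open-time check, assuming the usual /dev/mtd0 naming (the device node is illustrative, not part of this file):

    /* Userspace sketch: the read-only node refuses O_RDWR with -EACCES,
     * the read-write node accepts it. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/dev/mtd0", O_RDWR);       /* even minor: RW allowed */

        if (fd < 0) {
            perror("open /dev/mtd0");
            return 1;
        }

        if (open("/dev/mtd0ro", O_RDWR) < 0)      /* odd minor: EACCES */
            perror("open /dev/mtd0ro O_RDWR");

        close(fd);
        return 0;
    }
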
0089 /*====================================================================*/
0090 
0091 static int mtdchar_close(struct inode *inode, struct file *file)
0092 {
0093     struct mtd_file_info *mfi = file->private_data;
0094     struct mtd_info *mtd = mfi->mtd;
0095 
0096     pr_debug("MTD_close\n");
0097 
0098     /* Only sync if opened RW */
0099     if ((file->f_mode & FMODE_WRITE))
0100         mtd_sync(mtd);
0101 
0102     put_mtd_device(mtd);
0103     file->private_data = NULL;
0104     kfree(mfi);
0105 
0106     return 0;
0107 } /* mtdchar_close */
0108 
0109 /* Back in June 2001, dwmw2 wrote:
0110  *
0111  *   FIXME: This _really_ needs to die. In 2.5, we should lock the
0112  *   userspace buffer down and use it directly with readv/writev.
0113  *
0114  * The implementation below, using mtd_kmalloc_up_to, mitigates
0115  * allocation failures when the system is low on memory or memory is
0116  * highly fragmented, at the cost of reducing the performance of the
0117  * requested transfer due to a smaller buffer size.
0118  *
0119  * A more complex but more memory-efficient implementation based on
0120  * get_user_pages and iovecs to cover extents of those pages is a
0121  * longer-term goal, as intimated by dwmw2 above. However, for the
0122  * write case, this requires yet more complex head and tail transfer
0123  * handling when those head and tail offsets and sizes are such that
0124  * alignment requirements are not met in the NAND subdriver.
0125  */
0126 
0127 static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
0128             loff_t *ppos)
0129 {
0130     struct mtd_file_info *mfi = file->private_data;
0131     struct mtd_info *mtd = mfi->mtd;
0132     size_t retlen;
0133     size_t total_retlen=0;
0134     int ret=0;
0135     int len;
0136     size_t size = count;
0137     char *kbuf;
0138 
0139     pr_debug("MTD_read\n");
0140 
0141     if (*ppos + count > mtd->size) {
0142         if (*ppos < mtd->size)
0143             count = mtd->size - *ppos;
0144         else
0145             count = 0;
0146     }
0147 
0148     if (!count)
0149         return 0;
0150 
0151     kbuf = mtd_kmalloc_up_to(mtd, &size);
0152     if (!kbuf)
0153         return -ENOMEM;
0154 
0155     while (count) {
0156         len = min_t(size_t, count, size);
0157 
0158         switch (mfi->mode) {
0159         case MTD_FILE_MODE_OTP_FACTORY:
0160             ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
0161                              &retlen, kbuf);
0162             break;
0163         case MTD_FILE_MODE_OTP_USER:
0164             ret = mtd_read_user_prot_reg(mtd, *ppos, len,
0165                              &retlen, kbuf);
0166             break;
0167         case MTD_FILE_MODE_RAW:
0168         {
0169             struct mtd_oob_ops ops = {};
0170 
0171             ops.mode = MTD_OPS_RAW;
0172             ops.datbuf = kbuf;
0173             ops.oobbuf = NULL;
0174             ops.len = len;
0175 
0176             ret = mtd_read_oob(mtd, *ppos, &ops);
0177             retlen = ops.retlen;
0178             break;
0179         }
0180         default:
0181             ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
0182         }
0183         /* NAND returns -EBADMSG on ECC errors, but it still returns
0184          * the data. For our userspace tools it is important
0185          * to dump areas with ECC errors!
0186          * For kernel internal usage it also might return -EUCLEAN
0187          * to signal the caller that a bitflip has occurred and has
0188          * been corrected by the ECC algorithm.
0189          * Userspace software which accesses NAND this way must be
0190          * aware of the fact that it is dealing with NAND.
0191          */
0192         if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
0193             *ppos += retlen;
0194             if (copy_to_user(buf, kbuf, retlen)) {
0195                 kfree(kbuf);
0196                 return -EFAULT;
0197             }
0198             else
0199                 total_retlen += retlen;
0200 
0201             count -= retlen;
0202             buf += retlen;
0203             if (retlen == 0)
0204                 count = 0;
0205         }
0206         else {
0207             kfree(kbuf);
0208             return ret;
0209         }
0210 
0211     }
0212 
0213     kfree(kbuf);
0214     return total_retlen;
0215 } /* mtdchar_read */
0216 
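Note the error handling above: when the read reports a corrected bitflip (-EUCLEAN) or an uncorrectable ECC error (-EBADMSG), the data is still copied out and the read() call itself succeeds, so userspace only sees a short or failed read for other errors. A hedged userspace sketch of the plain read path (device node and buffer size are illustrative):

    /* Userspace sketch: read the first 4 KiB of an MTD device. ECC
     * problems detected by the driver are not reported through read();
     * tools that care about them use ECCGETSTATS or the OOB ioctls. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned char buf[4096];
        ssize_t n;
        int fd = open("/dev/mtd0", O_RDONLY);

        if (fd < 0)
            return 1;

        n = pread(fd, buf, sizeof(buf), 0);
        printf("read %zd bytes\n", n);

        close(fd);
        return 0;
    }
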
0217 static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
0218             loff_t *ppos)
0219 {
0220     struct mtd_file_info *mfi = file->private_data;
0221     struct mtd_info *mtd = mfi->mtd;
0222     size_t size = count;
0223     char *kbuf;
0224     size_t retlen;
0225     size_t total_retlen=0;
0226     int ret=0;
0227     int len;
0228 
0229     pr_debug("MTD_write\n");
0230 
0231     if (*ppos >= mtd->size)
0232         return -ENOSPC;
0233 
0234     if (*ppos + count > mtd->size)
0235         count = mtd->size - *ppos;
0236 
0237     if (!count)
0238         return 0;
0239 
0240     kbuf = mtd_kmalloc_up_to(mtd, &size);
0241     if (!kbuf)
0242         return -ENOMEM;
0243 
0244     while (count) {
0245         len = min_t(size_t, count, size);
0246 
0247         if (copy_from_user(kbuf, buf, len)) {
0248             kfree(kbuf);
0249             return -EFAULT;
0250         }
0251 
0252         switch (mfi->mode) {
0253         case MTD_FILE_MODE_OTP_FACTORY:
0254             ret = -EROFS;
0255             break;
0256         case MTD_FILE_MODE_OTP_USER:
0257             ret = mtd_write_user_prot_reg(mtd, *ppos, len,
0258                               &retlen, kbuf);
0259             break;
0260 
0261         case MTD_FILE_MODE_RAW:
0262         {
0263             struct mtd_oob_ops ops = {};
0264 
0265             ops.mode = MTD_OPS_RAW;
0266             ops.datbuf = kbuf;
0267             ops.oobbuf = NULL;
0268             ops.ooboffs = 0;
0269             ops.len = len;
0270 
0271             ret = mtd_write_oob(mtd, *ppos, &ops);
0272             retlen = ops.retlen;
0273             break;
0274         }
0275 
0276         default:
0277             ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
0278         }
0279 
0280         /*
0281          * Return -ENOSPC only if no data could be written at all.
0282          * Otherwise just return the number of bytes that actually
0283          * have been written.
0284          */
0285         if ((ret == -ENOSPC) && (total_retlen))
0286             break;
0287 
0288         if (!ret) {
0289             *ppos += retlen;
0290             total_retlen += retlen;
0291             count -= retlen;
0292             buf += retlen;
0293         }
0294         else {
0295             kfree(kbuf);
0296             return ret;
0297         }
0298     }
0299 
0300     kfree(kbuf);
0301     return total_retlen;
0302 } /* mtdchar_write */
0303 
0304 /*======================================================================
0305 
0306     IOCTL calls for getting device parameters.
0307 
0308 ======================================================================*/
0309 
0310 static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
0311 {
0312     struct mtd_info *mtd = mfi->mtd;
0313     size_t retlen;
0314 
0315     switch (mode) {
0316     case MTD_OTP_FACTORY:
0317         if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
0318                 -EOPNOTSUPP)
0319             return -EOPNOTSUPP;
0320 
0321         mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
0322         break;
0323     case MTD_OTP_USER:
0324         if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
0325                 -EOPNOTSUPP)
0326             return -EOPNOTSUPP;
0327 
0328         mfi->mode = MTD_FILE_MODE_OTP_USER;
0329         break;
0330     case MTD_OTP_OFF:
0331         mfi->mode = MTD_FILE_MODE_NORMAL;
0332         break;
0333     default:
0334         return -EINVAL;
0335     }
0336 
0337     return 0;
0338 }
0339 
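otp_select_filemode() above only switches the per-file mode; the actual OTP accesses happen in mtdchar_read()/mtdchar_write() through the *_prot_reg helpers. A hedged userspace sketch of selecting the user OTP area with the OTPSELECT ioctl and reading from it (device node and buffer size are illustrative):

    /* Userspace sketch: switch this open file into user-OTP mode, after
     * which ordinary reads address the one-time-programmable region. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <mtd/mtd-user.h>

    int main(void)
    {
        unsigned char buf[64];
        int mode = MTD_OTP_USER;
        int fd = open("/dev/mtd0", O_RDONLY);

        if (fd < 0)
            return 1;

        /* Fails with EOPNOTSUPP if the flash has no user OTP region. */
        if (ioctl(fd, OTPSELECT, &mode) == 0) {
            ssize_t n = pread(fd, buf, sizeof(buf), 0);
            printf("read %zd OTP bytes\n", n);
        }

        close(fd);
        return 0;
    }
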
0340 static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
0341     uint64_t start, uint32_t length, void __user *ptr,
0342     uint32_t __user *retp)
0343 {
0344     struct mtd_info *master  = mtd_get_master(mtd);
0345     struct mtd_file_info *mfi = file->private_data;
0346     struct mtd_oob_ops ops = {};
0347     uint32_t retlen;
0348     int ret = 0;
0349 
0350     if (length > 4096)
0351         return -EINVAL;
0352 
0353     if (!master->_write_oob)
0354         return -EOPNOTSUPP;
0355 
0356     ops.ooblen = length;
0357     ops.ooboffs = start & (mtd->writesize - 1);
0358     ops.datbuf = NULL;
0359     ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
0360         MTD_OPS_PLACE_OOB;
0361 
0362     if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
0363         return -EINVAL;
0364 
0365     ops.oobbuf = memdup_user(ptr, length);
0366     if (IS_ERR(ops.oobbuf))
0367         return PTR_ERR(ops.oobbuf);
0368 
0369     start &= ~((uint64_t)mtd->writesize - 1);
0370     ret = mtd_write_oob(mtd, start, &ops);
0371 
0372     if (ops.oobretlen > 0xFFFFFFFFU)
0373         ret = -EOVERFLOW;
0374     retlen = ops.oobretlen;
0375     if (copy_to_user(retp, &retlen, sizeof(length)))
0376         ret = -EFAULT;
0377 
0378     kfree(ops.oobbuf);
0379     return ret;
0380 }
0381 
0382 static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
0383     uint64_t start, uint32_t length, void __user *ptr,
0384     uint32_t __user *retp)
0385 {
0386     struct mtd_file_info *mfi = file->private_data;
0387     struct mtd_oob_ops ops = {};
0388     int ret = 0;
0389 
0390     if (length > 4096)
0391         return -EINVAL;
0392 
0393     ops.ooblen = length;
0394     ops.ooboffs = start & (mtd->writesize - 1);
0395     ops.datbuf = NULL;
0396     ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
0397         MTD_OPS_PLACE_OOB;
0398 
0399     if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
0400         return -EINVAL;
0401 
0402     ops.oobbuf = kmalloc(length, GFP_KERNEL);
0403     if (!ops.oobbuf)
0404         return -ENOMEM;
0405 
0406     start &= ~((uint64_t)mtd->writesize - 1);
0407     ret = mtd_read_oob(mtd, start, &ops);
0408 
0409     if (put_user(ops.oobretlen, retp))
0410         ret = -EFAULT;
0411     else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
0412                         ops.oobretlen))
0413         ret = -EFAULT;
0414 
0415     kfree(ops.oobbuf);
0416 
0417     /*
0418      * NAND returns -EBADMSG on ECC errors, but it returns the OOB
0419      * data. For our userspace tools it is important to dump areas
0420      * with ECC errors!
0421      * For kernel internal usage it also might return -EUCLEAN
0422      * to signal the caller that a bitflip has occurred and has
0423      * been corrected by the ECC algorithm.
0424      *
0425      * Note: currently the standard NAND function, nand_read_oob_std,
0426      * does not calculate ECC for the OOB area, so do not rely on
0427      * this behavior unless you have replaced it with your own.
0428      */
0429     if (mtd_is_bitflip_or_eccerr(ret))
0430         return 0;
0431 
0432     return ret;
0433 }
0434 
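mtdchar_readoob() backs the MEMREADOOB and MEMREADOOB64 ioctls. For the legacy MEMREADOOB variant, note (per the handler further down) that the number of OOB bytes actually read is written back into the start field of the user's struct mtd_oob_buf. A hedged userspace sketch (device node and OOB size are illustrative):

    /* Userspace sketch: read the OOB area belonging to the first page. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <mtd/mtd-user.h>

    int main(void)
    {
        unsigned char oob[64];
        struct mtd_oob_buf req;
        int fd = open("/dev/mtd0", O_RDONLY);

        if (fd < 0)
            return 1;

        memset(&req, 0, sizeof(req));
        req.start = 0;               /* flash offset; low bits become ooboffs */
        req.length = sizeof(oob);    /* capped at 4096 by the kernel */
        req.ptr = oob;

        if (ioctl(fd, MEMREADOOB, &req) == 0)
            printf("%u OOB bytes returned\n", req.start); /* retlen lands here */

        close(fd);
        return 0;
    }
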
0435 /*
0436  * Copies (and truncates, if necessary) OOB layout information to the
0437  * deprecated layout struct, nand_ecclayout_user. This is necessary only to
0438  * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
0439  * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
0440  * can describe any kind of OOB layout with almost zero overhead from a
0441  * memory usage point of view).
0442  */
0443 static int shrink_ecclayout(struct mtd_info *mtd,
0444                 struct nand_ecclayout_user *to)
0445 {
0446     struct mtd_oob_region oobregion;
0447     int i, section = 0, ret;
0448 
0449     if (!mtd || !to)
0450         return -EINVAL;
0451 
0452     memset(to, 0, sizeof(*to));
0453 
0454     to->eccbytes = 0;
0455     for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
0456         u32 eccpos;
0457 
0458         ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
0459         if (ret < 0) {
0460             if (ret != -ERANGE)
0461                 return ret;
0462 
0463             break;
0464         }
0465 
0466         eccpos = oobregion.offset;
0467         for (; i < MTD_MAX_ECCPOS_ENTRIES &&
0468                eccpos < oobregion.offset + oobregion.length; i++) {
0469             to->eccpos[i] = eccpos++;
0470             to->eccbytes++;
0471         }
0472     }
0473 
0474     for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
0475         ret = mtd_ooblayout_free(mtd, i, &oobregion);
0476         if (ret < 0) {
0477             if (ret != -ERANGE)
0478                 return ret;
0479 
0480             break;
0481         }
0482 
0483         to->oobfree[i].offset = oobregion.offset;
0484         to->oobfree[i].length = oobregion.length;
0485         to->oobavail += to->oobfree[i].length;
0486     }
0487 
0488     return 0;
0489 }
0490 
0491 static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
0492 {
0493     struct mtd_oob_region oobregion;
0494     int i, section = 0, ret;
0495 
0496     if (!mtd || !to)
0497         return -EINVAL;
0498 
0499     memset(to, 0, sizeof(*to));
0500 
0501     to->eccbytes = 0;
0502     for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
0503         u32 eccpos;
0504 
0505         ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
0506         if (ret < 0) {
0507             if (ret != -ERANGE)
0508                 return ret;
0509 
0510             break;
0511         }
0512 
0513         if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
0514             return -EINVAL;
0515 
0516         eccpos = oobregion.offset;
0517         for (; eccpos < oobregion.offset + oobregion.length; i++) {
0518             to->eccpos[i] = eccpos++;
0519             to->eccbytes++;
0520         }
0521     }
0522 
0523     for (i = 0; i < 8; i++) {
0524         ret = mtd_ooblayout_free(mtd, i, &oobregion);
0525         if (ret < 0) {
0526             if (ret != -ERANGE)
0527                 return ret;
0528 
0529             break;
0530         }
0531 
0532         to->oobfree[i][0] = oobregion.offset;
0533         to->oobfree[i][1] = oobregion.length;
0534     }
0535 
0536     to->useecc = MTD_NANDECC_AUTOPLACE;
0537 
0538     return 0;
0539 }
0540 
0541 static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
0542                    struct blkpg_ioctl_arg *arg)
0543 {
0544     struct blkpg_partition p;
0545 
0546     if (!capable(CAP_SYS_ADMIN))
0547         return -EPERM;
0548 
0549     if (copy_from_user(&p, arg->data, sizeof(p)))
0550         return -EFAULT;
0551 
0552     switch (arg->op) {
0553     case BLKPG_ADD_PARTITION:
0554 
0555         /* Only the master mtd device may be used to add partitions */
0556         if (mtd_is_partition(mtd))
0557             return -EINVAL;
0558 
0559         /* Sanitize user input */
0560         p.devname[BLKPG_DEVNAMELTH - 1] = '\0';
0561 
0562         return mtd_add_partition(mtd, p.devname, p.start, p.length);
0563 
0564     case BLKPG_DEL_PARTITION:
0565 
0566         if (p.pno < 0)
0567             return -EINVAL;
0568 
0569         return mtd_del_partition(mtd, p.pno);
0570 
0571     default:
0572         return -EINVAL;
0573     }
0574 }
0575 
0576 static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
0577                   struct mtd_oob_ops *ops)
0578 {
0579     uint32_t start_page, end_page;
0580     u32 oob_per_page;
0581 
0582     if (ops->len == 0 || ops->ooblen == 0)
0583         return;
0584 
0585     start_page = mtd_div_by_ws(start, mtd);
0586     end_page = mtd_div_by_ws(start + ops->len - 1, mtd);
0587     oob_per_page = mtd_oobavail(mtd, ops);
0588 
0589     ops->ooblen = min_t(size_t, ops->ooblen,
0590                 (end_page - start_page + 1) * oob_per_page);
0591 }
0592 
0593 static int mtdchar_write_ioctl(struct mtd_info *mtd,
0594         struct mtd_write_req __user *argp)
0595 {
0596     struct mtd_info *master = mtd_get_master(mtd);
0597     struct mtd_write_req req;
0598     const void __user *usr_data, *usr_oob;
0599     uint8_t *datbuf = NULL, *oobbuf = NULL;
0600     size_t datbuf_len, oobbuf_len;
0601     int ret = 0;
0602 
0603     if (copy_from_user(&req, argp, sizeof(req)))
0604         return -EFAULT;
0605 
0606     usr_data = (const void __user *)(uintptr_t)req.usr_data;
0607     usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
0608 
0609     if (!master->_write_oob)
0610         return -EOPNOTSUPP;
0611 
0612     if (!usr_data)
0613         req.len = 0;
0614 
0615     if (!usr_oob)
0616         req.ooblen = 0;
0617 
0618     req.len &= 0xffffffff;
0619     req.ooblen &= 0xffffffff;
0620 
0621     if (req.start + req.len > mtd->size)
0622         return -EINVAL;
0623 
0624     datbuf_len = min_t(size_t, req.len, mtd->erasesize);
0625     if (datbuf_len > 0) {
0626         datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
0627         if (!datbuf)
0628             return -ENOMEM;
0629     }
0630 
0631     oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
0632     if (oobbuf_len > 0) {
0633         oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
0634         if (!oobbuf) {
0635             kvfree(datbuf);
0636             return -ENOMEM;
0637         }
0638     }
0639 
0640     while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
0641         struct mtd_oob_ops ops = {
0642             .mode = req.mode,
0643             .len = min_t(size_t, req.len, datbuf_len),
0644             .ooblen = min_t(size_t, req.ooblen, oobbuf_len),
0645             .datbuf = datbuf,
0646             .oobbuf = oobbuf,
0647         };
0648 
0649         /*
0650          * Shorten non-page-aligned, eraseblock-sized writes so that
0651          * the write ends on an eraseblock boundary.  This is necessary
0652          * for adjust_oob_length() to properly handle non-page-aligned
0653          * writes.
0654          */
0655         if (ops.len == mtd->erasesize)
0656             ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
0657 
0658         /*
0659          * For writes which are not OOB-only, adjust the amount of OOB
0660          * data written according to the number of data pages written.
0661          * This is necessary to prevent OOB data from being skipped
0662          * over in data+OOB writes requiring multiple mtd_write_oob()
0663          * calls to be completed.
0664          */
0665         adjust_oob_length(mtd, req.start, &ops);
0666 
0667         if (copy_from_user(datbuf, usr_data, ops.len) ||
0668             copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
0669             ret = -EFAULT;
0670             break;
0671         }
0672 
0673         ret = mtd_write_oob(mtd, req.start, &ops);
0674         if (ret)
0675             break;
0676 
0677         req.start += ops.retlen;
0678         req.len -= ops.retlen;
0679         usr_data += ops.retlen;
0680 
0681         req.ooblen -= ops.oobretlen;
0682         usr_oob += ops.oobretlen;
0683     }
0684 
0685     kvfree(datbuf);
0686     kvfree(oobbuf);
0687 
0688     return ret;
0689 }
0690 
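mtdchar_write_ioctl() implements MEMWRITE, which writes data and OOB together and lets userspace pick the OOB mode per request rather than per file descriptor. A hedged userspace sketch, assuming a 2048+64 byte NAND page and /dev/mtd0 purely for illustration:

    /* Userspace sketch: write one page of data plus its OOB area in raw
     * mode (no ECC) through the MEMWRITE ioctl handled above. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <mtd/mtd-user.h>

    int main(void)
    {
        unsigned char data[2048], oob[64];
        struct mtd_write_req req;
        int fd = open("/dev/mtd0", O_RDWR);    /* MEMWRITE needs FMODE_WRITE */

        if (fd < 0)
            return 1;

        memset(data, 0xa5, sizeof(data));
        memset(oob, 0xff, sizeof(oob));

        memset(&req, 0, sizeof(req));
        req.start = 0;                         /* page-aligned offset */
        req.len = sizeof(data);
        req.ooblen = sizeof(oob);
        req.usr_data = (uintptr_t)data;
        req.usr_oob = (uintptr_t)oob;
        req.mode = MTD_OPS_RAW;

        if (ioctl(fd, MEMWRITE, &req) != 0)
            return 1;

        close(fd);
        return 0;
    }
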
0691 static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
0692 {
0693     struct mtd_file_info *mfi = file->private_data;
0694     struct mtd_info *mtd = mfi->mtd;
0695     struct mtd_info *master = mtd_get_master(mtd);
0696     void __user *argp = (void __user *)arg;
0697     int ret = 0;
0698     struct mtd_info_user info;
0699 
0700     pr_debug("MTD_ioctl\n");
0701 
0702     /*
0703      * Check the file mode to require "dangerous" commands to have write
0704      * permissions.
0705      */
0706     switch (cmd) {
0707     /* "safe" commands */
0708     case MEMGETREGIONCOUNT:
0709     case MEMGETREGIONINFO:
0710     case MEMGETINFO:
0711     case MEMREADOOB:
0712     case MEMREADOOB64:
0713     case MEMISLOCKED:
0714     case MEMGETOOBSEL:
0715     case MEMGETBADBLOCK:
0716     case OTPSELECT:
0717     case OTPGETREGIONCOUNT:
0718     case OTPGETREGIONINFO:
0719     case ECCGETLAYOUT:
0720     case ECCGETSTATS:
0721     case MTDFILEMODE:
0722     case BLKPG:
0723     case BLKRRPART:
0724         break;
0725 
0726     /* "dangerous" commands */
0727     case MEMERASE:
0728     case MEMERASE64:
0729     case MEMLOCK:
0730     case MEMUNLOCK:
0731     case MEMSETBADBLOCK:
0732     case MEMWRITEOOB:
0733     case MEMWRITEOOB64:
0734     case MEMWRITE:
0735     case OTPLOCK:
0736     case OTPERASE:
0737         if (!(file->f_mode & FMODE_WRITE))
0738             return -EPERM;
0739         break;
0740 
0741     default:
0742         return -ENOTTY;
0743     }
0744 
0745     switch (cmd) {
0746     case MEMGETREGIONCOUNT:
0747         if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
0748             return -EFAULT;
0749         break;
0750 
0751     case MEMGETREGIONINFO:
0752     {
0753         uint32_t ur_idx;
0754         struct mtd_erase_region_info *kr;
0755         struct region_info_user __user *ur = argp;
0756 
0757         if (get_user(ur_idx, &(ur->regionindex)))
0758             return -EFAULT;
0759 
0760         if (ur_idx >= mtd->numeraseregions)
0761             return -EINVAL;
0762 
0763         kr = &(mtd->eraseregions[ur_idx]);
0764 
0765         if (put_user(kr->offset, &(ur->offset))
0766             || put_user(kr->erasesize, &(ur->erasesize))
0767             || put_user(kr->numblocks, &(ur->numblocks)))
0768             return -EFAULT;
0769 
0770         break;
0771     }
0772 
0773     case MEMGETINFO:
0774         memset(&info, 0, sizeof(info));
0775         info.type   = mtd->type;
0776         info.flags  = mtd->flags;
0777         info.size   = mtd->size;
0778         info.erasesize  = mtd->erasesize;
0779         info.writesize  = mtd->writesize;
0780         info.oobsize    = mtd->oobsize;
0781         /* The field below is obsolete */
0782         info.padding    = 0;
0783         if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
0784             return -EFAULT;
0785         break;
0786 
0787     case MEMERASE:
0788     case MEMERASE64:
0789     {
0790         struct erase_info *erase;
0791 
0792         erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
0793         if (!erase)
0794             ret = -ENOMEM;
0795         else {
0796             if (cmd == MEMERASE64) {
0797                 struct erase_info_user64 einfo64;
0798 
0799                 if (copy_from_user(&einfo64, argp,
0800                         sizeof(struct erase_info_user64))) {
0801                     kfree(erase);
0802                     return -EFAULT;
0803                 }
0804                 erase->addr = einfo64.start;
0805                 erase->len = einfo64.length;
0806             } else {
0807                 struct erase_info_user einfo32;
0808 
0809                 if (copy_from_user(&einfo32, argp,
0810                         sizeof(struct erase_info_user))) {
0811                     kfree(erase);
0812                     return -EFAULT;
0813                 }
0814                 erase->addr = einfo32.start;
0815                 erase->len = einfo32.length;
0816             }
0817 
0818             ret = mtd_erase(mtd, erase);
0819             kfree(erase);
0820         }
0821         break;
0822     }
0823 
0824     case MEMWRITEOOB:
0825     {
0826         struct mtd_oob_buf buf;
0827         struct mtd_oob_buf __user *buf_user = argp;
0828 
0829         /* NOTE: writes return length to buf_user->length */
0830         if (copy_from_user(&buf, argp, sizeof(buf)))
0831             ret = -EFAULT;
0832         else
0833             ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
0834                 buf.ptr, &buf_user->length);
0835         break;
0836     }
0837 
0838     case MEMREADOOB:
0839     {
0840         struct mtd_oob_buf buf;
0841         struct mtd_oob_buf __user *buf_user = argp;
0842 
0843         /* NOTE: writes return length to buf_user->start */
0844         if (copy_from_user(&buf, argp, sizeof(buf)))
0845             ret = -EFAULT;
0846         else
0847             ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
0848                 buf.ptr, &buf_user->start);
0849         break;
0850     }
0851 
0852     case MEMWRITEOOB64:
0853     {
0854         struct mtd_oob_buf64 buf;
0855         struct mtd_oob_buf64 __user *buf_user = argp;
0856 
0857         if (copy_from_user(&buf, argp, sizeof(buf)))
0858             ret = -EFAULT;
0859         else
0860             ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
0861                 (void __user *)(uintptr_t)buf.usr_ptr,
0862                 &buf_user->length);
0863         break;
0864     }
0865 
0866     case MEMREADOOB64:
0867     {
0868         struct mtd_oob_buf64 buf;
0869         struct mtd_oob_buf64 __user *buf_user = argp;
0870 
0871         if (copy_from_user(&buf, argp, sizeof(buf)))
0872             ret = -EFAULT;
0873         else
0874             ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
0875                 (void __user *)(uintptr_t)buf.usr_ptr,
0876                 &buf_user->length);
0877         break;
0878     }
0879 
0880     case MEMWRITE:
0881     {
0882         ret = mtdchar_write_ioctl(mtd,
0883               (struct mtd_write_req __user *)arg);
0884         break;
0885     }
0886 
0887     case MEMLOCK:
0888     {
0889         struct erase_info_user einfo;
0890 
0891         if (copy_from_user(&einfo, argp, sizeof(einfo)))
0892             return -EFAULT;
0893 
0894         ret = mtd_lock(mtd, einfo.start, einfo.length);
0895         break;
0896     }
0897 
0898     case MEMUNLOCK:
0899     {
0900         struct erase_info_user einfo;
0901 
0902         if (copy_from_user(&einfo, argp, sizeof(einfo)))
0903             return -EFAULT;
0904 
0905         ret = mtd_unlock(mtd, einfo.start, einfo.length);
0906         break;
0907     }
0908 
0909     case MEMISLOCKED:
0910     {
0911         struct erase_info_user einfo;
0912 
0913         if (copy_from_user(&einfo, argp, sizeof(einfo)))
0914             return -EFAULT;
0915 
0916         ret = mtd_is_locked(mtd, einfo.start, einfo.length);
0917         break;
0918     }
0919 
0920     /* Legacy interface */
0921     case MEMGETOOBSEL:
0922     {
0923         struct nand_oobinfo oi;
0924 
0925         if (!master->ooblayout)
0926             return -EOPNOTSUPP;
0927 
0928         ret = get_oobinfo(mtd, &oi);
0929         if (ret)
0930             return ret;
0931 
0932         if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
0933             return -EFAULT;
0934         break;
0935     }
0936 
0937     case MEMGETBADBLOCK:
0938     {
0939         loff_t offs;
0940 
0941         if (copy_from_user(&offs, argp, sizeof(loff_t)))
0942             return -EFAULT;
0943         return mtd_block_isbad(mtd, offs);
0944     }
0945 
0946     case MEMSETBADBLOCK:
0947     {
0948         loff_t offs;
0949 
0950         if (copy_from_user(&offs, argp, sizeof(loff_t)))
0951             return -EFAULT;
0952         return mtd_block_markbad(mtd, offs);
0953     }
0954 
0955     case OTPSELECT:
0956     {
0957         int mode;
0958         if (copy_from_user(&mode, argp, sizeof(int)))
0959             return -EFAULT;
0960 
0961         mfi->mode = MTD_FILE_MODE_NORMAL;
0962 
0963         ret = otp_select_filemode(mfi, mode);
0964 
0965         file->f_pos = 0;
0966         break;
0967     }
0968 
0969     case OTPGETREGIONCOUNT:
0970     case OTPGETREGIONINFO:
0971     {
0972         struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
0973         size_t retlen;
0974         if (!buf)
0975             return -ENOMEM;
0976         switch (mfi->mode) {
0977         case MTD_FILE_MODE_OTP_FACTORY:
0978             ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
0979             break;
0980         case MTD_FILE_MODE_OTP_USER:
0981             ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
0982             break;
0983         default:
0984             ret = -EINVAL;
0985             break;
0986         }
0987         if (!ret) {
0988             if (cmd == OTPGETREGIONCOUNT) {
0989                 int nbr = retlen / sizeof(struct otp_info);
0990                 ret = copy_to_user(argp, &nbr, sizeof(int));
0991             } else
0992                 ret = copy_to_user(argp, buf, retlen);
0993             if (ret)
0994                 ret = -EFAULT;
0995         }
0996         kfree(buf);
0997         break;
0998     }
0999 
1000     case OTPLOCK:
1001     case OTPERASE:
1002     {
1003         struct otp_info oinfo;
1004 
1005         if (mfi->mode != MTD_FILE_MODE_OTP_USER)
1006             return -EINVAL;
1007         if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
1008             return -EFAULT;
1009         if (cmd == OTPLOCK)
1010             ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
1011         else
1012             ret = mtd_erase_user_prot_reg(mtd, oinfo.start, oinfo.length);
1013         break;
1014     }
1015 
1016     /* This ioctl is being deprecated - it truncates the ECC layout */
1017     case ECCGETLAYOUT:
1018     {
1019         struct nand_ecclayout_user *usrlay;
1020 
1021         if (!master->ooblayout)
1022             return -EOPNOTSUPP;
1023 
1024         usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
1025         if (!usrlay)
1026             return -ENOMEM;
1027 
1028         shrink_ecclayout(mtd, usrlay);
1029 
1030         if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
1031             ret = -EFAULT;
1032         kfree(usrlay);
1033         break;
1034     }
1035 
1036     case ECCGETSTATS:
1037     {
1038         if (copy_to_user(argp, &mtd->ecc_stats,
1039                  sizeof(struct mtd_ecc_stats)))
1040             return -EFAULT;
1041         break;
1042     }
1043 
1044     case MTDFILEMODE:
1045     {
1046         mfi->mode = 0;
1047 
1048         switch(arg) {
1049         case MTD_FILE_MODE_OTP_FACTORY:
1050         case MTD_FILE_MODE_OTP_USER:
1051             ret = otp_select_filemode(mfi, arg);
1052             break;
1053 
1054         case MTD_FILE_MODE_RAW:
1055             if (!mtd_has_oob(mtd))
1056                 return -EOPNOTSUPP;
1057             mfi->mode = arg;
1058             break;
1059 
1060         case MTD_FILE_MODE_NORMAL:
1061             break;
1062         default:
1063             ret = -EINVAL;
1064         }
1065         file->f_pos = 0;
1066         break;
1067     }
1068 
1069     case BLKPG:
1070     {
1071         struct blkpg_ioctl_arg __user *blk_arg = argp;
1072         struct blkpg_ioctl_arg a;
1073 
1074         if (copy_from_user(&a, blk_arg, sizeof(a)))
1075             ret = -EFAULT;
1076         else
1077             ret = mtdchar_blkpg_ioctl(mtd, &a);
1078         break;
1079     }
1080 
1081     case BLKRRPART:
1082     {
1083         /* No reread partition feature. Just return ok */
1084         ret = 0;
1085         break;
1086     }
1087     }
1088 
1089     return ret;
1090 } /* mtdchar_ioctl */
1091 
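Taken together, the handler above is the userspace protocol: query geometry with MEMGETINFO, then use the "dangerous" commands (which require a write-opened descriptor) such as MEMERASE. A hedged userspace sketch erasing the first eraseblock (device node is illustrative; start/length must be eraseblock-aligned):

    /* Userspace sketch: fetch device geometry, then erase the first block. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <mtd/mtd-user.h>

    int main(void)
    {
        struct mtd_info_user info;
        struct erase_info_user ei;
        int fd = open("/dev/mtd0", O_RDWR);   /* MEMERASE is a "dangerous" cmd */

        if (fd < 0)
            return 1;

        if (ioctl(fd, MEMGETINFO, &info) != 0)
            return 1;
        printf("size=%u erasesize=%u writesize=%u oobsize=%u\n",
               info.size, info.erasesize, info.writesize, info.oobsize);

        ei.start = 0;
        ei.length = info.erasesize;
        if (ioctl(fd, MEMERASE, &ei) != 0)
            perror("MEMERASE");

        close(fd);
        return 0;
    }
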
1092 static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
1093 {
1094     struct mtd_file_info *mfi = file->private_data;
1095     struct mtd_info *mtd = mfi->mtd;
1096     struct mtd_info *master = mtd_get_master(mtd);
1097     int ret;
1098 
1099     mutex_lock(&master->master.chrdev_lock);
1100     ret = mtdchar_ioctl(file, cmd, arg);
1101     mutex_unlock(&master->master.chrdev_lock);
1102 
1103     return ret;
1104 }
1105 
1106 #ifdef CONFIG_COMPAT
1107 
1108 struct mtd_oob_buf32 {
1109     u_int32_t start;
1110     u_int32_t length;
1111     compat_caddr_t ptr; /* unsigned char* */
1112 };
1113 
1114 #define MEMWRITEOOB32       _IOWR('M', 3, struct mtd_oob_buf32)
1115 #define MEMREADOOB32        _IOWR('M', 4, struct mtd_oob_buf32)
1116 
1117 static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
1118     unsigned long arg)
1119 {
1120     struct mtd_file_info *mfi = file->private_data;
1121     struct mtd_info *mtd = mfi->mtd;
1122     struct mtd_info *master = mtd_get_master(mtd);
1123     void __user *argp = compat_ptr(arg);
1124     int ret = 0;
1125 
1126     mutex_lock(&master->master.chrdev_lock);
1127 
1128     switch (cmd) {
1129     case MEMWRITEOOB32:
1130     {
1131         struct mtd_oob_buf32 buf;
1132         struct mtd_oob_buf32 __user *buf_user = argp;
1133 
1134         if (!(file->f_mode & FMODE_WRITE)) {
1135             ret = -EPERM;
1136             break;
1137         }
1138 
1139         if (copy_from_user(&buf, argp, sizeof(buf)))
1140             ret = -EFAULT;
1141         else
1142             ret = mtdchar_writeoob(file, mtd, buf.start,
1143                 buf.length, compat_ptr(buf.ptr),
1144                 &buf_user->length);
1145         break;
1146     }
1147 
1148     case MEMREADOOB32:
1149     {
1150         struct mtd_oob_buf32 buf;
1151         struct mtd_oob_buf32 __user *buf_user = argp;
1152 
1153         /* NOTE: writes return length to buf->start */
1154         if (copy_from_user(&buf, argp, sizeof(buf)))
1155             ret = -EFAULT;
1156         else
1157             ret = mtdchar_readoob(file, mtd, buf.start,
1158                 buf.length, compat_ptr(buf.ptr),
1159                 &buf_user->start);
1160         break;
1161     }
1162 
1163     case BLKPG:
1164     {
1165         /* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
1166         struct blkpg_compat_ioctl_arg __user *uarg = argp;
1167         struct blkpg_compat_ioctl_arg compat_arg;
1168         struct blkpg_ioctl_arg a;
1169 
1170         if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
1171             ret = -EFAULT;
1172             break;
1173         }
1174 
1175         memset(&a, 0, sizeof(a));
1176         a.op = compat_arg.op;
1177         a.flags = compat_arg.flags;
1178         a.datalen = compat_arg.datalen;
1179         a.data = compat_ptr(compat_arg.data);
1180 
1181         ret = mtdchar_blkpg_ioctl(mtd, &a);
1182         break;
1183     }
1184 
1185     default:
1186         ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
1187     }
1188 
1189     mutex_unlock(&master->master.chrdev_lock);
1190 
1191     return ret;
1192 }
1193 
1194 #endif /* CONFIG_COMPAT */
1195 
1196 /*
1197  * try to determine where a shared mapping can be made
1198  * - only supported for NOMMU at the moment (an MMU kernel can't, and
1199  *   doesn't, copy private mappings)
1200  */
1201 #ifndef CONFIG_MMU
1202 static unsigned long mtdchar_get_unmapped_area(struct file *file,
1203                        unsigned long addr,
1204                        unsigned long len,
1205                        unsigned long pgoff,
1206                        unsigned long flags)
1207 {
1208     struct mtd_file_info *mfi = file->private_data;
1209     struct mtd_info *mtd = mfi->mtd;
1210     unsigned long offset;
1211     int ret;
1212 
1213     if (addr != 0)
1214         return (unsigned long) -EINVAL;
1215 
1216     if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
1217         return (unsigned long) -EINVAL;
1218 
1219     offset = pgoff << PAGE_SHIFT;
1220     if (offset > mtd->size - len)
1221         return (unsigned long) -EINVAL;
1222 
1223     ret = mtd_get_unmapped_area(mtd, len, offset, flags);
1224     return ret == -EOPNOTSUPP ? -ENODEV : ret;
1225 }
1226 
1227 static unsigned mtdchar_mmap_capabilities(struct file *file)
1228 {
1229     struct mtd_file_info *mfi = file->private_data;
1230 
1231     return mtd_mmap_capabilities(mfi->mtd);
1232 }
1233 #endif
1234 
1235 /*
1236  * set up a mapping for shared memory segments
1237  */
1238 static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
1239 {
1240 #ifdef CONFIG_MMU
1241     struct mtd_file_info *mfi = file->private_data;
1242     struct mtd_info *mtd = mfi->mtd;
1243     struct map_info *map = mtd->priv;
1244 
1245     /* This is broken because it assumes the MTD device is map-based
1246        and that mtd->priv is a valid struct map_info.  It should be
1247        replaced with something that uses the mtd_get_unmapped_area()
1248        operation properly. */
1249     if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
1250 #ifdef pgprot_noncached
1251         if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
1252             vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1253 #endif
1254         return vm_iomap_memory(vma, map->phys, map->size);
1255     }
1256     return -ENODEV;
1257 #else
1258     return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
1259 #endif
1260 }
1261 
1262 static const struct file_operations mtd_fops = {
1263     .owner      = THIS_MODULE,
1264     .llseek     = mtdchar_lseek,
1265     .read       = mtdchar_read,
1266     .write      = mtdchar_write,
1267     .unlocked_ioctl = mtdchar_unlocked_ioctl,
1268 #ifdef CONFIG_COMPAT
1269     .compat_ioctl   = mtdchar_compat_ioctl,
1270 #endif
1271     .open       = mtdchar_open,
1272     .release    = mtdchar_close,
1273     .mmap       = mtdchar_mmap,
1274 #ifndef CONFIG_MMU
1275     .get_unmapped_area = mtdchar_get_unmapped_area,
1276     .mmap_capabilities = mtdchar_mmap_capabilities,
1277 #endif
1278 };
1279 
1280 int __init init_mtdchar(void)
1281 {
1282     int ret;
1283 
1284     ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
1285                    "mtd", &mtd_fops);
1286     if (ret < 0) {
1287         pr_err("Can't allocate major number %d for MTD\n",
1288                MTD_CHAR_MAJOR);
1289         return ret;
1290     }
1291 
1292     return ret;
1293 }
1294 
1295 void __exit cleanup_mtdchar(void)
1296 {
1297     __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1298 }
1299 
1300 MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);