// SPDX-License-Identifier: GPL-2.0
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/kref.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

/*
 * Set a 10 second timeout for polling write request busy state. Note that the
 * mmc core sets a 3 second timeout for SD cards, and SDHCI has long had a 10
 * second software timer to time out the whole request, so 10 seconds should be
 * ample.
 */
#define MMC_BLK_TIMEOUT_MS  (10 * 1000)
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
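
/*
 * Worked example (illustrative values, not from the spec text): a CMD6
 * SWITCH argument of 0x03B30100 encodes access mode 0x03 (write byte) in
 * bits [25:24], EXT_CSD index 0xB3 (179, PARTITION_CONFIG) in bits [23:16]
 * and value 0x01 in bits [15:8], so MMC_EXTRACT_INDEX_FROM_ARG() yields
 * 0xB3 and MMC_EXTRACT_VALUE_FROM_ARG() yields 0x01.
 */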

#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
                  (rq_data_dir(req) == WRITE))
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so the number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256
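
/*
 * Example of the sizing math above, assuming the usual Kconfig default of
 * CONFIG_MMC_BLOCK_MINORS=8: (1 << 20) / 8 = 131072 possible devices,
 * which MAX_DEVICES then caps at 256.
 */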

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);

struct mmc_blk_busy_data {
    struct mmc_card *card;
    u32 status;
};

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
    struct device   *parent;
    struct gendisk  *disk;
    struct mmc_queue queue;
    struct list_head part;
    struct list_head rpmbs;

    unsigned int    flags;
#define MMC_BLK_CMD23   (1 << 0)    /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR  (1 << 1)    /* MMC Reliable write support */

    struct kref kref;
    unsigned int    read_only;
    unsigned int    part_type;
    unsigned int    reset_done;
#define MMC_BLK_READ        BIT(0)
#define MMC_BLK_WRITE       BIT(1)
#define MMC_BLK_DISCARD     BIT(2)
#define MMC_BLK_SECDISCARD  BIT(3)
#define MMC_BLK_CQE_RECOVERY    BIT(4)
#define MMC_BLK_TRIM        BIT(5)

    /*
     * Only set in the main mmc_blk_data associated
     * with the mmc_card with dev_set_drvdata, and keeps
     * track of the currently selected device partition.
     */
    unsigned int    part_curr;
    int area_type;

    /* debugfs files (only in main mmc_blk_data) */
    struct dentry *status_dentry;
    struct dentry *ext_csd_dentry;
};

/* Device type for RPMB character devices */
static dev_t mmc_rpmb_devt;

/* Bus type for RPMB character devices */
static struct bus_type mmc_rpmb_bus_type = {
    .name = "mmc_rpmb",
};

/**
 * struct mmc_rpmb_data - special RPMB device type for these areas
 * @dev: the device for the RPMB area
 * @chrdev: character device for the RPMB area
 * @id: unique device ID number
 * @part_index: partition index (0 on first)
 * @md: parent MMC block device
 * @node: list item, so we can put this device on a list
 */
struct mmc_rpmb_data {
    struct device dev;
    struct cdev chrdev;
    int id;
    unsigned int part_index;
    struct mmc_blk_data *md;
    struct list_head node;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
                      unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
                   struct mmc_card *card,
                   int recovery_mode,
                   struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
    struct mmc_blk_data *md;

    mutex_lock(&open_lock);
    md = disk->private_data;
    if (md && !kref_get_unless_zero(&md->kref))
        md = NULL;
    mutex_unlock(&open_lock);

    return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
    int devidx = disk->first_minor / perdev_minors;
    return devidx;
}

static void mmc_blk_kref_release(struct kref *ref)
{
    struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref);
    int devidx;

    devidx = mmc_get_devidx(md->disk);
    ida_simple_remove(&mmc_blk_ida, devidx);

    mutex_lock(&open_lock);
    md->disk->private_data = NULL;
    mutex_unlock(&open_lock);

    put_disk(md->disk);
    kfree(md);
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
    kref_put(&md->kref, mmc_blk_kref_release);
}

static ssize_t power_ro_lock_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
    int ret;
    struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
    struct mmc_card *card = md->queue.card;
    int locked = 0;

    if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
        locked = 2;
    else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
        locked = 1;

    ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

    mmc_blk_put(md);

    return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
    int ret;
    struct mmc_blk_data *md, *part_md;
    struct mmc_queue *mq;
    struct request *req;
    unsigned long set;

    if (kstrtoul(buf, 0, &set))
        return -EINVAL;

    if (set != 1)
        return count;

    md = mmc_blk_get(dev_to_disk(dev));
    mq = &md->queue;

    /* Dispatch locking to the block layer */
    req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
    if (IS_ERR(req)) {
        count = PTR_ERR(req);
        goto out_put;
    }
    req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
    blk_execute_rq(req, false);
    ret = req_to_mmc_queue_req(req)->drv_op_result;
    blk_mq_free_request(req);

    if (!ret) {
        pr_info("%s: Locking boot partition ro until next power on\n",
            md->disk->disk_name);
        set_disk_ro(md->disk, 1);

        list_for_each_entry(part_md, &md->part, part)
            if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
                pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
                set_disk_ro(part_md->disk, 1);
            }
    }
out_put:
    mmc_blk_put(md);
    return count;
}

static DEVICE_ATTR(ro_lock_until_next_power_on, 0,
        power_ro_lock_show, power_ro_lock_store);
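
/*
 * Usage sketch (assuming a typical sysfs layout such as
 * /sys/block/mmcblk0boot0/): writing 1 to the attribute triggers the
 * MMC_DRV_OP_BOOT_WP request above, e.g.
 *
 *     echo 1 > /sys/block/mmcblk0boot0/ro_lock_until_next_power_on
 *
 * Any other value is silently accepted and ignored, per the store
 * handler's "set != 1" early return.
 */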

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
                 char *buf)
{
    int ret;
    struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

    ret = snprintf(buf, PAGE_SIZE, "%d\n",
               get_disk_ro(dev_to_disk(dev)) ^
               md->read_only);
    mmc_blk_put(md);
    return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
                  const char *buf, size_t count)
{
    int ret;
    char *end;
    struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
    unsigned long set = simple_strtoul(buf, &end, 0);

    if (end == buf) {
        ret = -EINVAL;
        goto out;
    }

    set_disk_ro(dev_to_disk(dev), set || md->read_only);
    ret = count;
out:
    mmc_blk_put(md);
    return ret;
}

static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store);
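
/*
 * Usage sketch (device name assumed): force_ro toggles the software
 * read-only state on top of the card's own read_only flag, e.g.
 *
 *     echo 1 > /sys/block/mmcblk0/force_ro
 */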

static struct attribute *mmc_disk_attrs[] = {
    &dev_attr_force_ro.attr,
    &dev_attr_ro_lock_until_next_power_on.attr,
    NULL,
};

static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj,
        struct attribute *a, int n)
{
    struct device *dev = kobj_to_dev(kobj);
    struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
    umode_t mode = a->mode;

    if (a == &dev_attr_ro_lock_until_next_power_on.attr &&
        (md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
        md->queue.card->ext_csd.boot_ro_lockable) {
        mode = S_IRUGO;
        if (!(md->queue.card->ext_csd.boot_ro_lock &
                EXT_CSD_BOOT_WP_B_PWR_WP_DIS))
            mode |= S_IWUSR;
    }

    mmc_blk_put(md);
    return mode;
}

static const struct attribute_group mmc_disk_attr_group = {
    .is_visible = mmc_disk_attrs_is_visible,
    .attrs      = mmc_disk_attrs,
};

static const struct attribute_group *mmc_disk_attr_groups[] = {
    &mmc_disk_attr_group,
    NULL,
};

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
    struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
    int ret = -ENXIO;

    mutex_lock(&block_mutex);
    if (md) {
        ret = 0;
        if ((mode & FMODE_WRITE) && md->read_only) {
            mmc_blk_put(md);
            ret = -EROFS;
        }
    }
    mutex_unlock(&block_mutex);

    return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
    struct mmc_blk_data *md = disk->private_data;

    mutex_lock(&block_mutex);
    mmc_blk_put(md);
    mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
    geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
    geo->heads = 4;
    geo->sectors = 16;
    return 0;
}
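
/*
 * Worked example of the fake geometry above: a card of 33554432 sectors
 * (16 GiB at 512-byte sectors) reports 4 heads, 16 sectors per track and
 * 33554432 / 64 = 524288 cylinders.
 */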

struct mmc_blk_ioc_data {
    struct mmc_ioc_cmd ic;
    unsigned char *buf;
    u64 buf_bytes;
    struct mmc_rpmb_data *rpmb;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
    struct mmc_ioc_cmd __user *user)
{
    struct mmc_blk_ioc_data *idata;
    int err;

    idata = kmalloc(sizeof(*idata), GFP_KERNEL);
    if (!idata) {
        err = -ENOMEM;
        goto out;
    }

    if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
        err = -EFAULT;
        goto idata_err;
    }

    idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
    if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
        err = -EOVERFLOW;
        goto idata_err;
    }

    if (!idata->buf_bytes) {
        idata->buf = NULL;
        return idata;
    }

    idata->buf = memdup_user((void __user *)(unsigned long)
                 idata->ic.data_ptr, idata->buf_bytes);
    if (IS_ERR(idata->buf)) {
        err = PTR_ERR(idata->buf);
        goto idata_err;
    }

    return idata;

idata_err:
    kfree(idata);
out:
    return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
                      struct mmc_blk_ioc_data *idata)
{
    struct mmc_ioc_cmd *ic = &idata->ic;

    if (copy_to_user(&(ic_ptr->response), ic->response,
             sizeof(ic->response)))
        return -EFAULT;

    if (!idata->ic.write_flag) {
        if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
                 idata->buf, idata->buf_bytes))
            return -EFAULT;
    }

    return 0;
}

static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
                   struct mmc_blk_ioc_data *idata)
{
    struct mmc_command cmd = {}, sbc = {};
    struct mmc_data data = {};
    struct mmc_request mrq = {};
    struct scatterlist sg;
    int err;
    unsigned int target_part;

    if (!card || !md || !idata)
        return -EINVAL;

    /*
     * RPMB accesses come in from the character device, so we
     * need to target these explicitly. Else we just target the
     * partition type for the block device the ioctl() was issued
     * on.
     */
    if (idata->rpmb) {
        /* Support multiple RPMB partitions */
        target_part = idata->rpmb->part_index;
        target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
    } else {
        target_part = md->part_type;
    }

    cmd.opcode = idata->ic.opcode;
    cmd.arg = idata->ic.arg;
    cmd.flags = idata->ic.flags;

    if (idata->buf_bytes) {
        data.sg = &sg;
        data.sg_len = 1;
        data.blksz = idata->ic.blksz;
        data.blocks = idata->ic.blocks;

        sg_init_one(data.sg, idata->buf, idata->buf_bytes);

        if (idata->ic.write_flag)
            data.flags = MMC_DATA_WRITE;
        else
            data.flags = MMC_DATA_READ;

        /* data.flags must already be set before doing this. */
        mmc_set_data_timeout(&data, card);

        /* Allow overriding the timeout_ns for empirical tuning. */
        if (idata->ic.data_timeout_ns)
            data.timeout_ns = idata->ic.data_timeout_ns;

        if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
            /*
             * Pretend this is a data transfer and rely on the
             * host driver to compute timeout.  When all host
             * drivers support cmd.cmd_timeout for R1B, this
             * can be changed to:
             *
             *     mrq.data = NULL;
             *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
             */
            data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
        }

        mrq.data = &data;
    }

    mrq.cmd = &cmd;

    err = mmc_blk_part_switch(card, target_part);
    if (err)
        return err;

    if (idata->ic.is_acmd) {
        err = mmc_app_cmd(card->host, card);
        if (err)
            return err;
    }

    if (idata->rpmb) {
        sbc.opcode = MMC_SET_BLOCK_COUNT;
        /*
         * We don't do any block count validation because the max size
         * may be increased by a future standard. We just copy the
         * 'Reliable Write' bit here.
         */
        sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
        sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
        mrq.sbc = &sbc;
    }

    if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
        (cmd.opcode == MMC_SWITCH))
        return mmc_sanitize(card, idata->ic.cmd_timeout_ms);

    mmc_wait_for_req(card->host, &mrq);
    memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));

    if (cmd.error) {
        dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
                        __func__, cmd.error);
        return cmd.error;
    }
    if (data.error) {
        dev_err(mmc_dev(card->host), "%s: data error %d\n",
                        __func__, data.error);
        return data.error;
    }

    /*
     * Make sure the cache of the PARTITION_CONFIG register and
     * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write
     * changed it successfully.
     */
    if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
        (cmd.opcode == MMC_SWITCH)) {
        struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
        u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);

        /*
         * Update the cache so the next mmc_blk_part_switch call operates
         * on up-to-date data.
         */
        card->ext_csd.part_config = value;
        main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
    }

    /*
     * Make sure to update CACHE_CTRL in case it was changed. The cache
     * will get turned back on if the card is re-initialized, e.g.
     * suspend/resume or hw reset in recovery.
     */
    if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
        (cmd.opcode == MMC_SWITCH)) {
        u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;

        card->ext_csd.cache_ctrl = value;
    }

    /*
     * According to the SD specs, some commands require a delay after
     * issuing the command.
     */
    if (idata->ic.postsleep_min_us)
        usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

    if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
        /*
         * Ensure the RPMB/R1B command has completed by polling with
         * CMD13 "Send Status". The default timeout may be overridden
         * by a custom cmd_timeout_ms from userspace.
         */
        err = mmc_poll_for_busy(card, idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS,
                    false, MMC_BUSY_IO);
    }

    return err;
}

static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
                 struct mmc_ioc_cmd __user *ic_ptr,
                 struct mmc_rpmb_data *rpmb)
{
    struct mmc_blk_ioc_data *idata;
    struct mmc_blk_ioc_data *idatas[1];
    struct mmc_queue *mq;
    struct mmc_card *card;
    int err = 0, ioc_err = 0;
    struct request *req;

    idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
    if (IS_ERR(idata))
        return PTR_ERR(idata);
    /* This will be NULL on non-RPMB ioctl()s */
    idata->rpmb = rpmb;

    card = md->queue.card;
    if (IS_ERR(card)) {
        err = PTR_ERR(card);
        goto cmd_done;
    }

    /*
     * Dispatch the ioctl() into the block request queue.
     */
    mq = &md->queue;
    req = blk_mq_alloc_request(mq->queue,
        idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
    if (IS_ERR(req)) {
        err = PTR_ERR(req);
        goto cmd_done;
    }
    idatas[0] = idata;
    req_to_mmc_queue_req(req)->drv_op =
        rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
    req_to_mmc_queue_req(req)->drv_op_data = idatas;
    req_to_mmc_queue_req(req)->ioc_count = 1;
    blk_execute_rq(req, false);
    ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
    err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
    blk_mq_free_request(req);

cmd_done:
    kfree(idata->buf);
    kfree(idata);
    return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
                   struct mmc_ioc_multi_cmd __user *user,
                   struct mmc_rpmb_data *rpmb)
{
    struct mmc_blk_ioc_data **idata = NULL;
    struct mmc_ioc_cmd __user *cmds = user->cmds;
    struct mmc_card *card;
    struct mmc_queue *mq;
    int err = 0, ioc_err = 0;
    __u64 num_of_cmds;
    unsigned int i, n;
    struct request *req;

    if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
               sizeof(num_of_cmds)))
        return -EFAULT;

    if (!num_of_cmds)
        return 0;

    if (num_of_cmds > MMC_IOC_MAX_CMDS)
        return -EINVAL;

    n = num_of_cmds;
    idata = kcalloc(n, sizeof(*idata), GFP_KERNEL);
    if (!idata)
        return -ENOMEM;

    for (i = 0; i < n; i++) {
        idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
        if (IS_ERR(idata[i])) {
            err = PTR_ERR(idata[i]);
            n = i;
            goto cmd_err;
        }
        /* This will be NULL on non-RPMB ioctl()s */
        idata[i]->rpmb = rpmb;
    }

    card = md->queue.card;
    if (IS_ERR(card)) {
        err = PTR_ERR(card);
        goto cmd_err;
    }

    /*
     * Dispatch the ioctl()s into the block request queue.
     */
    mq = &md->queue;
    req = blk_mq_alloc_request(mq->queue,
        idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
    if (IS_ERR(req)) {
        err = PTR_ERR(req);
        goto cmd_err;
    }
    req_to_mmc_queue_req(req)->drv_op =
        rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
    req_to_mmc_queue_req(req)->drv_op_data = idata;
    req_to_mmc_queue_req(req)->ioc_count = n;
    blk_execute_rq(req, false);
    ioc_err = req_to_mmc_queue_req(req)->drv_op_result;

    /* copy to user if data and response */
    for (i = 0; i < n && !err; i++)
        err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

    blk_mq_free_request(req);

cmd_err:
    for (i = 0; i < n; i++) {
        kfree(idata[i]->buf);
        kfree(idata[i]);
    }
    kfree(idata);
    return ioc_err ? ioc_err : err;
}

static int mmc_blk_check_blkdev(struct block_device *bdev)
{
    /*
     * The caller must have CAP_SYS_RAWIO, and must be calling this on the
     * whole block device, not on a partition.  This prevents overspray
     * between sibling partitions.
     */
    if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev))
        return -EPERM;
    return 0;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long arg)
{
    struct mmc_blk_data *md;
    int ret;

    switch (cmd) {
    case MMC_IOC_CMD:
        ret = mmc_blk_check_blkdev(bdev);
        if (ret)
            return ret;
        md = mmc_blk_get(bdev->bd_disk);
        if (!md)
            return -EINVAL;
        ret = mmc_blk_ioctl_cmd(md,
                    (struct mmc_ioc_cmd __user *)arg,
                    NULL);
        mmc_blk_put(md);
        return ret;
    case MMC_IOC_MULTI_CMD:
        ret = mmc_blk_check_blkdev(bdev);
        if (ret)
            return ret;
        md = mmc_blk_get(bdev->bd_disk);
        if (!md)
            return -EINVAL;
        ret = mmc_blk_ioctl_multi_cmd(md,
                    (struct mmc_ioc_multi_cmd __user *)arg,
                    NULL);
        mmc_blk_put(md);
        return ret;
    default:
        return -EINVAL;
    }
}
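
/*
 * Hedged userspace sketch (not part of this driver, assumptions noted):
 * issuing a bare CMD13 (SEND_STATUS) through the MMC_IOC_CMD path above.
 * The device node name and the RCA of 1 are assumptions, and the response
 * flag bits are mirrored from <linux/mmc/core.h> because the UAPI header
 * does not export MMC_RSP_R1/MMC_CMD_AC:
 *
 *     #include <fcntl.h>
 *     #include <stdio.h>
 *     #include <string.h>
 *     #include <unistd.h>
 *     #include <sys/ioctl.h>
 *     #include <linux/mmc/ioctl.h>
 *
 *     int main(void)
 *     {
 *         struct mmc_ioc_cmd ic;
 *         int fd = open("/dev/mmcblk0", O_RDONLY); // whole device; needs CAP_SYS_RAWIO
 *
 *         if (fd < 0)
 *             return 1;
 *         memset(&ic, 0, sizeof(ic));
 *         ic.opcode = 13;   // SEND_STATUS
 *         ic.arg = 1 << 16; // RCA in bits [31:16], assumed to be 1
 *         // MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE == MMC_RSP_R1; MMC_CMD_AC is 0
 *         ic.flags = (1 << 0) | (1 << 2) | (1 << 4);
 *         if (ioctl(fd, MMC_IOC_CMD, &ic) == 0)
 *             printf("card status 0x%08x\n", ic.response[0]);
 *         close(fd);
 *         return 0;
 *     }
 */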

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long arg)
{
    return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static int mmc_blk_alternative_gpt_sector(struct gendisk *disk,
                      sector_t *sector)
{
    struct mmc_blk_data *md;
    int ret;

    md = mmc_blk_get(disk);
    if (!md)
        return -EINVAL;

    if (md->queue.card)
        ret = mmc_card_alternative_gpt_sector(md->queue.card, sector);
    else
        ret = -ENODEV;

    mmc_blk_put(md);

    return ret;
}

static const struct block_device_operations mmc_bdops = {
    .open           = mmc_blk_open,
    .release        = mmc_blk_release,
    .getgeo         = mmc_blk_getgeo,
    .owner          = THIS_MODULE,
    .ioctl          = mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl       = mmc_blk_compat_ioctl,
#endif
    .alternative_gpt_sector = mmc_blk_alternative_gpt_sector,
};

static int mmc_blk_part_switch_pre(struct mmc_card *card,
                   unsigned int part_type)
{
    int ret = 0;

    if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
        if (card->ext_csd.cmdq_en) {
            ret = mmc_cmdq_disable(card);
            if (ret)
                return ret;
        }
        mmc_retune_pause(card->host);
    }

    return ret;
}

static int mmc_blk_part_switch_post(struct mmc_card *card,
                    unsigned int part_type)
{
    int ret = 0;

    if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
        mmc_retune_unpause(card->host);
        if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
            ret = mmc_cmdq_enable(card);
    }

    return ret;
}

static inline int mmc_blk_part_switch(struct mmc_card *card,
                      unsigned int part_type)
{
    int ret = 0;
    struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

    if (main_md->part_curr == part_type)
        return 0;

    if (mmc_card_mmc(card)) {
        u8 part_config = card->ext_csd.part_config;

        ret = mmc_blk_part_switch_pre(card, part_type);
        if (ret)
            return ret;

        part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
        part_config |= part_type;

        ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                 EXT_CSD_PART_CONFIG, part_config,
                 card->ext_csd.part_time);
        if (ret) {
            mmc_blk_part_switch_post(card, part_type);
            return ret;
        }

        card->ext_csd.part_config = part_config;

        ret = mmc_blk_part_switch_post(card, main_md->part_curr);
    }

    main_md->part_curr = part_type;
    return ret;
}
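
/*
 * Worked example of the switch above (cached value illustrative): with a
 * cached PARTITION_CONFIG of 0x48 and a target of
 * EXT_CSD_PART_CONFIG_ACC_RPMB (0x3), the low three ACC bits are cleared
 * and replaced, giving (0x48 & ~0x7) | 0x3 = 0x4B as the byte written by
 * the CMD6 in mmc_switch().
 */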

static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
    int err;
    u32 result;
    __be32 *blocks;

    struct mmc_request mrq = {};
    struct mmc_command cmd = {};
    struct mmc_data data = {};

    struct scatterlist sg;

    cmd.opcode = MMC_APP_CMD;
    cmd.arg = card->rca << 16;
    cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

    err = mmc_wait_for_cmd(card->host, &cmd, 0);
    if (err)
        return err;
    if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
        return -EIO;

    memset(&cmd, 0, sizeof(struct mmc_command));

    cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
    cmd.arg = 0;
    cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

    data.blksz = 4;
    data.blocks = 1;
    data.flags = MMC_DATA_READ;
    data.sg = &sg;
    data.sg_len = 1;
    mmc_set_data_timeout(&data, card);

    mrq.cmd = &cmd;
    mrq.data = &data;

    blocks = kmalloc(4, GFP_KERNEL);
    if (!blocks)
        return -ENOMEM;

    sg_init_one(&sg, blocks, 4);

    mmc_wait_for_req(card->host, &mrq);

    result = ntohl(*blocks);
    kfree(blocks);

    if (cmd.error || data.error)
        return -EIO;

    *written_blocks = result;

    return 0;
}

static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
{
    if (host->actual_clock)
        return host->actual_clock / 1000;

    /* Clock may be subject to a divisor, fudge it by a factor of 2. */
    if (host->ios.clock)
        return host->ios.clock / 2000;

    /* How can there be no clock? */
    WARN_ON_ONCE(1);
    return 100; /* 100 kHz is the minimum possible value */
}

static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
                        struct mmc_data *data)
{
    unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
    unsigned int khz;

    if (data->timeout_clks) {
        khz = mmc_blk_clock_khz(host);
        ms += DIV_ROUND_UP(data->timeout_clks, khz);
    }

    return ms;
}
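
/*
 * Worked example (numbers illustrative): data->timeout_ns = 100000000
 * (100 ms) and data->timeout_clks = 50000 at a 50 MHz actual clock give
 * 100 + DIV_ROUND_UP(50000, 50000) = 101 ms.
 */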

static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
             int type)
{
    int err;

    if (md->reset_done & type)
        return -EEXIST;

    md->reset_done |= type;
    err = mmc_hw_reset(host->card);
    /* Ensure we switch back to the correct partition */
    if (err) {
        struct mmc_blk_data *main_md =
            dev_get_drvdata(&host->card->dev);
        int part_err;

        main_md->part_curr = main_md->part_type;
        part_err = mmc_blk_part_switch(host->card, md->part_type);
        if (part_err) {
            /*
             * We have failed to get back into the correct
             * partition, so we need to abort the whole request.
             */
            return -ENODEV;
        }
    }
    return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
    md->reset_done &= ~type;
}

/*
 * Non-block commands are queued by the block layer, processed together
 * with all other requests, and then issued in this function once the
 * block layer hands them back.
 */
static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
{
    struct mmc_queue_req *mq_rq;
    struct mmc_card *card = mq->card;
    struct mmc_blk_data *md = mq->blkdata;
    struct mmc_blk_ioc_data **idata;
    bool rpmb_ioctl;
    u8 **ext_csd;
    u32 status;
    int ret;
    int i;

    mq_rq = req_to_mmc_queue_req(req);
    rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);

    switch (mq_rq->drv_op) {
    case MMC_DRV_OP_IOCTL:
        if (card->ext_csd.cmdq_en) {
            ret = mmc_cmdq_disable(card);
            if (ret)
                break;
        }
        fallthrough;
    case MMC_DRV_OP_IOCTL_RPMB:
        idata = mq_rq->drv_op_data;
        for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
            ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
            if (ret)
                break;
        }
        /* Always switch back to main area after RPMB access */
        if (rpmb_ioctl)
            mmc_blk_part_switch(card, 0);
        else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
            mmc_cmdq_enable(card);
        break;
    case MMC_DRV_OP_BOOT_WP:
        ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
                 card->ext_csd.boot_ro_lock |
                 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
                 card->ext_csd.part_time);
        if (ret)
            pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
                   md->disk->disk_name, ret);
        else
            card->ext_csd.boot_ro_lock |=
                EXT_CSD_BOOT_WP_B_PWR_WP_EN;
        break;
    case MMC_DRV_OP_GET_CARD_STATUS:
        ret = mmc_send_status(card, &status);
        if (!ret)
            ret = status;
        break;
    case MMC_DRV_OP_GET_EXT_CSD:
        ext_csd = mq_rq->drv_op_data;
        ret = mmc_get_ext_csd(card, ext_csd);
        break;
    default:
        pr_err("%s: unknown driver specific operation\n",
               md->disk->disk_name);
        ret = -EINVAL;
        break;
    }
    mq_rq->drv_op_result = ret;
    blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req,
                   int type, unsigned int erase_arg)
{
    struct mmc_blk_data *md = mq->blkdata;
    struct mmc_card *card = md->queue.card;
    unsigned int from, nr;
    int err = 0;
    blk_status_t status = BLK_STS_OK;

    if (!mmc_can_erase(card)) {
        status = BLK_STS_NOTSUPP;
        goto fail;
    }

    from = blk_rq_pos(req);
    nr = blk_rq_sectors(req);

    do {
        err = 0;
        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
            err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                     INAND_CMD38_ARG_EXT_CSD,
                     erase_arg == MMC_TRIM_ARG ?
                     INAND_CMD38_ARG_TRIM :
                     INAND_CMD38_ARG_ERASE,
                     card->ext_csd.generic_cmd6_time);
        }
        if (!err)
            err = mmc_erase(card, from, nr, erase_arg);
    } while (err == -EIO && !mmc_blk_reset(md, card->host, type));
    if (err)
        status = BLK_STS_IOERR;
    else
        mmc_blk_reset_success(md, type);
fail:
    blk_mq_end_request(req, status);
}

static void mmc_blk_issue_trim_rq(struct mmc_queue *mq, struct request *req)
{
    mmc_blk_issue_erase_rq(mq, req, MMC_BLK_TRIM, MMC_TRIM_ARG);
}

static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
    struct mmc_blk_data *md = mq->blkdata;
    struct mmc_card *card = md->queue.card;

    mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, card->erase_arg);
}

static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                       struct request *req)
{
    struct mmc_blk_data *md = mq->blkdata;
    struct mmc_card *card = md->queue.card;
    unsigned int from, nr, arg;
    int err = 0, type = MMC_BLK_SECDISCARD;
    blk_status_t status = BLK_STS_OK;

    if (!(mmc_can_secure_erase_trim(card))) {
        status = BLK_STS_NOTSUPP;
        goto out;
    }

    from = blk_rq_pos(req);
    nr = blk_rq_sectors(req);

    if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
        arg = MMC_SECURE_TRIM1_ARG;
    else
        arg = MMC_SECURE_ERASE_ARG;

retry:
    if (card->quirks & MMC_QUIRK_INAND_CMD38) {
        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                 INAND_CMD38_ARG_EXT_CSD,
                 arg == MMC_SECURE_TRIM1_ARG ?
                 INAND_CMD38_ARG_SECTRIM1 :
                 INAND_CMD38_ARG_SECERASE,
                 card->ext_csd.generic_cmd6_time);
        if (err)
            goto out_retry;
    }

    err = mmc_erase(card, from, nr, arg);
    if (err == -EIO)
        goto out_retry;
    if (err) {
        status = BLK_STS_IOERR;
        goto out;
    }

    if (arg == MMC_SECURE_TRIM1_ARG) {
        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
            err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                     INAND_CMD38_ARG_EXT_CSD,
                     INAND_CMD38_ARG_SECTRIM2,
                     card->ext_csd.generic_cmd6_time);
            if (err)
                goto out_retry;
        }

        err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
        if (err == -EIO)
            goto out_retry;
        if (err) {
            status = BLK_STS_IOERR;
            goto out;
        }
    }

out_retry:
    if (err && !mmc_blk_reset(md, card->host, type))
        goto retry;
    if (!err)
        mmc_blk_reset_success(md, type);
out:
    blk_mq_end_request(req, status);
}

static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
    struct mmc_blk_data *md = mq->blkdata;
    struct mmc_card *card = md->queue.card;
    int ret = 0;

    ret = mmc_flush_cache(card->host);
    blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

/*
 * Reformat the current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finishing the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
                    struct mmc_card *card,
                    struct request *req)
{
    if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
        /* Legacy mode imposes restrictions on transfers. */
        if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
            brq->data.blocks = 1;

        if (brq->data.blocks > card->ext_csd.rel_sectors)
            brq->data.blocks = card->ext_csd.rel_sectors;
        else if (brq->data.blocks < card->ext_csd.rel_sectors)
            brq->data.blocks = 1;
    }
}

#define CMD_ERRORS_EXCL_OOR                     \
    (R1_ADDRESS_ERROR | /* Misaligned address */        \
     R1_BLOCK_LEN_ERROR |   /* Transferred block length incorrect */\
     R1_WP_VIOLATION |  /* Tried to write to protected block */ \
     R1_CARD_ECC_FAILED |   /* Card ECC failed */           \
     R1_CC_ERROR |      /* Card controller error */     \
     R1_ERROR)      /* General/unknown error */

#define CMD_ERRORS                          \
    (CMD_ERRORS_EXCL_OOR |                      \
     R1_OUT_OF_RANGE)   /* Command argument out of range */ \

static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
    u32 val;

    /*
     * Per the SD specification (physical layer version 4.10) [1],
     * section 4.3.3, it explicitly states that "When the last
     * block of user area is read using CMD18, the host should
     * ignore OUT_OF_RANGE error that may occur even the sequence
     * is correct". And JESD84-B51 for eMMC also has a similar
     * statement in section 6.8.3.
     *
     * Multiple block read/write could be done either by the predefined
     * method, namely CMD23, or in open-ended mode. For open-ended mode,
     * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
     *
     * However the spec [1] doesn't tell us whether we should also
     * ignore that for the predefined method. But per the spec [1],
     * section 4.15 Set Block Count Command, it says "If illegal block
     * count is set, out of range error will be indicated during read/write
     * operation (For example, data transfer is stopped at user area
     * boundary)." In other words, we can expect an out of range error
     * in the response to the following CMD18/25. And if the argument of
     * CMD23 + the argument of CMD18/25 exceed the max number of blocks,
     * we could also expect to get a -ETIMEDOUT or any other error number
     * from the host drivers due to a missing data response (for write)
     * or data (for read), as the cards will stop the data transfer by
     * themselves per the spec. So we only need to check R1_OUT_OF_RANGE
     * for open-ended mode.
     */

    if (!brq->stop.error) {
        bool oor_with_open_end;
        /* If there is no error yet, check the R1 response */

        val = brq->stop.resp[0] & CMD_ERRORS;
        oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;

        if (val && !oor_with_open_end)
            brq->stop.error = -EIO;
    }
}
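
/*
 * Worked example of the policy above: an open-ended CMD18 that reads the
 * last block of the user area can return a stop (CMD12) R1 status with
 * only bit 31 (R1_OUT_OF_RANGE, 0x80000000) set among CMD_ERRORS. With
 * mrq.sbc NULL (open-ended), oor_with_open_end is true and no error is
 * flagged; the same status on a CMD23-bounded transfer would set
 * stop.error to -EIO.
 */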

static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
                  int recovery_mode, bool *do_rel_wr_p,
                  bool *do_data_tag_p)
{
    struct mmc_blk_data *md = mq->blkdata;
    struct mmc_card *card = md->queue.card;
    struct mmc_blk_request *brq = &mqrq->brq;
    struct request *req = mmc_queue_req_to_req(mqrq);
    bool do_rel_wr, do_data_tag;

    /*
     * Reliable writes are used to implement Forced Unit Access and
     * are supported only on MMCs.
     */
    do_rel_wr = (req->cmd_flags & REQ_FUA) &&
            rq_data_dir(req) == WRITE &&
            (md->flags & MMC_BLK_REL_WR);

    memset(brq, 0, sizeof(struct mmc_blk_request));

    mmc_crypto_prepare_req(mqrq);

    brq->mrq.data = &brq->data;
    brq->mrq.tag = req->tag;

    brq->stop.opcode = MMC_STOP_TRANSMISSION;
    brq->stop.arg = 0;

    if (rq_data_dir(req) == READ) {
        brq->data.flags = MMC_DATA_READ;
        brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
    } else {
        brq->data.flags = MMC_DATA_WRITE;
        brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
    }

    brq->data.blksz = 512;
    brq->data.blocks = blk_rq_sectors(req);
    brq->data.blk_addr = blk_rq_pos(req);

    /*
     * The command queue supports 2 priorities: "high" (1) and "simple" (0).
     * The eMMC will give "high" priority tasks priority over "simple"
     * priority tasks. Here we always set "simple" priority by not setting
     * MMC_DATA_PRIO.
     */

    /*
     * The block layer doesn't support all sector count
     * restrictions, so we need to be prepared for too big
     * requests.
     */
    if (brq->data.blocks > card->host->max_blk_count)
        brq->data.blocks = card->host->max_blk_count;

    if (brq->data.blocks > 1) {
        /*
         * Some SD cards in SPI mode return a CRC error or even lock up
         * completely when trying to read the last block using a
         * multiblock read command.
         */
        if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
            (blk_rq_pos(req) + blk_rq_sectors(req) ==
             get_capacity(md->disk)))
            brq->data.blocks--;

        /*
         * After a read error, we redo the request one (native) sector
         * at a time in order to accurately determine which
         * sectors can be read successfully.
         */
        if (recovery_mode)
            brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;

        /*
         * Some controllers have HW issues while operating
         * in multiple I/O mode
         */
        if (card->host->ops->multi_io_quirk)
            brq->data.blocks = card->host->ops->multi_io_quirk(card,
                        (rq_data_dir(req) == READ) ?
                        MMC_DATA_READ : MMC_DATA_WRITE,
                        brq->data.blocks);
    }

    if (do_rel_wr) {
        mmc_apply_rel_rw(brq, card, req);
        brq->data.flags |= MMC_DATA_REL_WR;
    }

    /*
     * The data tag is used only when writing metadata, to speed up the
     * write and any subsequent read of that metadata.
     */
    do_data_tag = card->ext_csd.data_tag_unit_size &&
              (req->cmd_flags & REQ_META) &&
              (rq_data_dir(req) == WRITE) &&
              ((brq->data.blocks * brq->data.blksz) >=
               card->ext_csd.data_tag_unit_size);

    if (do_data_tag)
        brq->data.flags |= MMC_DATA_DAT_TAG;

    mmc_set_data_timeout(&brq->data, card);

    brq->data.sg = mqrq->sg;
    brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

    /*
     * Adjust the sg list so it is the same size as the
     * request.
     */
    if (brq->data.blocks != blk_rq_sectors(req)) {
        int i, data_size = brq->data.blocks << 9;
        struct scatterlist *sg;

        for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
            data_size -= sg->length;
            if (data_size <= 0) {
                sg->length += data_size;
                i++;
                break;
            }
        }
        brq->data.sg_len = i;
    }
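
    /*
     * Worked example of the sg trim above (illustrative lengths): if
     * blocks was cut from 8 to 1 (data_size = 512) and the first sg
     * entry is 4096 bytes, the loop leaves data_size = 512 - 4096 =
     * -3584, shrinks that entry to 4096 + (-3584) = 512 bytes and sets
     * sg_len = 1.
     */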

    if (do_rel_wr_p)
        *do_rel_wr_p = do_rel_wr;

    if (do_data_tag_p)
        *do_data_tag_p = do_data_tag;
}

#define MMC_CQE_RETRIES 2

static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
{
    struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
    struct mmc_request *mrq = &mqrq->brq.mrq;
    struct request_queue *q = req->q;
    struct mmc_host *host = mq->card->host;
    enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
    unsigned long flags;
    bool put_card;
    int err;

    mmc_cqe_post_req(host, mrq);

    if (mrq->cmd && mrq->cmd->error)
        err = mrq->cmd->error;
    else if (mrq->data && mrq->data->error)
        err = mrq->data->error;
    else
        err = 0;

    if (err) {
        if (mqrq->retries++ < MMC_CQE_RETRIES)
            blk_mq_requeue_request(req, true);
        else
            blk_mq_end_request(req, BLK_STS_IOERR);
    } else if (mrq->data) {
        if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
            blk_mq_requeue_request(req, true);
        else
            __blk_mq_end_request(req, BLK_STS_OK);
    } else {
        blk_mq_end_request(req, BLK_STS_OK);
    }

    spin_lock_irqsave(&mq->lock, flags);

    mq->in_flight[issue_type] -= 1;

    put_card = (mmc_tot_in_flight(mq) == 0);

    mmc_cqe_check_busy(mq);

    spin_unlock_irqrestore(&mq->lock, flags);

    if (!mq->cqe_busy)
        blk_mq_run_hw_queues(q, true);

    if (put_card)
        mmc_put_card(mq->card, &mq->ctx);
}

void mmc_blk_cqe_recovery(struct mmc_queue *mq)
{
    struct mmc_card *card = mq->card;
    struct mmc_host *host = card->host;
    int err;

    pr_debug("%s: CQE recovery start\n", mmc_hostname(host));

    err = mmc_cqe_recovery(host);
    if (err)
        mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
    mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);

    pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
}

static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
{
    struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
                          brq.mrq);
    struct request *req = mmc_queue_req_to_req(mqrq);
    struct request_queue *q = req->q;
    struct mmc_queue *mq = q->queuedata;

    /*
     * Block layer timeouts race with completions which means the normal
     * completion path cannot be used during recovery.
     */
    if (mq->in_recovery)
        mmc_blk_cqe_complete_rq(mq, req);
    else if (likely(!blk_should_fake_timeout(req->q)))
        blk_mq_complete_request(req);
}

static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
    mrq->done       = mmc_blk_cqe_req_done;
    mrq->recovery_notifier  = mmc_cqe_recovery_notifier;

    return mmc_cqe_start_req(host, mrq);
}

static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
                         struct request *req)
{
    struct mmc_blk_request *brq = &mqrq->brq;

    memset(brq, 0, sizeof(*brq));

    brq->mrq.cmd = &brq->cmd;
    brq->mrq.tag = req->tag;

    return &brq->mrq;
}

static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
{
    struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
    struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);

    mrq->cmd->opcode = MMC_SWITCH;
    mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
            (EXT_CSD_FLUSH_CACHE << 16) |
            (1 << 8) |
            EXT_CSD_CMD_SET_NORMAL;
    mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;

    return mmc_blk_cqe_start_req(mq->card->host, mrq);
}

static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
    struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
    struct mmc_host *host = mq->card->host;
    int err;

    mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
    mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
    mmc_pre_req(host, &mqrq->brq.mrq);

    err = mmc_cqe_start_req(host, &mqrq->brq.mrq);
    if (err)
        mmc_post_req(host, &mqrq->brq.mrq, err);

    return err;
}

static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
    struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
    struct mmc_host *host = mq->card->host;

    if (host->hsq_enabled)
        return mmc_blk_hsq_issue_rw_rq(mq, req);

    mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);

    return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
}

static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
                   struct mmc_card *card,
                   int recovery_mode,
                   struct mmc_queue *mq)
{
    u32 readcmd, writecmd;
    struct mmc_blk_request *brq = &mqrq->brq;
    struct request *req = mmc_queue_req_to_req(mqrq);
    struct mmc_blk_data *md = mq->blkdata;
    bool do_rel_wr, do_data_tag;

    mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);

    brq->mrq.cmd = &brq->cmd;

    brq->cmd.arg = blk_rq_pos(req);
    if (!mmc_card_blockaddr(card))
        brq->cmd.arg <<= 9;
    brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

    if (brq->data.blocks > 1 || do_rel_wr) {
        /* SPI multiblock writes terminate using a special
         * token, not a STOP_TRANSMISSION request.
         */
        if (!mmc_host_is_spi(card->host) ||
            rq_data_dir(req) == READ)
            brq->mrq.stop = &brq->stop;
        readcmd = MMC_READ_MULTIPLE_BLOCK;
        writecmd = MMC_WRITE_MULTIPLE_BLOCK;
    } else {
        brq->mrq.stop = NULL;
        readcmd = MMC_READ_SINGLE_BLOCK;
        writecmd = MMC_WRITE_BLOCK;
    }
    brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;

    /*
     * Pre-defined multi-block transfers are preferable to
     * open-ended ones (and necessary for reliable writes).
     * However, it is not sufficient to just send CMD23,
     * and avoid the final CMD12, as on an error condition
     * CMD12 (stop) needs to be sent anyway. This, coupled
     * with Auto-CMD23 enhancements provided by some
     * hosts, means that the complexity of dealing
     * with this is best left to the host. If CMD23 is
     * supported by card and host, we'll fill sbc in and let
     * the host deal with handling it correctly. This means
     * that for hosts that don't expose MMC_CAP_CMD23, no
     * change of behavior will be observed.
     *
     * N.B.: Some MMC cards experience perf degradation.
     * We'll avoid using CMD23-bounded multiblock writes for
     * these, while retaining features like reliable writes.
     */
    if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
        (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
         do_data_tag)) {
        brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
        brq->sbc.arg = brq->data.blocks |
            (do_rel_wr ? (1 << 31) : 0) |
            (do_data_tag ? (1 << 29) : 0);
        brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
        brq->mrq.sbc = &brq->sbc;
    }
}
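
/*
 * Worked example of the sbc encoding above: a reliable write of 8 blocks
 * yields sbc.arg = 8 | (1 << 31) = 0x80000008; a data-tagged write of the
 * same size instead sets bit 29, giving 0x20000008.
 */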
1656 
1657 #define MMC_MAX_RETRIES     5
1658 #define MMC_DATA_RETRIES    2
1659 #define MMC_NO_RETRIES      (MMC_MAX_RETRIES + 1)
1660 
1661 static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
1662 {
1663     struct mmc_command cmd = {
1664         .opcode = MMC_STOP_TRANSMISSION,
1665         .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
1666         /* Some hosts wait for busy anyway, so provide a busy timeout */
1667         .busy_timeout = timeout,
1668     };
1669 
1670     return mmc_wait_for_cmd(card->host, &cmd, 5);
1671 }
1672 
1673 static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
1674 {
1675     struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
1676     struct mmc_blk_request *brq = &mqrq->brq;
1677     unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
1678     int err;
1679 
1680     mmc_retune_hold_now(card->host);
1681 
1682     mmc_blk_send_stop(card, timeout);
1683 
1684     err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO);
1685 
1686     mmc_retune_release(card->host);
1687 
1688     return err;
1689 }
1690 
1691 #define MMC_READ_SINGLE_RETRIES 2
1692 
1693 /* Single (native) sector read during recovery */
1694 static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
1695 {
1696     struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
1697     struct mmc_request *mrq = &mqrq->brq.mrq;
1698     struct mmc_card *card = mq->card;
1699     struct mmc_host *host = card->host;
1700     blk_status_t error = BLK_STS_OK;
1701     size_t bytes_per_read = queue_physical_block_size(mq->queue);
1702 
1703     do {
1704         u32 status;
1705         int err;
1706         int retries = 0;
1707 
1708         while (retries++ <= MMC_READ_SINGLE_RETRIES) {
1709             mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
1710 
1711             mmc_wait_for_req(host, mrq);
1712 
1713             err = mmc_send_status(card, &status);
1714             if (err)
1715                 goto error_exit;
1716 
1717             if (!mmc_host_is_spi(host) &&
1718                 !mmc_ready_for_data(status)) {
1719                 err = mmc_blk_fix_state(card, req);
1720                 if (err)
1721                     goto error_exit;
1722             }
1723 
1724             if (!mrq->cmd->error)
1725                 break;
1726         }
1727 
1728         if (mrq->cmd->error ||
1729             mrq->data->error ||
1730             (!mmc_host_is_spi(host) &&
1731              (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
1732             error = BLK_STS_IOERR;
1733         else
1734             error = BLK_STS_OK;
1735 
1736     } while (blk_update_request(req, error, bytes_per_read));
1737 
1738     return;
1739 
1740 error_exit:
1741     mrq->data->bytes_xfered = 0;
1742     blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
1743     /* Let it try the remaining request again */
1744     if (mqrq->retries > MMC_MAX_RETRIES - 1)
1745         mqrq->retries = MMC_MAX_RETRIES - 1;
1746 }
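
/*
 * Sketch of the loop above: each pass issues a one-block transfer and
 * then completes exactly bytes_per_read of the request.
 * blk_update_request() returns true while bytes remain outstanding, so
 * a bad sector is finished with BLK_STS_IOERR and skipped instead of
 * failing the whole multi-block read.
 */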
1747 
1748 static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
1749 {
1750     return !!brq->mrq.sbc;
1751 }
1752 
1753 static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
1754 {
1755     return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
1756 }
1757 
1758 /*
1759  * Check for errors the host controller driver might not have seen such as
1760  * response mode errors or invalid card state.
1761  */
1762 static bool mmc_blk_status_error(struct request *req, u32 status)
1763 {
1764     struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
1765     struct mmc_blk_request *brq = &mqrq->brq;
1766     struct mmc_queue *mq = req->q->queuedata;
1767     u32 stop_err_bits;
1768 
1769     if (mmc_host_is_spi(mq->card->host))
1770         return false;
1771 
1772     stop_err_bits = mmc_blk_stop_err_bits(brq);
1773 
1774     return brq->cmd.resp[0]  & CMD_ERRORS    ||
1775            brq->stop.resp[0] & stop_err_bits ||
1776            status            & stop_err_bits ||
1777            (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status));
1778 }
1779 
1780 static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
1781 {
1782     return !brq->sbc.error && !brq->cmd.error &&
1783            !(brq->cmd.resp[0] & CMD_ERRORS);
1784 }
1785 
1786 /*
1787  * Requests are completed by mmc_blk_mq_complete_rq() which sets a simple
1788  * policy:
1789  * 1. A request that has transferred at least some data is considered
1790  * successful and will be requeued if there is remaining data to
1791  * transfer.
1792  * 2. Otherwise the number of retries is incremented and the request
1793  * will be requeued if there are remaining retries.
1794  * 3. Otherwise the request will be errored out.
1795  * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and
1796  * mqrq->retries. So there are only 4 possible actions here:
1797  *  1. do not accept the bytes_xfered value, i.e. set it to zero
1798  *  2. change mqrq->retries to determine the number of retries
1799  *  3. try to reset the card
1800  *  4. read one sector at a time
1801  */
1802 static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
1803 {
1804     int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1805     struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
1806     struct mmc_blk_request *brq = &mqrq->brq;
1807     struct mmc_blk_data *md = mq->blkdata;
1808     struct mmc_card *card = mq->card;
1809     u32 status;
1810     u32 blocks;
1811     int err;
1812 
1813     /*
1814      * Some errors the host driver might not have seen. Set the number of
1815      * bytes transferred to zero in that case.
1816      */
1817     err = __mmc_send_status(card, &status, 0);
1818     if (err || mmc_blk_status_error(req, status))
1819         brq->data.bytes_xfered = 0;
1820 
1821     mmc_retune_release(card->host);
1822 
1823     /*
1824      * Try again to get the status. This also provides an opportunity for
1825      * re-tuning.
1826      */
1827     if (err)
1828         err = __mmc_send_status(card, &status, 0);
1829 
1830     /*
1831      * If the card has been removed, there is nothing more to do now
1832      * that the number of bytes transferred has been updated.
1833      */
1834     if (err && mmc_detect_card_removed(card->host))
1835         return;
1836 
1837     /* Try to get back to "tran" state */
1838     if (!mmc_host_is_spi(mq->card->host) &&
1839         (err || !mmc_ready_for_data(status)))
1840         err = mmc_blk_fix_state(mq->card, req);
1841 
1842     /*
1843      * Special case for SD cards where the card might record the number of
1844      * blocks written.
1845      */
1846     if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
1847         rq_data_dir(req) == WRITE) {
1848         if (mmc_sd_num_wr_blocks(card, &blocks))
1849             brq->data.bytes_xfered = 0;
1850         else
1851             brq->data.bytes_xfered = blocks << 9;
1852     }
1853 
1854     /* Reset if the card is in a bad state */
1855     if (!mmc_host_is_spi(mq->card->host) &&
1856         err && mmc_blk_reset(md, card->host, type)) {
1857         pr_err("%s: recovery failed!\n", req->q->disk->disk_name);
1858         mqrq->retries = MMC_NO_RETRIES;
1859         return;
1860     }
1861 
1862     /*
1863      * If anything was done, just return and if there is anything remaining
1864      * on the request it will get requeued.
1865      */
1866     if (brq->data.bytes_xfered)
1867         return;
1868 
1869     /* Reset before last retry */
1870     if (mqrq->retries + 1 == MMC_MAX_RETRIES)
1871         mmc_blk_reset(md, card->host, type);
1872 
1873     /* Command errors fail fast, so use all MMC_MAX_RETRIES */
1874     if (brq->sbc.error || brq->cmd.error)
1875         return;
1876 
1877     /* Reduce the remaining retries for data errors */
1878     if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
1879         mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
1880         return;
1881     }
1882 
1883     if (rq_data_dir(req) == READ && brq->data.blocks >
1884             queue_physical_block_size(mq->queue) >> 9) {
1885         /* Read one (native) sector at a time */
1886         mmc_blk_read_single(mq, req);
1887         return;
1888     }
1889 }
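
/*
 * Illustrative mapping of the four actions listed in the policy comment
 * above to the code in mmc_blk_mq_rw_recovery():
 *   1. bytes_xfered zeroed   - failed status checks, lost SD write count
 *   2. mqrq->retries changed - MMC_NO_RETRIES on failed reset, clamped
 *                              for data errors
 *   3. card reset            - mmc_blk_reset() on bad state or before
 *                              the last retry
 *   4. single-sector reads   - mmc_blk_read_single() for multi-block
 *                              reads
 */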
1890 
1891 static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
1892 {
1893     mmc_blk_eval_resp_error(brq);
1894 
1895     return brq->sbc.error || brq->cmd.error || brq->stop.error ||
1896            brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
1897 }
1898 
1899 static int mmc_spi_err_check(struct mmc_card *card)
1900 {
1901     u32 status = 0;
1902     int err;
1903 
1904     /*
1905      * SPI does not have a TRAN state we have to wait on; instead the
1906      * card is ready again when it no longer holds the line LOW.
1907      * We still have to ensure two things here before we know the write
1908      * was successful:
1909      * 1. The card may have disconnected while busy and we may have read
1910      * back our own pull-up, thinking the card was still connected, so
1911      * ensure it still responds.
1912      * 2. Check for any error bits, in particular R1_SPI_IDLE, to catch a
1913      * card that reconnected after being disconnected while busy.
1914      */
1915     err = __mmc_send_status(card, &status, 0);
1916     if (err)
1917         return err;
1918     /* All R1 and R2 bits of SPI are errors in our case */
1919     if (status)
1920         return -EIO;
1921     return 0;
1922 }
1923 
1924 static int mmc_blk_busy_cb(void *cb_data, bool *busy)
1925 {
1926     struct mmc_blk_busy_data *data = cb_data;
1927     u32 status = 0;
1928     int err;
1929 
1930     err = mmc_send_status(data->card, &status);
1931     if (err)
1932         return err;
1933 
1934     /* Accumulate response error bits. */
1935     data->status |= status;
1936 
1937     *busy = !mmc_ready_for_data(status);
1938     return 0;
1939 }
1940 
1941 static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
1942 {
1943     struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
1944     struct mmc_blk_busy_data cb_data;
1945     int err;
1946 
1947     if (rq_data_dir(req) == READ)
1948         return 0;
1949 
1950     if (mmc_host_is_spi(card->host)) {
1951         err = mmc_spi_err_check(card);
1952         if (err)
1953             mqrq->brq.data.bytes_xfered = 0;
1954         return err;
1955     }
1956 
1957     cb_data.card = card;
1958     cb_data.status = 0;
1959     err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
1960                   &mmc_blk_busy_cb, &cb_data);
1961 
1962     /*
1963      * Do not assume data transferred correctly if there are any error bits
1964      * set.
1965      */
1966     if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) {
1967         mqrq->brq.data.bytes_xfered = 0;
1968         err = err ? err : -EIO;
1969     }
1970 
1971     /* Copy the exception bit so it will be seen later on */
1972     if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT)
1973         mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
1974 
1975     return err;
1976 }
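
/*
 * The busy callback above can be driven directly; a minimal sketch of
 * the polling pattern used by mmc_blk_card_busy() (same calls, assumed
 * to run in a context that may sleep):
 *
 *     struct mmc_blk_busy_data data = { .card = card };
 *
 *     err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
 *                               &mmc_blk_busy_cb, &data);
 *
 * data.status then holds the OR of every status word read while
 * polling, so error bits from any intermediate read are preserved.
 */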
1977 
1978 static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
1979                         struct request *req)
1980 {
1981     int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1982 
1983     mmc_blk_reset_success(mq->blkdata, type);
1984 }
1985 
1986 static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
1987 {
1988     struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
1989     unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;
1990 
1991     if (nr_bytes) {
1992         if (blk_update_request(req, BLK_STS_OK, nr_bytes))
1993             blk_mq_requeue_request(req, true);
1994         else
1995             __blk_mq_end_request(req, BLK_STS_OK);
1996     } else if (!blk_rq_bytes(req)) {
1997         __blk_mq_end_request(req, BLK_STS_IOERR);
1998     } else if (mqrq->retries++ < MMC_MAX_RETRIES) {
1999         blk_mq_requeue_request(req, true);
2000     } else {
2001         if (mmc_card_removed(mq->card))
2002             req->rq_flags |= RQF_QUIET;
2003         blk_mq_end_request(req, BLK_STS_IOERR);
2004     }
2005 }
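
/*
 * Completion decision tree of the function above, in order:
 *     some bytes transferred -> requeue the remainder, else end OK
 *     zero-byte request      -> end with BLK_STS_IOERR (nothing to retry)
 *     retries remaining      -> requeue the whole request
 *     otherwise              -> end with BLK_STS_IOERR, quietly if the
 *                               card is gone
 */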
2006 
2007 static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
2008                     struct mmc_queue_req *mqrq)
2009 {
2010     return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) &&
2011            (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT ||
2012         mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT);
2013 }
2014 
2015 static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
2016                  struct mmc_queue_req *mqrq)
2017 {
2018     if (mmc_blk_urgent_bkops_needed(mq, mqrq))
2019         mmc_run_bkops(mq->card);
2020 }
2021 
2022 static void mmc_blk_hsq_req_done(struct mmc_request *mrq)
2023 {
2024     struct mmc_queue_req *mqrq =
2025         container_of(mrq, struct mmc_queue_req, brq.mrq);
2026     struct request *req = mmc_queue_req_to_req(mqrq);
2027     struct request_queue *q = req->q;
2028     struct mmc_queue *mq = q->queuedata;
2029     struct mmc_host *host = mq->card->host;
2030     unsigned long flags;
2031 
2032     if (mmc_blk_rq_error(&mqrq->brq) ||
2033         mmc_blk_urgent_bkops_needed(mq, mqrq)) {
2034         spin_lock_irqsave(&mq->lock, flags);
2035         mq->recovery_needed = true;
2036         mq->recovery_req = req;
2037         spin_unlock_irqrestore(&mq->lock, flags);
2038 
2039         host->cqe_ops->cqe_recovery_start(host);
2040 
2041         schedule_work(&mq->recovery_work);
2042         return;
2043     }
2044 
2045     mmc_blk_rw_reset_success(mq, req);
2046 
2047     /*
2048      * Block layer timeouts race with completions which means the normal
2049      * completion path cannot be used during recovery.
2050      */
2051     if (mq->in_recovery)
2052         mmc_blk_cqe_complete_rq(mq, req);
2053     else if (likely(!blk_should_fake_timeout(req->q)))
2054         blk_mq_complete_request(req);
2055 }
2056 
2057 void mmc_blk_mq_complete(struct request *req)
2058 {
2059     struct mmc_queue *mq = req->q->queuedata;
2060     struct mmc_host *host = mq->card->host;
2061 
2062     if (host->cqe_enabled)
2063         mmc_blk_cqe_complete_rq(mq, req);
2064     else if (likely(!blk_should_fake_timeout(req->q)))
2065         mmc_blk_mq_complete_rq(mq, req);
2066 }
2067 
2068 static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
2069                        struct request *req)
2070 {
2071     struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
2072     struct mmc_host *host = mq->card->host;
2073 
2074     if (mmc_blk_rq_error(&mqrq->brq) ||
2075         mmc_blk_card_busy(mq->card, req)) {
2076         mmc_blk_mq_rw_recovery(mq, req);
2077     } else {
2078         mmc_blk_rw_reset_success(mq, req);
2079         mmc_retune_release(host);
2080     }
2081 
2082     mmc_blk_urgent_bkops(mq, mqrq);
2083 }
2084 
2085 static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
2086 {
2087     unsigned long flags;
2088     bool put_card;
2089 
2090     spin_lock_irqsave(&mq->lock, flags);
2091 
2092     mq->in_flight[mmc_issue_type(mq, req)] -= 1;
2093 
2094     put_card = (mmc_tot_in_flight(mq) == 0);
2095 
2096     spin_unlock_irqrestore(&mq->lock, flags);
2097 
2098     if (put_card)
2099         mmc_put_card(mq->card, &mq->ctx);
2100 }
2101 
2102 static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
2103                 bool can_sleep)
2104 {
2105     struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
2106     struct mmc_request *mrq = &mqrq->brq.mrq;
2107     struct mmc_host *host = mq->card->host;
2108 
2109     mmc_post_req(host, mrq, 0);
2110 
2111     /*
2112      * Block layer timeouts race with completions which means the normal
2113      * completion path cannot be used during recovery.
2114      */
2115     if (mq->in_recovery) {
2116         mmc_blk_mq_complete_rq(mq, req);
2117     } else if (likely(!blk_should_fake_timeout(req->q))) {
2118         if (can_sleep)
2119             blk_mq_complete_request_direct(req, mmc_blk_mq_complete);
2120         else
2121             blk_mq_complete_request(req);
2122     }
2123 
2124     mmc_blk_mq_dec_in_flight(mq, req);
2125 }
2126 
2127 void mmc_blk_mq_recovery(struct mmc_queue *mq)
2128 {
2129     struct request *req = mq->recovery_req;
2130     struct mmc_host *host = mq->card->host;
2131     struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
2132 
2133     mq->recovery_req = NULL;
2134     mq->rw_wait = false;
2135 
2136     if (mmc_blk_rq_error(&mqrq->brq)) {
2137         mmc_retune_hold_now(host);
2138         mmc_blk_mq_rw_recovery(mq, req);
2139     }
2140 
2141     mmc_blk_urgent_bkops(mq, mqrq);
2142 
2143     mmc_blk_mq_post_req(mq, req, true);
2144 }
2145 
2146 static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
2147                      struct request **prev_req)
2148 {
2149     if (mmc_host_done_complete(mq->card->host))
2150         return;
2151 
2152     mutex_lock(&mq->complete_lock);
2153 
2154     if (!mq->complete_req)
2155         goto out_unlock;
2156 
2157     mmc_blk_mq_poll_completion(mq, mq->complete_req);
2158 
2159     if (prev_req)
2160         *prev_req = mq->complete_req;
2161     else
2162         mmc_blk_mq_post_req(mq, mq->complete_req, true);
2163 
2164     mq->complete_req = NULL;
2165 
2166 out_unlock:
2167     mutex_unlock(&mq->complete_lock);
2168 }
2169 
2170 void mmc_blk_mq_complete_work(struct work_struct *work)
2171 {
2172     struct mmc_queue *mq = container_of(work, struct mmc_queue,
2173                         complete_work);
2174 
2175     mmc_blk_mq_complete_prev_req(mq, NULL);
2176 }
2177 
2178 static void mmc_blk_mq_req_done(struct mmc_request *mrq)
2179 {
2180     struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
2181                           brq.mrq);
2182     struct request *req = mmc_queue_req_to_req(mqrq);
2183     struct request_queue *q = req->q;
2184     struct mmc_queue *mq = q->queuedata;
2185     struct mmc_host *host = mq->card->host;
2186     unsigned long flags;
2187 
2188     if (!mmc_host_done_complete(host)) {
2189         bool waiting;
2190 
2191         /*
2192          * We cannot complete the request in this context, so record
2193          * that there is a request to complete, and that a following
2194          * request does not need to wait (although it does need to
2195          * complete complete_req first).
2196          */
2197         spin_lock_irqsave(&mq->lock, flags);
2198         mq->complete_req = req;
2199         mq->rw_wait = false;
2200         waiting = mq->waiting;
2201         spin_unlock_irqrestore(&mq->lock, flags);
2202 
2203         /*
2204          * If 'waiting' then the waiting task will complete this
2205          * request, otherwise queue a work to do it. Note that
2206          * complete_work may still race with the dispatch of a following
2207          * request.
2208          */
2209         if (waiting)
2210             wake_up(&mq->wait);
2211         else
2212             queue_work(mq->card->complete_wq, &mq->complete_work);
2213 
2214         return;
2215     }
2216 
2217     /* Take the recovery path for errors or urgent background operations */
2218     if (mmc_blk_rq_error(&mqrq->brq) ||
2219         mmc_blk_urgent_bkops_needed(mq, mqrq)) {
2220         spin_lock_irqsave(&mq->lock, flags);
2221         mq->recovery_needed = true;
2222         mq->recovery_req = req;
2223         spin_unlock_irqrestore(&mq->lock, flags);
2224         wake_up(&mq->wait);
2225         schedule_work(&mq->recovery_work);
2226         return;
2227     }
2228 
2229     mmc_blk_rw_reset_success(mq, req);
2230 
2231     mq->rw_wait = false;
2232     wake_up(&mq->wait);
2233 
2234     /* context unknown */
2235     mmc_blk_mq_post_req(mq, req, false);
2236 }
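
/*
 * Hand-off summary for the !mmc_host_done_complete() path above: the
 * finished request is parked in mq->complete_req and is completed either
 * by a task already sleeping in mmc_blk_rw_wait() (woken via mq->wait)
 * or, when nobody is waiting, by mmc_blk_mq_complete_work() scheduled on
 * the card's complete_wq.
 */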
2237 
2238 static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
2239 {
2240     unsigned long flags;
2241     bool done;
2242 
2243     /*
2244      * Wait while there is another request in progress, but not if recovery
2245      * is needed. Also indicate whether there is a request waiting to start.
2246      */
2247     spin_lock_irqsave(&mq->lock, flags);
2248     if (mq->recovery_needed) {
2249         *err = -EBUSY;
2250         done = true;
2251     } else {
2252         done = !mq->rw_wait;
2253     }
2254     mq->waiting = !done;
2255     spin_unlock_irqrestore(&mq->lock, flags);
2256 
2257     return done;
2258 }
2259 
2260 static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
2261 {
2262     int err = 0;
2263 
2264     wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err));
2265 
2266     /* Always complete the previous request if there is one */
2267     mmc_blk_mq_complete_prev_req(mq, prev_req);
2268 
2269     return err;
2270 }
2271 
2272 static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
2273                   struct request *req)
2274 {
2275     struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
2276     struct mmc_host *host = mq->card->host;
2277     struct request *prev_req = NULL;
2278     int err = 0;
2279 
2280     mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
2281 
2282     mqrq->brq.mrq.done = mmc_blk_mq_req_done;
2283 
2284     mmc_pre_req(host, &mqrq->brq.mrq);
2285 
2286     err = mmc_blk_rw_wait(mq, &prev_req);
2287     if (err)
2288         goto out_post_req;
2289 
2290     mq->rw_wait = true;
2291 
2292     err = mmc_start_request(host, &mqrq->brq.mrq);
2293 
2294     if (prev_req)
2295         mmc_blk_mq_post_req(mq, prev_req, true);
2296 
2297     if (err)
2298         mq->rw_wait = false;
2299 
2300     /* Release re-tuning here where there is no synchronization required */
2301     if (err || mmc_host_done_complete(host))
2302         mmc_retune_release(host);
2303 
2304 out_post_req:
2305     if (err)
2306         mmc_post_req(host, &mqrq->brq.mrq, err);
2307 
2308     return err;
2309 }
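
/*
 * Note the pipelining in the function above: the new request is fully
 * prepared (mmc_blk_rw_rq_prep() plus mmc_pre_req()) before waiting for
 * the previous one, and the previous request is post-processed only
 * after mmc_start_request() has handed the new one to the host, so DMA
 * preparation overlaps the ongoing transfer.
 */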
2310 
2311 static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
2312 {
2313     if (host->cqe_enabled)
2314         return host->cqe_ops->cqe_wait_for_idle(host);
2315 
2316     return mmc_blk_rw_wait(mq, NULL);
2317 }
2318 
2319 enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
2320 {
2321     struct mmc_blk_data *md = mq->blkdata;
2322     struct mmc_card *card = md->queue.card;
2323     struct mmc_host *host = card->host;
2324     int ret;
2325 
2326     ret = mmc_blk_part_switch(card, md->part_type);
2327     if (ret)
2328         return MMC_REQ_FAILED_TO_START;
2329 
2330     switch (mmc_issue_type(mq, req)) {
2331     case MMC_ISSUE_SYNC:
2332         ret = mmc_blk_wait_for_idle(mq, host);
2333         if (ret)
2334             return MMC_REQ_BUSY;
2335         switch (req_op(req)) {
2336         case REQ_OP_DRV_IN:
2337         case REQ_OP_DRV_OUT:
2338             mmc_blk_issue_drv_op(mq, req);
2339             break;
2340         case REQ_OP_DISCARD:
2341             mmc_blk_issue_discard_rq(mq, req);
2342             break;
2343         case REQ_OP_SECURE_ERASE:
2344             mmc_blk_issue_secdiscard_rq(mq, req);
2345             break;
2346         case REQ_OP_WRITE_ZEROES:
2347             mmc_blk_issue_trim_rq(mq, req);
2348             break;
2349         case REQ_OP_FLUSH:
2350             mmc_blk_issue_flush(mq, req);
2351             break;
2352         default:
2353             WARN_ON_ONCE(1);
2354             return MMC_REQ_FAILED_TO_START;
2355         }
2356         return MMC_REQ_FINISHED;
2357     case MMC_ISSUE_DCMD:
2358     case MMC_ISSUE_ASYNC:
2359         switch (req_op(req)) {
2360         case REQ_OP_FLUSH:
2361             if (!mmc_cache_enabled(host)) {
2362                 blk_mq_end_request(req, BLK_STS_OK);
2363                 return MMC_REQ_FINISHED;
2364             }
2365             ret = mmc_blk_cqe_issue_flush(mq, req);
2366             break;
2367         case REQ_OP_READ:
2368         case REQ_OP_WRITE:
2369             if (host->cqe_enabled)
2370                 ret = mmc_blk_cqe_issue_rw_rq(mq, req);
2371             else
2372                 ret = mmc_blk_mq_issue_rw_rq(mq, req);
2373             break;
2374         default:
2375             WARN_ON_ONCE(1);
2376             ret = -EINVAL;
2377         }
2378         if (!ret)
2379             return MMC_REQ_STARTED;
2380         return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
2381     default:
2382         WARN_ON_ONCE(1);
2383         return MMC_REQ_FAILED_TO_START;
2384     }
2385 }
2386 
2387 static inline int mmc_blk_readonly(struct mmc_card *card)
2388 {
2389     return mmc_card_readonly(card) ||
2390            !(card->csd.cmdclass & CCC_BLOCK_WRITE);
2391 }
2392 
2393 static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
2394                           struct device *parent,
2395                           sector_t size,
2396                           bool default_ro,
2397                           const char *subname,
2398                           int area_type,
2399                           unsigned int part_type)
2400 {
2401     struct mmc_blk_data *md;
2402     int devidx, ret;
2403     char cap_str[10];
2404     bool cache_enabled = false;
2405     bool fua_enabled = false;
2406 
2407     devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
2408     if (devidx < 0) {
2409         /*
2410          * We get -ENOSPC when there is no devidx available any
2411          * more. The reason may be either that userspace hasn't yet
2412          * unmounted the partitions, which postpones mmc_blk_release()
2413          * from being called, or that the device has more partitions
2414          * than we support.
2415          */
2416         if (devidx == -ENOSPC)
2417             dev_err(mmc_dev(card->host),
2418                 "no more device IDs available\n");
2419 
2420         return ERR_PTR(devidx);
2421     }
2422 
2423     md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
2424     if (!md) {
2425         ret = -ENOMEM;
2426         goto out;
2427     }
2428 
2429     md->area_type = area_type;
2430 
2431     /*
2432      * Set the read-only status based on the supported commands
2433      * and the write protect switch.
2434      */
2435     md->read_only = mmc_blk_readonly(card);
2436 
2437     md->disk = mmc_init_queue(&md->queue, card);
2438     if (IS_ERR(md->disk)) {
2439         ret = PTR_ERR(md->disk);
2440         goto err_kfree;
2441     }
2442 
2443     INIT_LIST_HEAD(&md->part);
2444     INIT_LIST_HEAD(&md->rpmbs);
2445     kref_init(&md->kref);
2446 
2447     md->queue.blkdata = md;
2448     md->part_type = part_type;
2449 
2450     md->disk->major = MMC_BLOCK_MAJOR;
2451     md->disk->minors = perdev_minors;
2452     md->disk->first_minor = devidx * perdev_minors;
2453     md->disk->fops = &mmc_bdops;
2454     md->disk->private_data = md;
2455     md->parent = parent;
2456     set_disk_ro(md->disk, md->read_only || default_ro);
2457     if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
2458         md->disk->flags |= GENHD_FL_NO_PART;
2459 
2460     /*
2461      * As discussed on lkml, GENHD_FL_REMOVABLE should:
2462      *
2463      * - be set for removable media with permanent block devices
2464      * - be unset for removable block devices with permanent media
2465      *
2466      * Since MMC block devices clearly fall under the second
2467      * case, we do not set GENHD_FL_REMOVABLE.  Userspace
2468      * should use the block device creation/destruction hotplug
2469      * messages to tell when the card is present.
2470      */
2471 
2472     snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
2473          "mmcblk%u%s", card->host->index, subname ? subname : "");
2474 
2475     set_capacity(md->disk, size);
2476 
2477     if (mmc_host_cmd23(card->host)) {
2478         if ((mmc_card_mmc(card) &&
2479              card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
2480             (mmc_card_sd(card) &&
2481              card->scr.cmds & SD_SCR_CMD23_SUPPORT))
2482             md->flags |= MMC_BLK_CMD23;
2483     }
2484 
2485     if (md->flags & MMC_BLK_CMD23 &&
2486         ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
2487          card->ext_csd.rel_sectors)) {
2488         md->flags |= MMC_BLK_REL_WR;
2489         fua_enabled = true;
2490         cache_enabled = true;
2491     }
2492     if (mmc_cache_enabled(card->host))
2493         cache_enabled = true;
2494 
2495     blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);
2496 
2497     string_get_size((u64)size, 512, STRING_UNITS_2,
2498             cap_str, sizeof(cap_str));
2499     pr_info("%s: %s %s %s %s\n",
2500         md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
2501         cap_str, md->read_only ? "(ro)" : "");
2502 
2503     /* used in ->open, must be set before add_disk: */
2504     if (area_type == MMC_BLK_DATA_AREA_MAIN)
2505         dev_set_drvdata(&card->dev, md);
2506     ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
2507     if (ret)
2508         goto err_put_disk;
2509     return md;
2510 
2511  err_put_disk:
2512     put_disk(md->disk);
2513     blk_mq_free_tag_set(&md->queue.tag_set);
2514  err_kfree:
2515     kfree(md);
2516  out:
2517     ida_simple_remove(&mmc_blk_ida, devidx);
2518     return ERR_PTR(ret);
2519 }
2520 
2521 static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
2522 {
2523     sector_t size;
2524 
2525     if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
2526         /*
2527          * The EXT_CSD sector count is in units of 512-byte
2528          * sectors.
2529          */
2530         size = card->ext_csd.sectors;
2531     } else {
2532         /*
2533          * The CSD capacity field counts blocks of 2^read_blkbits
2534          * bytes; set_capacity() takes units of 512 bytes.
2535          */
2536         size = (typeof(sector_t))card->csd.capacity
2537             << (card->csd.read_blkbits - 9);
2538     }
2539 
2540     return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
2541                     MMC_BLK_DATA_AREA_MAIN, 0);
2542 }
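
/*
 * Worked example for the CSD branch above (illustrative values): a card
 * reporting csd.capacity = 15160 with read_blkbits = 10 (1 KiB blocks)
 * yields 15160 << (10 - 9) = 30320 512-byte sectors, i.e. roughly
 * 14.8 MiB passed to set_capacity().
 */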
2543 
2544 static int mmc_blk_alloc_part(struct mmc_card *card,
2545                   struct mmc_blk_data *md,
2546                   unsigned int part_type,
2547                   sector_t size,
2548                   bool default_ro,
2549                   const char *subname,
2550                   int area_type)
2551 {
2552     struct mmc_blk_data *part_md;
2553 
2554     part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
2555                     subname, area_type, part_type);
2556     if (IS_ERR(part_md))
2557         return PTR_ERR(part_md);
2558     list_add(&part_md->part, &md->part);
2559 
2560     return 0;
2561 }
2562 
2563 /**
2564  * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
2565  * @filp: the character device file
2566  * @cmd: the ioctl() command
2567  * @arg: the argument from userspace
2568  *
2569  * This will essentially just redirect the ioctl()s coming in over to
2570  * the main block device spawning the RPMB character device.
2571  */
2572 static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
2573                unsigned long arg)
2574 {
2575     struct mmc_rpmb_data *rpmb = filp->private_data;
2576     int ret;
2577 
2578     switch (cmd) {
2579     case MMC_IOC_CMD:
2580         ret = mmc_blk_ioctl_cmd(rpmb->md,
2581                     (struct mmc_ioc_cmd __user *)arg,
2582                     rpmb);
2583         break;
2584     case MMC_IOC_MULTI_CMD:
2585         ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
2586                     (struct mmc_ioc_multi_cmd __user *)arg,
2587                     rpmb);
2588         break;
2589     default:
2590         ret = -EINVAL;
2591         break;
2592     }
2593 
2594     return ret;
2595 }
2596 
2597 #ifdef CONFIG_COMPAT
2598 static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
2599                   unsigned long arg)
2600 {
2601     return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
2602 }
2603 #endif
2604 
2605 static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
2606 {
2607     struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
2608                           struct mmc_rpmb_data, chrdev);
2609 
2610     get_device(&rpmb->dev);
2611     filp->private_data = rpmb;
2612     mmc_blk_get(rpmb->md->disk);
2613 
2614     return nonseekable_open(inode, filp);
2615 }
2616 
2617 static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
2618 {
2619     struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
2620                           struct mmc_rpmb_data, chrdev);
2621 
2622     mmc_blk_put(rpmb->md);
2623     put_device(&rpmb->dev);
2624 
2625     return 0;
2626 }
2627 
2628 static const struct file_operations mmc_rpmb_fileops = {
2629     .release = mmc_rpmb_chrdev_release,
2630     .open = mmc_rpmb_chrdev_open,
2631     .owner = THIS_MODULE,
2632     .llseek = no_llseek,
2633     .unlocked_ioctl = mmc_rpmb_ioctl,
2634 #ifdef CONFIG_COMPAT
2635     .compat_ioctl = mmc_rpmb_ioctl_compat,
2636 #endif
2637 };
2638 
2639 static void mmc_blk_rpmb_device_release(struct device *dev)
2640 {
2641     struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
2642 
2643     ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
2644     kfree(rpmb);
2645 }
2646 
2647 static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
2648                    struct mmc_blk_data *md,
2649                    unsigned int part_index,
2650                    sector_t size,
2651                    const char *subname)
2652 {
2653     int devidx, ret;
2654     char rpmb_name[DISK_NAME_LEN];
2655     char cap_str[10];
2656     struct mmc_rpmb_data *rpmb;
2657 
2658     /* This creates the minor number for the RPMB char device */
2659     devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
2660     if (devidx < 0)
2661         return devidx;
2662 
2663     rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
2664     if (!rpmb) {
2665         ida_simple_remove(&mmc_rpmb_ida, devidx);
2666         return -ENOMEM;
2667     }
2668 
2669     snprintf(rpmb_name, sizeof(rpmb_name),
2670          "mmcblk%u%s", card->host->index, subname ? subname : "");
2671 
2672     rpmb->id = devidx;
2673     rpmb->part_index = part_index;
2674     rpmb->dev.init_name = rpmb_name;
2675     rpmb->dev.bus = &mmc_rpmb_bus_type;
2676     rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
2677     rpmb->dev.parent = &card->dev;
2678     rpmb->dev.release = mmc_blk_rpmb_device_release;
2679     device_initialize(&rpmb->dev);
2680     dev_set_drvdata(&rpmb->dev, rpmb);
2681     rpmb->md = md;
2682 
2683     cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
2684     rpmb->chrdev.owner = THIS_MODULE;
2685     ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
2686     if (ret) {
2687         pr_err("%s: could not add character device\n", rpmb_name);
2688         goto out_put_device;
2689     }
2690 
2691     list_add(&rpmb->node, &md->rpmbs);
2692 
2693     string_get_size((u64)size, 512, STRING_UNITS_2,
2694             cap_str, sizeof(cap_str));
2695 
2696     pr_info("%s: %s %s %s, chardev (%d:%d)\n",
2697         rpmb_name, mmc_card_id(card), mmc_card_name(card), cap_str,
2698         MAJOR(mmc_rpmb_devt), rpmb->id);
2699 
2700     return 0;
2701 
2702 out_put_device:
2703     put_device(&rpmb->dev);
2704     return ret;
2705 }
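
/*
 * Userspace reaches the partition registered above through the chardev,
 * e.g. /dev/mmcblk0rpmb (the name follows rpmb_name; the path is
 * assumed). A minimal hypothetical caller:
 *
 *     int fd = open("/dev/mmcblk0rpmb", O_RDWR);
 *     ioctl(fd, MMC_IOC_MULTI_CMD, mioc);    with mioc filled in per
 *                                            the RPMB spec
 *
 * The ioctl is routed to mmc_rpmb_ioctl() defined earlier in this file.
 */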
2706 
2707 static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
2708 {
2710     cdev_device_del(&rpmb->chrdev, &rpmb->dev);
2711     put_device(&rpmb->dev);
2712 }
2713 
2714 /* MMC physical partitions consist of two boot partitions and
2715  * up to four general purpose partitions.
2716  * For each partition enabled in EXT_CSD a block device will be allocated
2717  * to provide access to the partition.
2718  */
2719 
2720 static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
2721 {
2722     int idx, ret;
2723 
2724     if (!mmc_card_mmc(card))
2725         return 0;
2726 
2727     for (idx = 0; idx < card->nr_parts; idx++) {
2728         if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
2729             /*
2730              * RPMB partitions do not provide block access; they
2731              * are only accessed using ioctl()s. Thus create
2732              * special RPMB block devices that do not have a
2733              * backing block queue.
2734              */
2735             ret = mmc_blk_alloc_rpmb_part(card, md,
2736                 card->part[idx].part_cfg,
2737                 card->part[idx].size >> 9,
2738                 card->part[idx].name);
2739             if (ret)
2740                 return ret;
2741         } else if (card->part[idx].size) {
2742             ret = mmc_blk_alloc_part(card, md,
2743                 card->part[idx].part_cfg,
2744                 card->part[idx].size >> 9,
2745                 card->part[idx].force_ro,
2746                 card->part[idx].name,
2747                 card->part[idx].area_type);
2748             if (ret)
2749                 return ret;
2750         }
2751     }
2752 
2753     return 0;
2754 }
2755 
2756 static void mmc_blk_remove_req(struct mmc_blk_data *md)
2757 {
2758     /*
2759      * Flush remaining requests and free queues. Freeing the queue is
2760      * what stops new requests from being accepted.
2761      */
2762     del_gendisk(md->disk);
2763     mmc_cleanup_queue(&md->queue);
2764     mmc_blk_put(md);
2765 }
2766 
2767 static void mmc_blk_remove_parts(struct mmc_card *card,
2768                  struct mmc_blk_data *md)
2769 {
2770     struct list_head *pos, *q;
2771     struct mmc_blk_data *part_md;
2772     struct mmc_rpmb_data *rpmb;
2773 
2774     /* Remove RPMB partitions */
2775     list_for_each_safe(pos, q, &md->rpmbs) {
2776         rpmb = list_entry(pos, struct mmc_rpmb_data, node);
2777         list_del(pos);
2778         mmc_blk_remove_rpmb_part(rpmb);
2779     }
2780     /* Remove block partitions */
2781     list_for_each_safe(pos, q, &md->part) {
2782         part_md = list_entry(pos, struct mmc_blk_data, part);
2783         list_del(pos);
2784         mmc_blk_remove_req(part_md);
2785     }
2786 }
2787 
2788 #ifdef CONFIG_DEBUG_FS
2789 
2790 static int mmc_dbg_card_status_get(void *data, u64 *val)
2791 {
2792     struct mmc_card *card = data;
2793     struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2794     struct mmc_queue *mq = &md->queue;
2795     struct request *req;
2796     int ret;
2797 
2798     /* Ask the block layer about the card status */
2799     req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
2800     if (IS_ERR(req))
2801         return PTR_ERR(req);
2802     req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
2803     blk_execute_rq(req, false);
2804     ret = req_to_mmc_queue_req(req)->drv_op_result;
2805     if (ret >= 0) {
2806         *val = ret;
2807         ret = 0;
2808     }
2809     blk_mq_free_request(req);
2810 
2811     return ret;
2812 }
2813 DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
2814              NULL, "%08llx\n");
2815 
2816 /* 512 bytes of two hex digits each, plus 1 for the trailing newline */
2817 #define EXT_CSD_STR_LEN 1025
2818 
2819 static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
2820 {
2821     struct mmc_card *card = inode->i_private;
2822     struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2823     struct mmc_queue *mq = &md->queue;
2824     struct request *req;
2825     char *buf;
2826     ssize_t n = 0;
2827     u8 *ext_csd;
2828     int err, i;
2829 
2830     buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL);
2831     if (!buf)
2832         return -ENOMEM;
2833 
2834     /* Ask the block layer for the EXT CSD */
2835     req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
2836     if (IS_ERR(req)) {
2837         err = PTR_ERR(req);
2838         goto out_free;
2839     }
2840     req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
2841     req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
2842     blk_execute_rq(req, false);
2843     err = req_to_mmc_queue_req(req)->drv_op_result;
2844     blk_mq_free_request(req);
2845     if (err) {
2846         pr_err("FAILED %d\n", err);
2847         goto out_free;
2848     }
2849 
2850     for (i = 0; i < 512; i++)
2851         n += sprintf(buf + n, "%02x", ext_csd[i]);
2852     n += sprintf(buf + n, "\n");
2853 
2854     if (n != EXT_CSD_STR_LEN) {
2855         err = -EINVAL;
2856         kfree(ext_csd);
2857         goto out_free;
2858     }
2859 
2860     filp->private_data = buf;
2861     kfree(ext_csd);
2862     return 0;
2863 
2864 out_free:
2865     kfree(buf);
2866     return err;
2867 }
2868 
2869 static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf,
2870                 size_t cnt, loff_t *ppos)
2871 {
2872     char *buf = filp->private_data;
2873 
2874     return simple_read_from_buffer(ubuf, cnt, ppos,
2875                        buf, EXT_CSD_STR_LEN);
2876 }
2877 
2878 static int mmc_ext_csd_release(struct inode *inode, struct file *file)
2879 {
2880     kfree(file->private_data);
2881     return 0;
2882 }
2883 
2884 static const struct file_operations mmc_dbg_ext_csd_fops = {
2885     .open       = mmc_ext_csd_open,
2886     .read       = mmc_ext_csd_read,
2887     .release    = mmc_ext_csd_release,
2888     .llseek     = default_llseek,
2889 };
2890 
2891 static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
2892 {
2893     struct dentry *root;
2894 
2895     if (!card->debugfs_root)
2896         return 0;
2897 
2898     root = card->debugfs_root;
2899 
2900     if (mmc_card_mmc(card) || mmc_card_sd(card)) {
2901         md->status_dentry =
2902             debugfs_create_file_unsafe("status", 0400, root,
2903                            card,
2904                            &mmc_dbg_card_status_fops);
2905         if (!md->status_dentry)
2906             return -EIO;
2907     }
2908 
2909     if (mmc_card_mmc(card)) {
2910         md->ext_csd_dentry =
2911             debugfs_create_file("ext_csd", S_IRUSR, root, card,
2912                         &mmc_dbg_ext_csd_fops);
2913         if (!md->ext_csd_dentry)
2914             return -EIO;
2915     }
2916 
2917     return 0;
2918 }
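
/*
 * With debugfs mounted, the entries created above typically appear under
 * the card's debugfs directory (exact path assumed, host index 0):
 *
 *     /sys/kernel/debug/mmc0/mmc0:0001/status     - R1 status as %08llx
 *     /sys/kernel/debug/mmc0/mmc0:0001/ext_csd    - 512 bytes as hex
 */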
2919 
2920 static void mmc_blk_remove_debugfs(struct mmc_card *card,
2921                    struct mmc_blk_data *md)
2922 {
2923     if (!card->debugfs_root)
2924         return;
2925 
2926     if (!IS_ERR_OR_NULL(md->status_dentry)) {
2927         debugfs_remove(md->status_dentry);
2928         md->status_dentry = NULL;
2929     }
2930 
2931     if (!IS_ERR_OR_NULL(md->ext_csd_dentry)) {
2932         debugfs_remove(md->ext_csd_dentry);
2933         md->ext_csd_dentry = NULL;
2934     }
2935 }
2936 
2937 #else
2938 
2939 static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md)
2940 {
2941     return 0;
2942 }
2943 
2944 static void mmc_blk_remove_debugfs(struct mmc_card *card,
2945                    struct mmc_blk_data *md)
2946 {
2947 }
2948 
2949 #endif /* CONFIG_DEBUG_FS */
2950 
2951 static int mmc_blk_probe(struct mmc_card *card)
2952 {
2953     struct mmc_blk_data *md;
2954     int ret = 0;
2955 
2956     /*
2957      * Check that the card supports the command class(es) we need.
2958      */
2959     if (!(card->csd.cmdclass & CCC_BLOCK_READ))
2960         return -ENODEV;
2961 
2962     mmc_fixup_device(card, mmc_blk_fixups);
2963 
2964     card->complete_wq = alloc_workqueue("mmc_complete",
2965                     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2966     if (!card->complete_wq) {
2967         pr_err("Failed to create mmc completion workqueue\n");
2968         return -ENOMEM;
2969     }
2970 
2971     md = mmc_blk_alloc(card);
2972     if (IS_ERR(md)) {
2973         ret = PTR_ERR(md);
2974         goto out_free;
2975     }
2976 
2977     ret = mmc_blk_alloc_parts(card, md);
2978     if (ret)
2979         goto out;
2980 
2981     /* Add two debugfs entries */
2982     mmc_blk_add_debugfs(card, md);
2983 
2984     pm_runtime_set_autosuspend_delay(&card->dev, 3000);
2985     pm_runtime_use_autosuspend(&card->dev);
2986 
2987     /*
2988      * Don't enable runtime PM for SD-combo cards here. Leave that
2989      * decision to be taken during the SDIO init sequence instead.
2990      */
2991     if (!mmc_card_sd_combo(card)) {
2992         pm_runtime_set_active(&card->dev);
2993         pm_runtime_enable(&card->dev);
2994     }
2995 
2996     return 0;
2997 
2998 out:
2999     mmc_blk_remove_parts(card, md);
3000     mmc_blk_remove_req(md);
3001 out_free:
3002     destroy_workqueue(card->complete_wq);
3003     return ret;
3004 }
3005 
3006 static void mmc_blk_remove(struct mmc_card *card)
3007 {
3008     struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
3009 
3010     mmc_blk_remove_debugfs(card, md);
3011     mmc_blk_remove_parts(card, md);
3012     pm_runtime_get_sync(&card->dev);
3013     if (md->part_curr != md->part_type) {
3014         mmc_claim_host(card->host);
3015         mmc_blk_part_switch(card, md->part_type);
3016         mmc_release_host(card->host);
3017     }
3018     if (!mmc_card_sd_combo(card))
3019         pm_runtime_disable(&card->dev);
3020     pm_runtime_put_noidle(&card->dev);
3021     mmc_blk_remove_req(md);
3022     dev_set_drvdata(&card->dev, NULL);
3023     destroy_workqueue(card->complete_wq);
3024 }
3025 
3026 static int _mmc_blk_suspend(struct mmc_card *card)
3027 {
3028     struct mmc_blk_data *part_md;
3029     struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
3030 
3031     if (md) {
3032         mmc_queue_suspend(&md->queue);
3033         list_for_each_entry(part_md, &md->part, part) {
3034             mmc_queue_suspend(&part_md->queue);
3035         }
3036     }
3037     return 0;
3038 }
3039 
3040 static void mmc_blk_shutdown(struct mmc_card *card)
3041 {
3042     _mmc_blk_suspend(card);
3043 }
3044 
3045 #ifdef CONFIG_PM_SLEEP
3046 static int mmc_blk_suspend(struct device *dev)
3047 {
3048     struct mmc_card *card = mmc_dev_to_card(dev);
3049 
3050     return _mmc_blk_suspend(card);
3051 }
3052 
3053 static int mmc_blk_resume(struct device *dev)
3054 {
3055     struct mmc_blk_data *part_md;
3056     struct mmc_blk_data *md = dev_get_drvdata(dev);
3057 
3058     if (md) {
3059         /*
3060          * Resume involves the card going into idle state,
3061          * so current partition is always the main one.
3062          */
3063         md->part_curr = md->part_type;
3064         mmc_queue_resume(&md->queue);
3065         list_for_each_entry(part_md, &md->part, part) {
3066             mmc_queue_resume(&part_md->queue);
3067         }
3068     }
3069     return 0;
3070 }
3071 #endif
3072 
3073 static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
3074 
3075 static struct mmc_driver mmc_driver = {
3076     .drv        = {
3077         .name   = "mmcblk",
3078         .pm = &mmc_blk_pm_ops,
3079     },
3080     .probe      = mmc_blk_probe,
3081     .remove     = mmc_blk_remove,
3082     .shutdown   = mmc_blk_shutdown,
3083 };
3084 
3085 static int __init mmc_blk_init(void)
3086 {
3087     int res;
3088 
3089     res = bus_register(&mmc_rpmb_bus_type);
3090     if (res < 0) {
3091         pr_err("mmcblk: could not register RPMB bus type\n");
3092         return res;
3093     }
3094     res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
3095     if (res < 0) {
3096         pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
3097         goto out_bus_unreg;
3098     }
3099 
3100     if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
3101         pr_info("mmcblk: using %d minors per device\n", perdev_minors);
3102 
3103     max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
3104 
3105     res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
3106     if (res)
3107         goto out_chrdev_unreg;
3108 
3109     res = mmc_register_driver(&mmc_driver);
3110     if (res)
3111         goto out_blkdev_unreg;
3112 
3113     return 0;
3114 
3115 out_blkdev_unreg:
3116     unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
3117 out_chrdev_unreg:
3118     unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
3119 out_bus_unreg:
3120     bus_unregister(&mmc_rpmb_bus_type);
3121     return res;
3122 }
3123 
3124 static void __exit mmc_blk_exit(void)
3125 {
3126     mmc_unregister_driver(&mmc_driver);
3127     unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
3128     unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
3129     bus_unregister(&mmc_rpmb_bus_type);
3130 }
3131 
3132 module_init(mmc_blk_init);
3133 module_exit(mmc_blk_exit);
3134 
3135 MODULE_LICENSE("GPL");
3136 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
3137