/* Source: linux/drivers/mmc/core/mmc_ops.c (mirrored via an LXR code viewer) */
0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
 *  linux/drivers/mmc/core/mmc_ops.c
0004  *
0005  *  Copyright 2006-2007 Pierre Ossman
0006  */
0007 
0008 #include <linux/slab.h>
0009 #include <linux/export.h>
0010 #include <linux/types.h>
0011 #include <linux/scatterlist.h>
0012 
0013 #include <linux/mmc/host.h>
0014 #include <linux/mmc/card.h>
0015 #include <linux/mmc/mmc.h>
0016 
0017 #include "core.h"
0018 #include "card.h"
0019 #include "host.h"
0020 #include "mmc_ops.h"
0021 
0022 #define MMC_BKOPS_TIMEOUT_MS        (120 * 1000) /* 120s */
0023 #define MMC_SANITIZE_TIMEOUT_MS     (240 * 1000) /* 240s */
0024 #define MMC_OP_COND_PERIOD_US       (4 * 1000) /* 4ms */
0025 #define MMC_OP_COND_TIMEOUT_MS      1000 /* 1s */
0026 
0027 static const u8 tuning_blk_pattern_4bit[] = {
0028     0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
0029     0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
0030     0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
0031     0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
0032     0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
0033     0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
0034     0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
0035     0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
0036 };
0037 
0038 static const u8 tuning_blk_pattern_8bit[] = {
0039     0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
0040     0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
0041     0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
0042     0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
0043     0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
0044     0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
0045     0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
0046     0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
0047     0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
0048     0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
0049     0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
0050     0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
0051     0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
0052     0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
0053     0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
0054     0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
0055 };
0056 
0057 struct mmc_busy_data {
0058     struct mmc_card *card;
0059     bool retry_crc_err;
0060     enum mmc_busy_cmd busy_cmd;
0061 };
0062 
0063 struct mmc_op_cond_busy_data {
0064     struct mmc_host *host;
0065     u32 ocr;
0066     struct mmc_command *cmd;
0067 };
0068 
0069 int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
0070 {
0071     int err;
0072     struct mmc_command cmd = {};
0073 
0074     cmd.opcode = MMC_SEND_STATUS;
0075     if (!mmc_host_is_spi(card->host))
0076         cmd.arg = card->rca << 16;
0077     cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
0078 
0079     err = mmc_wait_for_cmd(card->host, &cmd, retries);
0080     if (err)
0081         return err;
0082 
0083     /* NOTE: callers are required to understand the difference
0084      * between "native" and SPI format status words!
0085      */
0086     if (status)
0087         *status = cmd.resp[0];
0088 
0089     return 0;
0090 }
0091 EXPORT_SYMBOL_GPL(__mmc_send_status);
0092 
0093 int mmc_send_status(struct mmc_card *card, u32 *status)
0094 {
0095     return __mmc_send_status(card, status, MMC_CMD_RETRIES);
0096 }
0097 EXPORT_SYMBOL_GPL(mmc_send_status);
0098 
0099 static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
0100 {
0101     struct mmc_command cmd = {};
0102 
0103     cmd.opcode = MMC_SELECT_CARD;
0104 
0105     if (card) {
0106         cmd.arg = card->rca << 16;
0107         cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
0108     } else {
0109         cmd.arg = 0;
0110         cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
0111     }
0112 
0113     return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
0114 }
0115 
0116 int mmc_select_card(struct mmc_card *card)
0117 {
0118 
0119     return _mmc_select_card(card->host, card);
0120 }
0121 
0122 int mmc_deselect_cards(struct mmc_host *host)
0123 {
0124     return _mmc_select_card(host, NULL);
0125 }
0126 
0127 /*
0128  * Write the value specified in the device tree or board code into the optional
0129  * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
0130  * drive strength of the DAT and CMD outputs. The actual meaning of a given
0131  * value is hardware dependant.
0132  * The presence of the DSR register can be determined from the CSD register,
0133  * bit 76.
0134  */
0135 int mmc_set_dsr(struct mmc_host *host)
0136 {
0137     struct mmc_command cmd = {};
0138 
0139     cmd.opcode = MMC_SET_DSR;
0140 
0141     cmd.arg = (host->dsr << 16) | 0xffff;
0142     cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
0143 
0144     return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
0145 }
0146 
0147 int mmc_go_idle(struct mmc_host *host)
0148 {
0149     int err;
0150     struct mmc_command cmd = {};
0151 
0152     /*
0153      * Non-SPI hosts need to prevent chipselect going active during
0154      * GO_IDLE; that would put chips into SPI mode.  Remind them of
0155      * that in case of hardware that won't pull up DAT3/nCS otherwise.
0156      *
0157      * SPI hosts ignore ios.chip_select; it's managed according to
0158      * rules that must accommodate non-MMC slaves which this layer
0159      * won't even know about.
0160      */
0161     if (!mmc_host_is_spi(host)) {
0162         mmc_set_chip_select(host, MMC_CS_HIGH);
0163         mmc_delay(1);
0164     }
0165 
0166     cmd.opcode = MMC_GO_IDLE_STATE;
0167     cmd.arg = 0;
0168     cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;
0169 
0170     err = mmc_wait_for_cmd(host, &cmd, 0);
0171 
0172     mmc_delay(1);
0173 
0174     if (!mmc_host_is_spi(host)) {
0175         mmc_set_chip_select(host, MMC_CS_DONTCARE);
0176         mmc_delay(1);
0177     }
0178 
0179     host->use_spi_crc = 0;
0180 
0181     return err;
0182 }
0183 
0184 static int __mmc_send_op_cond_cb(void *cb_data, bool *busy)
0185 {
0186     struct mmc_op_cond_busy_data *data = cb_data;
0187     struct mmc_host *host = data->host;
0188     struct mmc_command *cmd = data->cmd;
0189     u32 ocr = data->ocr;
0190     int err = 0;
0191 
0192     err = mmc_wait_for_cmd(host, cmd, 0);
0193     if (err)
0194         return err;
0195 
0196     if (mmc_host_is_spi(host)) {
0197         if (!(cmd->resp[0] & R1_SPI_IDLE)) {
0198             *busy = false;
0199             return 0;
0200         }
0201     } else {
0202         if (cmd->resp[0] & MMC_CARD_BUSY) {
0203             *busy = false;
0204             return 0;
0205         }
0206     }
0207 
0208     *busy = true;
0209 
0210     /*
0211      * According to eMMC specification v5.1 section 6.4.3, we
0212      * should issue CMD1 repeatedly in the idle state until
0213      * the eMMC is ready. Otherwise some eMMC devices seem to enter
0214      * the inactive mode after mmc_init_card() issued CMD0 when
0215      * the eMMC device is busy.
0216      */
0217     if (!ocr && !mmc_host_is_spi(host))
0218         cmd->arg = cmd->resp[0] | BIT(30);
0219 
0220     return 0;
0221 }
0222 
0223 int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
0224 {
0225     struct mmc_command cmd = {};
0226     int err = 0;
0227     struct mmc_op_cond_busy_data cb_data = {
0228         .host = host,
0229         .ocr = ocr,
0230         .cmd = &cmd
0231     };
0232 
0233     cmd.opcode = MMC_SEND_OP_COND;
0234     cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
0235     cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
0236 
0237     err = __mmc_poll_for_busy(host, MMC_OP_COND_PERIOD_US,
0238                   MMC_OP_COND_TIMEOUT_MS,
0239                   &__mmc_send_op_cond_cb, &cb_data);
0240     if (err)
0241         return err;
0242 
0243     if (rocr && !mmc_host_is_spi(host))
0244         *rocr = cmd.resp[0];
0245 
0246     return err;
0247 }
0248 
0249 int mmc_set_relative_addr(struct mmc_card *card)
0250 {
0251     struct mmc_command cmd = {};
0252 
0253     cmd.opcode = MMC_SET_RELATIVE_ADDR;
0254     cmd.arg = card->rca << 16;
0255     cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
0256 
0257     return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
0258 }
0259 
0260 static int
0261 mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
0262 {
0263     int err;
0264     struct mmc_command cmd = {};
0265 
0266     cmd.opcode = opcode;
0267     cmd.arg = arg;
0268     cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;
0269 
0270     err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
0271     if (err)
0272         return err;
0273 
0274     memcpy(cxd, cmd.resp, sizeof(u32) * 4);
0275 
0276     return 0;
0277 }
0278 
0279 /*
0280  * NOTE: void *buf, caller for the buf is required to use DMA-capable
0281  * buffer or on-stack buffer (with some overhead in callee).
0282  */
0283 int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
0284                u32 args, void *buf, unsigned len)
0285 {
0286     struct mmc_request mrq = {};
0287     struct mmc_command cmd = {};
0288     struct mmc_data data = {};
0289     struct scatterlist sg;
0290 
0291     mrq.cmd = &cmd;
0292     mrq.data = &data;
0293 
0294     cmd.opcode = opcode;
0295     cmd.arg = args;
0296 
0297     /* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
0298      * rely on callers to never use this with "native" calls for reading
0299      * CSD or CID.  Native versions of those commands use the R2 type,
0300      * not R1 plus a data block.
0301      */
0302     cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
0303 
0304     data.blksz = len;
0305     data.blocks = 1;
0306     data.flags = MMC_DATA_READ;
0307     data.sg = &sg;
0308     data.sg_len = 1;
0309 
0310     sg_init_one(&sg, buf, len);
0311 
0312     if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
0313         /*
0314          * The spec states that CSR and CID accesses have a timeout
0315          * of 64 clock cycles.
0316          */
0317         data.timeout_ns = 0;
0318         data.timeout_clks = 64;
0319     } else
0320         mmc_set_data_timeout(&data, card);
0321 
0322     mmc_wait_for_req(host, &mrq);
0323 
0324     if (cmd.error)
0325         return cmd.error;
0326     if (data.error)
0327         return data.error;
0328 
0329     return 0;
0330 }
0331 
0332 static int mmc_spi_send_cxd(struct mmc_host *host, u32 *cxd, u32 opcode)
0333 {
0334     int ret, i;
0335     __be32 *cxd_tmp;
0336 
0337     cxd_tmp = kzalloc(16, GFP_KERNEL);
0338     if (!cxd_tmp)
0339         return -ENOMEM;
0340 
0341     ret = mmc_send_adtc_data(NULL, host, opcode, 0, cxd_tmp, 16);
0342     if (ret)
0343         goto err;
0344 
0345     for (i = 0; i < 4; i++)
0346         cxd[i] = be32_to_cpu(cxd_tmp[i]);
0347 
0348 err:
0349     kfree(cxd_tmp);
0350     return ret;
0351 }
0352 
0353 int mmc_send_csd(struct mmc_card *card, u32 *csd)
0354 {
0355     if (mmc_host_is_spi(card->host))
0356         return mmc_spi_send_cxd(card->host, csd, MMC_SEND_CSD);
0357 
0358     return mmc_send_cxd_native(card->host, card->rca << 16, csd,
0359                 MMC_SEND_CSD);
0360 }
0361 
0362 int mmc_send_cid(struct mmc_host *host, u32 *cid)
0363 {
0364     if (mmc_host_is_spi(host))
0365         return mmc_spi_send_cxd(host, cid, MMC_SEND_CID);
0366 
0367     return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
0368 }
0369 
0370 int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
0371 {
0372     int err;
0373     u8 *ext_csd;
0374 
0375     if (!card || !new_ext_csd)
0376         return -EINVAL;
0377 
0378     if (!mmc_can_ext_csd(card))
0379         return -EOPNOTSUPP;
0380 
0381     /*
0382      * As the ext_csd is so large and mostly unused, we don't store the
0383      * raw block in mmc_card.
0384      */
0385     ext_csd = kzalloc(512, GFP_KERNEL);
0386     if (!ext_csd)
0387         return -ENOMEM;
0388 
0389     err = mmc_send_adtc_data(card, card->host, MMC_SEND_EXT_CSD, 0, ext_csd,
0390                 512);
0391     if (err)
0392         kfree(ext_csd);
0393     else
0394         *new_ext_csd = ext_csd;
0395 
0396     return err;
0397 }
0398 EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
0399 
0400 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
0401 {
0402     struct mmc_command cmd = {};
0403     int err;
0404 
0405     cmd.opcode = MMC_SPI_READ_OCR;
0406     cmd.arg = highcap ? (1 << 30) : 0;
0407     cmd.flags = MMC_RSP_SPI_R3;
0408 
0409     err = mmc_wait_for_cmd(host, &cmd, 0);
0410 
0411     *ocrp = cmd.resp[1];
0412     return err;
0413 }
0414 
0415 int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
0416 {
0417     struct mmc_command cmd = {};
0418     int err;
0419 
0420     cmd.opcode = MMC_SPI_CRC_ON_OFF;
0421     cmd.flags = MMC_RSP_SPI_R1;
0422     cmd.arg = use_crc;
0423 
0424     err = mmc_wait_for_cmd(host, &cmd, 0);
0425     if (!err)
0426         host->use_spi_crc = use_crc;
0427     return err;
0428 }
0429 
0430 static int mmc_switch_status_error(struct mmc_host *host, u32 status)
0431 {
0432     if (mmc_host_is_spi(host)) {
0433         if (status & R1_SPI_ILLEGAL_COMMAND)
0434             return -EBADMSG;
0435     } else {
0436         if (R1_STATUS(status))
0437             pr_warn("%s: unexpected status %#x after switch\n",
0438                 mmc_hostname(host), status);
0439         if (status & R1_SWITCH_ERROR)
0440             return -EBADMSG;
0441     }
0442     return 0;
0443 }
0444 
0445 /* Caller must hold re-tuning */
0446 int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
0447 {
0448     u32 status;
0449     int err;
0450 
0451     err = mmc_send_status(card, &status);
0452     if (!crc_err_fatal && err == -EILSEQ)
0453         return 0;
0454     if (err)
0455         return err;
0456 
0457     return mmc_switch_status_error(card->host, status);
0458 }
0459 
0460 static int mmc_busy_cb(void *cb_data, bool *busy)
0461 {
0462     struct mmc_busy_data *data = cb_data;
0463     struct mmc_host *host = data->card->host;
0464     u32 status = 0;
0465     int err;
0466 
0467     if (data->busy_cmd != MMC_BUSY_IO && host->ops->card_busy) {
0468         *busy = host->ops->card_busy(host);
0469         return 0;
0470     }
0471 
0472     err = mmc_send_status(data->card, &status);
0473     if (data->retry_crc_err && err == -EILSEQ) {
0474         *busy = true;
0475         return 0;
0476     }
0477     if (err)
0478         return err;
0479 
0480     switch (data->busy_cmd) {
0481     case MMC_BUSY_CMD6:
0482         err = mmc_switch_status_error(host, status);
0483         break;
0484     case MMC_BUSY_ERASE:
0485         err = R1_STATUS(status) ? -EIO : 0;
0486         break;
0487     case MMC_BUSY_HPI:
0488     case MMC_BUSY_EXTR_SINGLE:
0489     case MMC_BUSY_IO:
0490         break;
0491     default:
0492         err = -EINVAL;
0493     }
0494 
0495     if (err)
0496         return err;
0497 
0498     *busy = !mmc_ready_for_data(status);
0499     return 0;
0500 }
0501 
0502 int __mmc_poll_for_busy(struct mmc_host *host, unsigned int period_us,
0503             unsigned int timeout_ms,
0504             int (*busy_cb)(void *cb_data, bool *busy),
0505             void *cb_data)
0506 {
0507     int err;
0508     unsigned long timeout;
0509     unsigned int udelay = period_us ? period_us : 32, udelay_max = 32768;
0510     bool expired = false;
0511     bool busy = false;
0512 
0513     timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
0514     do {
0515         /*
0516          * Due to the possibility of being preempted while polling,
0517          * check the expiration time first.
0518          */
0519         expired = time_after(jiffies, timeout);
0520 
0521         err = (*busy_cb)(cb_data, &busy);
0522         if (err)
0523             return err;
0524 
0525         /* Timeout if the device still remains busy. */
0526         if (expired && busy) {
0527             pr_err("%s: Card stuck being busy! %s\n",
0528                 mmc_hostname(host), __func__);
0529             return -ETIMEDOUT;
0530         }
0531 
0532         /* Throttle the polling rate to avoid hogging the CPU. */
0533         if (busy) {
0534             usleep_range(udelay, udelay * 2);
0535             if (udelay < udelay_max)
0536                 udelay *= 2;
0537         }
0538     } while (busy);
0539 
0540     return 0;
0541 }
0542 EXPORT_SYMBOL_GPL(__mmc_poll_for_busy);
0543 
0544 int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
0545               bool retry_crc_err, enum mmc_busy_cmd busy_cmd)
0546 {
0547     struct mmc_host *host = card->host;
0548     struct mmc_busy_data cb_data;
0549 
0550     cb_data.card = card;
0551     cb_data.retry_crc_err = retry_crc_err;
0552     cb_data.busy_cmd = busy_cmd;
0553 
0554     return __mmc_poll_for_busy(host, 0, timeout_ms, &mmc_busy_cb, &cb_data);
0555 }
0556 EXPORT_SYMBOL_GPL(mmc_poll_for_busy);
0557 
0558 bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
0559               unsigned int timeout_ms)
0560 {
0561     /*
0562      * If the max_busy_timeout of the host is specified, make sure it's
0563      * enough to fit the used timeout_ms. In case it's not, let's instruct
0564      * the host to avoid HW busy detection, by converting to a R1 response
0565      * instead of a R1B. Note, some hosts requires R1B, which also means
0566      * they are on their own when it comes to deal with the busy timeout.
0567      */
0568     if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
0569         (timeout_ms > host->max_busy_timeout)) {
0570         cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1;
0571         return false;
0572     }
0573 
0574     cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1B | MMC_RSP_R1B;
0575     cmd->busy_timeout = timeout_ms;
0576     return true;
0577 }
0578 
0579 /**
0580  *  __mmc_switch - modify EXT_CSD register
0581  *  @card: the MMC card associated with the data transfer
0582  *  @set: cmd set values
0583  *  @index: EXT_CSD register index
0584  *  @value: value to program into EXT_CSD register
0585  *  @timeout_ms: timeout (ms) for operation performed by register write,
0586  *                   timeout of zero implies maximum possible timeout
0587  *  @timing: new timing to change to
0588  *  @send_status: send status cmd to poll for busy
0589  *  @retry_crc_err: retry when CRC errors when polling with CMD13 for busy
0590  *  @retries: number of retries
0591  *
0592  *  Modifies the EXT_CSD register for selected card.
0593  */
0594 int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
0595         unsigned int timeout_ms, unsigned char timing,
0596         bool send_status, bool retry_crc_err, unsigned int retries)
0597 {
0598     struct mmc_host *host = card->host;
0599     int err;
0600     struct mmc_command cmd = {};
0601     bool use_r1b_resp;
0602     unsigned char old_timing = host->ios.timing;
0603 
0604     mmc_retune_hold(host);
0605 
0606     if (!timeout_ms) {
0607         pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
0608             mmc_hostname(host));
0609         timeout_ms = card->ext_csd.generic_cmd6_time;
0610     }
0611 
0612     cmd.opcode = MMC_SWITCH;
0613     cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
0614           (index << 16) |
0615           (value << 8) |
0616           set;
0617     use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd, timeout_ms);
0618 
0619     err = mmc_wait_for_cmd(host, &cmd, retries);
0620     if (err)
0621         goto out;
0622 
0623     /*If SPI or used HW busy detection above, then we don't need to poll. */
0624     if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
0625         mmc_host_is_spi(host))
0626         goto out_tim;
0627 
0628     /*
0629      * If the host doesn't support HW polling via the ->card_busy() ops and
0630      * when it's not allowed to poll by using CMD13, then we need to rely on
0631      * waiting the stated timeout to be sufficient.
0632      */
0633     if (!send_status && !host->ops->card_busy) {
0634         mmc_delay(timeout_ms);
0635         goto out_tim;
0636     }
0637 
0638     /* Let's try to poll to find out when the command is completed. */
0639     err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
0640     if (err)
0641         goto out;
0642 
0643 out_tim:
0644     /* Switch to new timing before check switch status. */
0645     if (timing)
0646         mmc_set_timing(host, timing);
0647 
0648     if (send_status) {
0649         err = mmc_switch_status(card, true);
0650         if (err && timing)
0651             mmc_set_timing(host, old_timing);
0652     }
0653 out:
0654     mmc_retune_release(host);
0655 
0656     return err;
0657 }
0658 
0659 int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
0660         unsigned int timeout_ms)
0661 {
0662     return __mmc_switch(card, set, index, value, timeout_ms, 0,
0663                 true, false, MMC_CMD_RETRIES);
0664 }
0665 EXPORT_SYMBOL_GPL(mmc_switch);
0666 
0667 int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
0668 {
0669     struct mmc_request mrq = {};
0670     struct mmc_command cmd = {};
0671     struct mmc_data data = {};
0672     struct scatterlist sg;
0673     struct mmc_ios *ios = &host->ios;
0674     const u8 *tuning_block_pattern;
0675     int size, err = 0;
0676     u8 *data_buf;
0677 
0678     if (ios->bus_width == MMC_BUS_WIDTH_8) {
0679         tuning_block_pattern = tuning_blk_pattern_8bit;
0680         size = sizeof(tuning_blk_pattern_8bit);
0681     } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
0682         tuning_block_pattern = tuning_blk_pattern_4bit;
0683         size = sizeof(tuning_blk_pattern_4bit);
0684     } else
0685         return -EINVAL;
0686 
0687     data_buf = kzalloc(size, GFP_KERNEL);
0688     if (!data_buf)
0689         return -ENOMEM;
0690 
0691     mrq.cmd = &cmd;
0692     mrq.data = &data;
0693 
0694     cmd.opcode = opcode;
0695     cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
0696 
0697     data.blksz = size;
0698     data.blocks = 1;
0699     data.flags = MMC_DATA_READ;
0700 
0701     /*
0702      * According to the tuning specs, Tuning process
0703      * is normally shorter 40 executions of CMD19,
0704      * and timeout value should be shorter than 150 ms
0705      */
0706     data.timeout_ns = 150 * NSEC_PER_MSEC;
0707 
0708     data.sg = &sg;
0709     data.sg_len = 1;
0710     sg_init_one(&sg, data_buf, size);
0711 
0712     mmc_wait_for_req(host, &mrq);
0713 
0714     if (cmd_error)
0715         *cmd_error = cmd.error;
0716 
0717     if (cmd.error) {
0718         err = cmd.error;
0719         goto out;
0720     }
0721 
0722     if (data.error) {
0723         err = data.error;
0724         goto out;
0725     }
0726 
0727     if (memcmp(data_buf, tuning_block_pattern, size))
0728         err = -EIO;
0729 
0730 out:
0731     kfree(data_buf);
0732     return err;
0733 }
0734 EXPORT_SYMBOL_GPL(mmc_send_tuning);
0735 
0736 int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode)
0737 {
0738     struct mmc_command cmd = {};
0739 
0740     /*
0741      * eMMC specification specifies that CMD12 can be used to stop a tuning
0742      * command, but SD specification does not, so do nothing unless it is
0743      * eMMC.
0744      */
0745     if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
0746         return 0;
0747 
0748     cmd.opcode = MMC_STOP_TRANSMISSION;
0749     cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
0750 
0751     /*
0752      * For drivers that override R1 to R1b, set an arbitrary timeout based
0753      * on the tuning timeout i.e. 150ms.
0754      */
0755     cmd.busy_timeout = 150;
0756 
0757     return mmc_wait_for_cmd(host, &cmd, 0);
0758 }
0759 EXPORT_SYMBOL_GPL(mmc_send_abort_tuning);
0760 
0761 static int
0762 mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
0763           u8 len)
0764 {
0765     struct mmc_request mrq = {};
0766     struct mmc_command cmd = {};
0767     struct mmc_data data = {};
0768     struct scatterlist sg;
0769     u8 *data_buf;
0770     u8 *test_buf;
0771     int i, err;
0772     static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
0773     static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
0774 
0775     /* dma onto stack is unsafe/nonportable, but callers to this
0776      * routine normally provide temporary on-stack buffers ...
0777      */
0778     data_buf = kmalloc(len, GFP_KERNEL);
0779     if (!data_buf)
0780         return -ENOMEM;
0781 
0782     if (len == 8)
0783         test_buf = testdata_8bit;
0784     else if (len == 4)
0785         test_buf = testdata_4bit;
0786     else {
0787         pr_err("%s: Invalid bus_width %d\n",
0788                mmc_hostname(host), len);
0789         kfree(data_buf);
0790         return -EINVAL;
0791     }
0792 
0793     if (opcode == MMC_BUS_TEST_W)
0794         memcpy(data_buf, test_buf, len);
0795 
0796     mrq.cmd = &cmd;
0797     mrq.data = &data;
0798     cmd.opcode = opcode;
0799     cmd.arg = 0;
0800 
0801     /* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
0802      * rely on callers to never use this with "native" calls for reading
0803      * CSD or CID.  Native versions of those commands use the R2 type,
0804      * not R1 plus a data block.
0805      */
0806     cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
0807 
0808     data.blksz = len;
0809     data.blocks = 1;
0810     if (opcode == MMC_BUS_TEST_R)
0811         data.flags = MMC_DATA_READ;
0812     else
0813         data.flags = MMC_DATA_WRITE;
0814 
0815     data.sg = &sg;
0816     data.sg_len = 1;
0817     mmc_set_data_timeout(&data, card);
0818     sg_init_one(&sg, data_buf, len);
0819     mmc_wait_for_req(host, &mrq);
0820     err = 0;
0821     if (opcode == MMC_BUS_TEST_R) {
0822         for (i = 0; i < len / 4; i++)
0823             if ((test_buf[i] ^ data_buf[i]) != 0xff) {
0824                 err = -EIO;
0825                 break;
0826             }
0827     }
0828     kfree(data_buf);
0829 
0830     if (cmd.error)
0831         return cmd.error;
0832     if (data.error)
0833         return data.error;
0834 
0835     return err;
0836 }
0837 
0838 int mmc_bus_test(struct mmc_card *card, u8 bus_width)
0839 {
0840     int width;
0841 
0842     if (bus_width == MMC_BUS_WIDTH_8)
0843         width = 8;
0844     else if (bus_width == MMC_BUS_WIDTH_4)
0845         width = 4;
0846     else if (bus_width == MMC_BUS_WIDTH_1)
0847         return 0; /* no need for test */
0848     else
0849         return -EINVAL;
0850 
0851     /*
0852      * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
0853      * is a problem.  This improves chances that the test will work.
0854      */
0855     mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
0856     return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
0857 }
0858 
0859 static int mmc_send_hpi_cmd(struct mmc_card *card)
0860 {
0861     unsigned int busy_timeout_ms = card->ext_csd.out_of_int_time;
0862     struct mmc_host *host = card->host;
0863     bool use_r1b_resp = false;
0864     struct mmc_command cmd = {};
0865     int err;
0866 
0867     cmd.opcode = card->ext_csd.hpi_cmd;
0868     cmd.arg = card->rca << 16 | 1;
0869     cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
0870 
0871     if (cmd.opcode == MMC_STOP_TRANSMISSION)
0872         use_r1b_resp = mmc_prepare_busy_cmd(host, &cmd,
0873                             busy_timeout_ms);
0874 
0875     err = mmc_wait_for_cmd(host, &cmd, 0);
0876     if (err) {
0877         pr_warn("%s: HPI error %d. Command response %#x\n",
0878             mmc_hostname(host), err, cmd.resp[0]);
0879         return err;
0880     }
0881 
0882     /* No need to poll when using HW busy detection. */
0883     if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
0884         return 0;
0885 
0886     /* Let's poll to find out when the HPI request completes. */
0887     return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
0888 }
0889 
0890 /**
0891  *  mmc_interrupt_hpi - Issue for High priority Interrupt
0892  *  @card: the MMC card associated with the HPI transfer
0893  *
0894  *  Issued High Priority Interrupt, and check for card status
0895  *  until out-of prg-state.
0896  */
0897 static int mmc_interrupt_hpi(struct mmc_card *card)
0898 {
0899     int err;
0900     u32 status;
0901 
0902     if (!card->ext_csd.hpi_en) {
0903         pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
0904         return 1;
0905     }
0906 
0907     err = mmc_send_status(card, &status);
0908     if (err) {
0909         pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
0910         goto out;
0911     }
0912 
0913     switch (R1_CURRENT_STATE(status)) {
0914     case R1_STATE_IDLE:
0915     case R1_STATE_READY:
0916     case R1_STATE_STBY:
0917     case R1_STATE_TRAN:
0918         /*
0919          * In idle and transfer states, HPI is not needed and the caller
0920          * can issue the next intended command immediately
0921          */
0922         goto out;
0923     case R1_STATE_PRG:
0924         break;
0925     default:
0926         /* In all other states, it's illegal to issue HPI */
0927         pr_debug("%s: HPI cannot be sent. Card state=%d\n",
0928             mmc_hostname(card->host), R1_CURRENT_STATE(status));
0929         err = -EINVAL;
0930         goto out;
0931     }
0932 
0933     err = mmc_send_hpi_cmd(card);
0934 out:
0935     return err;
0936 }
0937 
0938 int mmc_can_ext_csd(struct mmc_card *card)
0939 {
0940     return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
0941 }
0942 
0943 static int mmc_read_bkops_status(struct mmc_card *card)
0944 {
0945     int err;
0946     u8 *ext_csd;
0947 
0948     err = mmc_get_ext_csd(card, &ext_csd);
0949     if (err)
0950         return err;
0951 
0952     card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
0953     card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
0954     kfree(ext_csd);
0955     return 0;
0956 }
0957 
0958 /**
0959  *  mmc_run_bkops - Run BKOPS for supported cards
0960  *  @card: MMC card to run BKOPS for
0961  *
0962  *  Run background operations synchronously for cards having manual BKOPS
0963  *  enabled and in case it reports urgent BKOPS level.
0964 */
0965 void mmc_run_bkops(struct mmc_card *card)
0966 {
0967     int err;
0968 
0969     if (!card->ext_csd.man_bkops_en)
0970         return;
0971 
0972     err = mmc_read_bkops_status(card);
0973     if (err) {
0974         pr_err("%s: Failed to read bkops status: %d\n",
0975                mmc_hostname(card->host), err);
0976         return;
0977     }
0978 
0979     if (!card->ext_csd.raw_bkops_status ||
0980         card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
0981         return;
0982 
0983     mmc_retune_hold(card->host);
0984 
0985     /*
0986      * For urgent BKOPS status, LEVEL_2 and higher, let's execute
0987      * synchronously. Future wise, we may consider to start BKOPS, for less
0988      * urgent levels by using an asynchronous background task, when idle.
0989      */
0990     err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
0991              EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
0992     /*
0993      * If the BKOPS timed out, the card is probably still busy in the
0994      * R1_STATE_PRG. Rather than continue to wait, let's try to abort
0995      * it with a HPI command to get back into R1_STATE_TRAN.
0996      */
0997     if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
0998         pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
0999     else if (err)
1000         pr_warn("%s: Error %d running bkops\n",
1001             mmc_hostname(card->host), err);
1002 
1003     mmc_retune_release(card->host);
1004 }
1005 EXPORT_SYMBOL(mmc_run_bkops);
1006 
1007 static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
1008 {
1009     u8 val = enable ? EXT_CSD_CMDQ_MODE_ENABLED : 0;
1010     int err;
1011 
1012     if (!card->ext_csd.cmdq_support)
1013         return -EOPNOTSUPP;
1014 
1015     err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ_MODE_EN,
1016              val, card->ext_csd.generic_cmd6_time);
1017     if (!err)
1018         card->ext_csd.cmdq_en = enable;
1019 
1020     return err;
1021 }
1022 
1023 int mmc_cmdq_enable(struct mmc_card *card)
1024 {
1025     return mmc_cmdq_switch(card, true);
1026 }
1027 EXPORT_SYMBOL_GPL(mmc_cmdq_enable);
1028 
1029 int mmc_cmdq_disable(struct mmc_card *card)
1030 {
1031     return mmc_cmdq_switch(card, false);
1032 }
1033 EXPORT_SYMBOL_GPL(mmc_cmdq_disable);
1034 
1035 int mmc_sanitize(struct mmc_card *card, unsigned int timeout_ms)
1036 {
1037     struct mmc_host *host = card->host;
1038     int err;
1039 
1040     if (!mmc_can_sanitize(card)) {
1041         pr_warn("%s: Sanitize not supported\n", mmc_hostname(host));
1042         return -EOPNOTSUPP;
1043     }
1044 
1045     if (!timeout_ms)
1046         timeout_ms = MMC_SANITIZE_TIMEOUT_MS;
1047 
1048     pr_debug("%s: Sanitize in progress...\n", mmc_hostname(host));
1049 
1050     mmc_retune_hold(host);
1051 
1052     err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_SANITIZE_START,
1053                1, timeout_ms, 0, true, false, 0);
1054     if (err)
1055         pr_err("%s: Sanitize failed err=%d\n", mmc_hostname(host), err);
1056 
1057     /*
1058      * If the sanitize operation timed out, the card is probably still busy
1059      * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
1060      * it with a HPI command to get back into R1_STATE_TRAN.
1061      */
1062     if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
1063         pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));
1064 
1065     mmc_retune_release(host);
1066 
1067     pr_debug("%s: Sanitize completed\n", mmc_hostname(host));
1068     return err;
1069 }
1070 EXPORT_SYMBOL_GPL(mmc_sanitize);