// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
                 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
                 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS  (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
                 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS  (DW_MCI_DATA_ERROR_FLAGS | \
                 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS  1
#define DW_MCI_RECV_STATUS  2
#define DW_MCI_DMA_THRESHOLD    16

#define DW_MCI_FREQ_MAX 200000000   /* unit: HZ */
#define DW_MCI_FREQ_MIN 100000      /* unit: HZ */

#define IDMAC_INT_CLR       (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
                 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
                 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
                 SDMMC_IDMAC_INT_TI)

#define DESC_RING_BUF_SZ    PAGE_SIZE

struct idmac_desc_64addr {
    u32     des0;   /* Control Descriptor */
#define IDMAC_OWN_CLR64(x) \
    !((x) & cpu_to_le32(IDMAC_DES0_OWN))

    u32     des1;   /* Reserved */

    u32     des2;   /* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
    ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
     ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

    u32     des3;   /* Reserved */

    u32     des4;   /* Lower 32-bits of Buffer Address Pointer 1 */
    u32     des5;   /* Upper 32-bits of Buffer Address Pointer 1 */

    u32     des6;   /* Lower 32-bits of Next Descriptor Address */
    u32     des7;   /* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
    __le32      des0;   /* Control Descriptor */
#define IDMAC_DES0_DIC  BIT(1)
#define IDMAC_DES0_LD   BIT(2)
#define IDMAC_DES0_FD   BIT(3)
#define IDMAC_DES0_CH   BIT(4)
#define IDMAC_DES0_ER   BIT(5)
#define IDMAC_DES0_CES  BIT(30)
#define IDMAC_DES0_OWN  BIT(31)

    __le32      des1;   /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
    ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

    __le32      des2;   /* buffer 1 physical address */

    __le32      des3;   /* buffer 2 physical address */
};

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH 0x1000
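
/*
 * Sizing note (illustrative, assuming 4 KiB pages): the descriptor ring
 * holds DESC_RING_BUF_SZ / 16 = 256 32-bit descriptors, or
 * DESC_RING_BUF_SZ / 32 = 128 64-bit descriptors.  At 4 KiB of data per
 * chained descriptor, that caps a single prepared transfer at roughly
 * 1 MiB (32-bit) or 512 KiB (64-bit) per ring.
 */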

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
    struct dw_mci_slot *slot = s->private;
    struct mmc_request *mrq;
    struct mmc_command *cmd;
    struct mmc_command *stop;
    struct mmc_data *data;

    /* Make sure we get a consistent snapshot */
    spin_lock_bh(&slot->host->lock);
    mrq = slot->mrq;

    if (mrq) {
        cmd = mrq->cmd;
        data = mrq->data;
        stop = mrq->stop;

        if (cmd)
            seq_printf(s,
                   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
                   cmd->opcode, cmd->arg, cmd->flags,
                   cmd->resp[0], cmd->resp[1], cmd->resp[2],
                   cmd->resp[3], cmd->error);
        if (data)
            seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
                   data->bytes_xfered, data->blocks,
                   data->blksz, data->flags, data->error);
        if (stop)
            seq_printf(s,
                   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
                   stop->opcode, stop->arg, stop->flags,
                   stop->resp[0], stop->resp[1], stop->resp[2],
                   stop->resp[3], stop->error);
    }

    spin_unlock_bh(&slot->host->lock);

    return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_req);

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
    struct dw_mci *host = s->private;

    pm_runtime_get_sync(host->dev);

    seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
    seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
    seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
    seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
    seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
    seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

    pm_runtime_put_autosuspend(host->dev);

    return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);

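/*
 * On a typical system these attributes land under the mmc host's
 * debugfs directory, so they can be read with e.g.
 * "cat /sys/kernel/debug/mmc0/regs" (the "mmc0" index is illustrative
 * and depends on probe order).
 */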
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
    struct mmc_host *mmc = slot->mmc;
    struct dw_mci *host = slot->host;
    struct dentry *root;

    root = mmc->debugfs_root;
    if (!root)
        return;

    debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
    debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
    debugfs_create_u32("state", S_IRUSR, root, &host->state);
    debugfs_create_xul("pending_events", S_IRUSR, root,
               &host->pending_events);
    debugfs_create_xul("completed_events", S_IRUSR, root,
               &host->completed_events);
#ifdef CONFIG_FAULT_INJECTION
    fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc);
#endif
}
#endif /* defined(CONFIG_DEBUG_FS) */

static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
    u32 ctrl;

    ctrl = mci_readl(host, CTRL);
    ctrl |= reset;
    mci_writel(host, CTRL, ctrl);

    /* wait till resets clear */
    if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
                      !(ctrl & reset),
                      1, 500 * USEC_PER_MSEC)) {
        dev_err(host->dev,
            "Timeout resetting block (ctrl reset %#x)\n",
            ctrl & reset);
        return false;
    }

    return true;
}

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
    u32 status;

    /*
     * Databook says that before issuing a new data transfer command
     * we need to check to see if the card is busy.  Data transfer commands
     * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
     *
     * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
     * expected.
     */
    if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
        !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
        if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
                          status,
                          !(status & SDMMC_STATUS_BUSY),
                          10, 500 * USEC_PER_MSEC))
            dev_err(host->dev, "Busy; trying anyway\n");
    }
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
    struct dw_mci *host = slot->host;
    unsigned int cmd_status = 0;

    mci_writel(host, CMDARG, arg);
    wmb(); /* drain writebuffer */
    dw_mci_wait_while_busy(host, cmd);
    mci_writel(host, CMD, SDMMC_CMD_START | cmd);

    if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
                      !(cmd_status & SDMMC_CMD_START),
                      1, 500 * USEC_PER_MSEC))
        dev_err(&slot->mmc->class_dev,
            "Timeout sending command (cmd %#x arg %#x status %#x)\n",
            cmd, arg, cmd_status);
}

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
    struct dw_mci_slot *slot = mmc_priv(mmc);
    struct dw_mci *host = slot->host;
    u32 cmdr;

    cmd->error = -EINPROGRESS;
    cmdr = cmd->opcode;

    if (cmd->opcode == MMC_STOP_TRANSMISSION ||
        cmd->opcode == MMC_GO_IDLE_STATE ||
        cmd->opcode == MMC_GO_INACTIVE_STATE ||
        (cmd->opcode == SD_IO_RW_DIRECT &&
         ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
        cmdr |= SDMMC_CMD_STOP;
    else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
        cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

    if (cmd->opcode == SD_SWITCH_VOLTAGE) {
        u32 clk_en_a;

        /* Special bit makes CMD11 not die */
        cmdr |= SDMMC_CMD_VOLT_SWITCH;

        /* Change state to continue to handle CMD11 weirdness */
        WARN_ON(slot->host->state != STATE_SENDING_CMD);
        slot->host->state = STATE_SENDING_CMD11;

        /*
         * We need to disable low power mode (automatic clock stop)
         * while doing voltage switch so we don't confuse the card,
         * since stopping the clock is a specific part of the UHS
         * voltage change dance.
         *
         * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
         * unconditionally turned back on in dw_mci_setup_bus() if it's
         * ever called with a non-zero clock.  That shouldn't happen
         * until the voltage change is all done.
         */
        clk_en_a = mci_readl(host, CLKENA);
        clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
        mci_writel(host, CLKENA, clk_en_a);
        mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
                 SDMMC_CMD_PRV_DAT_WAIT, 0);
    }

    if (cmd->flags & MMC_RSP_PRESENT) {
        /* We expect a response, so set this bit */
        cmdr |= SDMMC_CMD_RESP_EXP;
        if (cmd->flags & MMC_RSP_136)
            cmdr |= SDMMC_CMD_RESP_LONG;
    }

    if (cmd->flags & MMC_RSP_CRC)
        cmdr |= SDMMC_CMD_RESP_CRC;

    if (cmd->data) {
        cmdr |= SDMMC_CMD_DAT_EXP;
        if (cmd->data->flags & MMC_DATA_WRITE)
            cmdr |= SDMMC_CMD_DAT_WR;
    }

    if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
        cmdr |= SDMMC_CMD_USE_HOLD_REG;

    return cmdr;
}

static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
    struct mmc_command *stop;
    u32 cmdr;

    if (!cmd->data)
        return 0;

    stop = &host->stop_abort;
    cmdr = cmd->opcode;
    memset(stop, 0, sizeof(struct mmc_command));

    if (cmdr == MMC_READ_SINGLE_BLOCK ||
        cmdr == MMC_READ_MULTIPLE_BLOCK ||
        cmdr == MMC_WRITE_BLOCK ||
        cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
        cmdr == MMC_SEND_TUNING_BLOCK ||
        cmdr == MMC_SEND_TUNING_BLOCK_HS200 ||
        cmdr == MMC_GEN_CMD) {
        stop->opcode = MMC_STOP_TRANSMISSION;
        stop->arg = 0;
        stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
    } else if (cmdr == SD_IO_RW_EXTENDED) {
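        /*
         * Abort an SDIO CMD53 with a CMD52 write to the CCCR ABORT
         * register: bit 31 selects a write, bits 30:28 address
         * function 0 (the CCCR), bits 25:9 carry the register address,
         * and the data byte is the number of the function to abort,
         * recovered from bits 30:28 of the original CMD53 argument.
         */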
        stop->opcode = SD_IO_RW_DIRECT;
        stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
                 ((cmd->arg >> 28) & 0x7);
        stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
    } else {
        return 0;
    }

    cmdr = stop->opcode | SDMMC_CMD_STOP |
        SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

    if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
        cmdr |= SDMMC_CMD_USE_HOLD_REG;

    return cmdr;
}

static inline void dw_mci_set_cto(struct dw_mci *host)
{
    unsigned int cto_clks;
    unsigned int cto_div;
    unsigned int cto_ms;
    unsigned long irqflags;

    cto_clks = mci_readl(host, TMOUT) & 0xff;
    cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
    if (cto_div == 0)
        cto_div = 1;

    cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
                  host->bus_hz);

    /* add a bit of spare time */
    cto_ms += 10;
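    /*
     * Worked example (illustrative numbers): with bus_hz = 50 MHz,
     * CLKDIV = 0 (cto_div clamped to 1) and cto_clks = 255,
     * DIV_ROUND_UP(1000 * 255 * 1, 50000000) = 1 ms, so with the spare
     * time above the timer is armed roughly 11 ms out.
     */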

    /*
     * The durations we're working with are fairly short so we have to be
     * extra careful about synchronization here.  Specifically in hardware a
     * command timeout is _at most_ 5.1 ms, so that means we expect an
     * interrupt (either command done or timeout) to come rather quickly
     * after the mci_writel.  ...but just in case we have a long interrupt
     * latency let's add a bit of paranoia.
     *
     * In general we'll assume that at least an interrupt will be asserted
     * in hardware by the time the cto_timer runs.  ...and if it hasn't
     * been asserted in hardware by that time then we'll assume it'll never
     * come.
     */
    spin_lock_irqsave(&host->irq_lock, irqflags);
    if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
        mod_timer(&host->cto_timer,
            jiffies + msecs_to_jiffies(cto_ms) + 1);
    spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_start_command(struct dw_mci *host,
                 struct mmc_command *cmd, u32 cmd_flags)
{
    host->cmd = cmd;
    dev_vdbg(host->dev,
         "start command: ARGR=0x%08x CMDR=0x%08x\n",
         cmd->arg, cmd_flags);

    mci_writel(host, CMDARG, cmd->arg);
    wmb(); /* drain writebuffer */
    dw_mci_wait_while_busy(host, cmd_flags);

    mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

    /* response expected command only */
    if (cmd_flags & SDMMC_CMD_RESP_EXP)
        dw_mci_set_cto(host);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
    struct mmc_command *stop = &host->stop_abort;

    dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
    if (host->using_dma) {
        host->dma_ops->stop(host);
        host->dma_ops->cleanup(host);
    }

    /* Data transfer was stopped by the interrupt handler */
    set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
    struct mmc_data *data = host->data;

    if (data && data->host_cookie == COOKIE_MAPPED) {
        dma_unmap_sg(host->dev,
                 data->sg,
                 data->sg_len,
                 mmc_get_dma_dir(data));
        data->host_cookie = COOKIE_UNMAPPED;
    }
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
    u32 bmod = mci_readl(host, BMOD);
    /* Software reset of DMA */
    bmod |= SDMMC_IDMAC_SWRESET;
    mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
    u32 temp;

    /* Disable and reset the IDMAC interface */
    temp = mci_readl(host, CTRL);
    temp &= ~SDMMC_CTRL_USE_IDMAC;
    temp |= SDMMC_CTRL_DMA_RESET;
    mci_writel(host, CTRL, temp);

    /* Stop the IDMAC running */
    temp = mci_readl(host, BMOD);
    temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
    temp |= SDMMC_IDMAC_SWRESET;
    mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
    struct dw_mci *host = arg;
    struct mmc_data *data = host->data;

    dev_vdbg(host->dev, "DMA complete\n");

    if ((host->use_dma == TRANS_MODE_EDMAC) &&
        data && (data->flags & MMC_DATA_READ))
        /* Invalidate cache after read */
        dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
                    data->sg,
                    data->sg_len,
                    DMA_FROM_DEVICE);

    host->dma_ops->cleanup(host);

    /*
     * If the card was removed, data will be NULL. No point in trying to
     * send the stop command or waiting for NBUSY in this case.
     */
    if (data) {
        set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
        tasklet_schedule(&host->tasklet);
    }
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
    int i;

    if (host->dma_64bit_address == 1) {
        struct idmac_desc_64addr *p;
        /* Number of descriptors in the ring buffer */
        host->ring_size =
            DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

        /* Forward link the descriptor list */
        for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
                                i++, p++) {
            p->des6 = (host->sg_dma +
                    (sizeof(struct idmac_desc_64addr) *
                            (i + 1))) & 0xffffffff;

            p->des7 = (u64)(host->sg_dma +
                    (sizeof(struct idmac_desc_64addr) *
                            (i + 1))) >> 32;
            /* Initialize reserved and buffer size fields to "0" */
            p->des0 = 0;
            p->des1 = 0;
            p->des2 = 0;
            p->des3 = 0;
        }

        /* Set the last descriptor as the end-of-ring descriptor */
        p->des6 = host->sg_dma & 0xffffffff;
        p->des7 = (u64)host->sg_dma >> 32;
        p->des0 = IDMAC_DES0_ER;

    } else {
        struct idmac_desc *p;
        /* Number of descriptors in the ring buffer */
        host->ring_size =
            DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

        /* Forward link the descriptor list */
        for (i = 0, p = host->sg_cpu;
             i < host->ring_size - 1;
             i++, p++) {
            p->des3 = cpu_to_le32(host->sg_dma +
                    (sizeof(struct idmac_desc) * (i + 1)));
            p->des0 = 0;
            p->des1 = 0;
        }

        /* Set the last descriptor as the end-of-ring descriptor */
        p->des3 = cpu_to_le32(host->sg_dma);
        p->des0 = cpu_to_le32(IDMAC_DES0_ER);
    }

    dw_mci_idmac_reset(host);

    if (host->dma_64bit_address == 1) {
        /* Mask out interrupts - get Tx & Rx complete only */
        mci_writel(host, IDSTS64, IDMAC_INT_CLR);
        mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

        /* Set the descriptor base address */
        mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
        mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

    } else {
        /* Mask out interrupts - get Tx & Rx complete only */
        mci_writel(host, IDSTS, IDMAC_INT_CLR);
        mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
                SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

        /* Set the descriptor base address */
        mci_writel(host, DBADDR, host->sg_dma);
    }

    return 0;
}

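/*
 * Descriptor preparation: each scatterlist entry is split into chained
 * descriptors covering at most DW_MCI_DESC_DATA_LENGTH (4 KiB) each.
 * Before reusing a slot we poll its OWN bit, because the IDMAC clears
 * OWN asynchronously once it is done with a descriptor.
 */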
static inline int dw_mci_prepare_desc64(struct dw_mci *host,
                     struct mmc_data *data,
                     unsigned int sg_len)
{
    unsigned int desc_len;
    struct idmac_desc_64addr *desc_first, *desc_last, *desc;
    u32 val;
    int i;

    desc_first = desc_last = desc = host->sg_cpu;

    for (i = 0; i < sg_len; i++) {
        unsigned int length = sg_dma_len(&data->sg[i]);

        u64 mem_addr = sg_dma_address(&data->sg[i]);

        for ( ; length ; desc++) {
            desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
                   length : DW_MCI_DESC_DATA_LENGTH;

            length -= desc_len;

            /*
             * Wait for the former clear OWN bit operation
             * of IDMAC to make sure that this descriptor
             * isn't still owned by IDMAC as IDMAC's write
             * ops and CPU's read ops are asynchronous.
             */
            if (readl_poll_timeout_atomic(&desc->des0, val,
                        !(val & IDMAC_DES0_OWN),
                        10, 100 * USEC_PER_MSEC))
                goto err_own_bit;

            /*
             * Set the OWN bit and disable interrupts
             * for this descriptor
             */
            desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
                        IDMAC_DES0_CH;

            /* Buffer length */
            IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

            /* Physical address to DMA to/from */
            desc->des4 = mem_addr & 0xffffffff;
            desc->des5 = mem_addr >> 32;

            /* Update physical address for the next desc */
            mem_addr += desc_len;

            /* Save pointer to the last descriptor */
            desc_last = desc;
        }
    }

    /* Set first descriptor */
    desc_first->des0 |= IDMAC_DES0_FD;

    /* Set last descriptor */
    desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
    desc_last->des0 |= IDMAC_DES0_LD;

    return 0;
err_own_bit:
    /* restore the descriptor chain as it's polluted */
    dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
    memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
    dw_mci_idmac_init(host);
    return -EINVAL;
}


static inline int dw_mci_prepare_desc32(struct dw_mci *host,
                     struct mmc_data *data,
                     unsigned int sg_len)
{
    unsigned int desc_len;
    struct idmac_desc *desc_first, *desc_last, *desc;
    u32 val;
    int i;

    desc_first = desc_last = desc = host->sg_cpu;

    for (i = 0; i < sg_len; i++) {
        unsigned int length = sg_dma_len(&data->sg[i]);

        u32 mem_addr = sg_dma_address(&data->sg[i]);

        for ( ; length ; desc++) {
            desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
                   length : DW_MCI_DESC_DATA_LENGTH;

            length -= desc_len;

            /*
             * Wait for the former clear OWN bit operation
             * of IDMAC to make sure that this descriptor
             * isn't still owned by IDMAC as IDMAC's write
             * ops and CPU's read ops are asynchronous.
             */
            if (readl_poll_timeout_atomic(&desc->des0, val,
                              IDMAC_OWN_CLR64(val),
                              10,
                              100 * USEC_PER_MSEC))
                goto err_own_bit;

            /*
             * Set the OWN bit and disable interrupts
             * for this descriptor
             */
            desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
                         IDMAC_DES0_DIC |
                         IDMAC_DES0_CH);

            /* Buffer length */
            IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

            /* Physical address to DMA to/from */
            desc->des2 = cpu_to_le32(mem_addr);

            /* Update physical address for the next desc */
            mem_addr += desc_len;

            /* Save pointer to the last descriptor */
            desc_last = desc;
        }
    }

    /* Set first descriptor */
    desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

    /* Set last descriptor */
    desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
                       IDMAC_DES0_DIC));
    desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

    return 0;
err_own_bit:
    /* restore the descriptor chain as it's polluted */
    dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
    memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
    dw_mci_idmac_init(host);
    return -EINVAL;
}

static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
    u32 temp;
    int ret;

    if (host->dma_64bit_address == 1)
        ret = dw_mci_prepare_desc64(host, host->data, sg_len);
    else
        ret = dw_mci_prepare_desc32(host, host->data, sg_len);

    if (ret)
        goto out;

    /* drain writebuffer */
    wmb();

    /* Make sure to reset DMA in case we did PIO before this */
    dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
    dw_mci_idmac_reset(host);

    /* Select IDMAC interface */
    temp = mci_readl(host, CTRL);
    temp |= SDMMC_CTRL_USE_IDMAC;
    mci_writel(host, CTRL, temp);

    /* drain writebuffer */
    wmb();

    /* Enable the IDMAC */
    temp = mci_readl(host, BMOD);
    temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
    mci_writel(host, BMOD, temp);

    /* Start it running */
    mci_writel(host, PLDMND, 1);

out:
    return ret;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
    .init = dw_mci_idmac_init,
    .start = dw_mci_idmac_start_dma,
    .stop = dw_mci_idmac_stop_dma,
    .complete = dw_mci_dmac_complete_dma,
    .cleanup = dw_mci_dma_cleanup,
};

static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
    dmaengine_terminate_async(host->dms->ch);
}

static int dw_mci_edmac_start_dma(struct dw_mci *host,
                        unsigned int sg_len)
{
    struct dma_slave_config cfg;
    struct dma_async_tx_descriptor *desc = NULL;
    struct scatterlist *sgl = host->data->sg;
    static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
    u32 sg_elems = host->data->sg_len;
    u32 fifoth_val;
    u32 fifo_offset = host->fifo_reg - host->regs;
    int ret = 0;

    /* Set external dma config: burst size, burst width */
    memset(&cfg, 0, sizeof(cfg));
    cfg.dst_addr = host->phy_regs + fifo_offset;
    cfg.src_addr = cfg.dst_addr;
    cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

    /* Match burst msize with external dma config */
    fifoth_val = mci_readl(host, FIFOTH);
    cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
    cfg.src_maxburst = cfg.dst_maxburst;

    if (host->data->flags & MMC_DATA_WRITE)
        cfg.direction = DMA_MEM_TO_DEV;
    else
        cfg.direction = DMA_DEV_TO_MEM;

    ret = dmaengine_slave_config(host->dms->ch, &cfg);
    if (ret) {
        dev_err(host->dev, "Failed to config edmac.\n");
        return -EBUSY;
    }

    desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
                       sg_len, cfg.direction,
                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (!desc) {
        dev_err(host->dev, "Can't prepare slave sg.\n");
        return -EBUSY;
    }

    /* Set dw_mci_dmac_complete_dma as callback */
    desc->callback = dw_mci_dmac_complete_dma;
    desc->callback_param = (void *)host;
    dmaengine_submit(desc);

    /* Flush cache before write */
    if (host->data->flags & MMC_DATA_WRITE)
        dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
                       sg_elems, DMA_TO_DEVICE);

    dma_async_issue_pending(host->dms->ch);

    return 0;
}

static int dw_mci_edmac_init(struct dw_mci *host)
{
    /* Request external dma channel */
    host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
    if (!host->dms)
        return -ENOMEM;

    host->dms->ch = dma_request_chan(host->dev, "rx-tx");
    if (IS_ERR(host->dms->ch)) {
        int ret = PTR_ERR(host->dms->ch);

        dev_err(host->dev, "Failed to get external DMA channel.\n");
        kfree(host->dms);
        host->dms = NULL;
        return ret;
    }

    return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
    if (host->dms) {
        if (host->dms->ch) {
            dma_release_channel(host->dms->ch);
            host->dms->ch = NULL;
        }
        kfree(host->dms);
        host->dms = NULL;
    }
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
    .init = dw_mci_edmac_init,
    .exit = dw_mci_edmac_exit,
    .start = dw_mci_edmac_start_dma,
    .stop = dw_mci_edmac_stop_dma,
    .complete = dw_mci_dmac_complete_dma,
    .cleanup = dw_mci_dma_cleanup,
};

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
                   struct mmc_data *data,
                   int cookie)
{
    struct scatterlist *sg;
    unsigned int i, sg_len;

    if (data->host_cookie == COOKIE_PRE_MAPPED)
        return data->sg_len;

    /*
     * We don't do DMA on "complex" transfers, i.e. with
     * non-word-aligned buffers or lengths. Also, we don't bother
     * with all the DMA setup overhead for short transfers.
     */
    if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
        return -EINVAL;

    if (data->blksz & 3)
        return -EINVAL;

    for_each_sg(data->sg, sg, data->sg_len, i) {
        if (sg->offset & 3 || sg->length & 3)
            return -EINVAL;
    }

    sg_len = dma_map_sg(host->dev,
                data->sg,
                data->sg_len,
                mmc_get_dma_dir(data));
    if (sg_len == 0)
        return -EINVAL;

    data->host_cookie = cookie;

    return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
               struct mmc_request *mrq)
{
    struct dw_mci_slot *slot = mmc_priv(mmc);
    struct mmc_data *data = mrq->data;

    if (!slot->host->use_dma || !data)
        return;

    /* This data might be unmapped at this time */
    data->host_cookie = COOKIE_UNMAPPED;

    if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
                COOKIE_PRE_MAPPED) < 0)
        data->host_cookie = COOKIE_UNMAPPED;
}

static void dw_mci_post_req(struct mmc_host *mmc,
                struct mmc_request *mrq,
                int err)
{
    struct dw_mci_slot *slot = mmc_priv(mmc);
    struct mmc_data *data = mrq->data;

    if (!slot->host->use_dma || !data)
        return;

    if (data->host_cookie != COOKIE_UNMAPPED)
        dma_unmap_sg(slot->host->dev,
                 data->sg,
                 data->sg_len,
                 mmc_get_dma_dir(data));
    data->host_cookie = COOKIE_UNMAPPED;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
    int present;
    struct dw_mci_slot *slot = mmc_priv(mmc);
    struct dw_mci *host = slot->host;
    int gpio_cd = mmc_gpio_get_cd(mmc);

    /* Use platform get_cd function, else try onboard card detect */
    if (((mmc->caps & MMC_CAP_NEEDS_POLL)
                || !mmc_card_is_removable(mmc))) {
        present = 1;

        if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
            if (mmc->caps & MMC_CAP_NEEDS_POLL) {
                dev_info(&mmc->class_dev,
                    "card is polling.\n");
            } else {
                dev_info(&mmc->class_dev,
                    "card is non-removable.\n");
            }
            set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
        }

        return present;
    } else if (gpio_cd >= 0)
        present = gpio_cd;
    else
        present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
            == 0 ? 1 : 0;

    spin_lock_bh(&host->lock);
    if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
        dev_dbg(&mmc->class_dev, "card is present\n");
    else if (!present &&
            !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
        dev_dbg(&mmc->class_dev, "card is not present\n");
    spin_unlock_bh(&host->lock);

    return present;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
    unsigned int blksz = data->blksz;
    static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
    u32 fifo_width = 1 << host->data_shift;
    u32 blksz_depth = blksz / fifo_width, fifoth_val;
    u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
    int idx = ARRAY_SIZE(mszs) - 1;

    /* PIO should skip this scenario */
    if (!host->use_dma)
        return;

    tx_wmark = (host->fifo_depth) / 2;
    tx_wmark_invers = host->fifo_depth - tx_wmark;

    /*
     * MSIZE is '1',
     * if blksz is not a multiple of the FIFO width
     */
    if (blksz % fifo_width)
        goto done;

    do {
        if (!((blksz_depth % mszs[idx]) ||
             (tx_wmark_invers % mszs[idx]))) {
            msize = idx;
            rx_wmark = mszs[idx] - 1;
            break;
        }
    } while (--idx > 0);
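    /*
     * Worked example (illustrative numbers): a 64-word FIFO with a
     * 4-byte FIFO width and blksz = 512 gives blksz_depth = 128 and
     * tx_wmark_invers = 32; the loop settles on mszs[4] = 32, so
     * msize = 4 and rx_wmark = 31.
     */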
    /*
     * If idx reaches '0', mszs[0] is never tried;
     * the initial values (msize = 0, rx_wmark = 1) are used.
     */
done:
    fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
    mci_writel(host, FIFOTH, fifoth_val);
}

static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
    unsigned int blksz = data->blksz;
    u32 blksz_depth, fifo_depth;
    u16 thld_size;
    u8 enable;

    /*
     * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
     * in the FIFO region, so we really shouldn't access it).
     */
    if (host->verid < DW_MMC_240A ||
        (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
        return;

    /*
     * Card write Threshold is introduced since 2.80a
     * It's used when HS400 mode is enabled.
     */
    if (data->flags & MMC_DATA_WRITE &&
        host->timing != MMC_TIMING_MMC_HS400)
        goto disable;

    if (data->flags & MMC_DATA_WRITE)
        enable = SDMMC_CARD_WR_THR_EN;
    else
        enable = SDMMC_CARD_RD_THR_EN;

    if (host->timing != MMC_TIMING_MMC_HS200 &&
        host->timing != MMC_TIMING_UHS_SDR104 &&
        host->timing != MMC_TIMING_MMC_HS400)
        goto disable;

    blksz_depth = blksz / (1 << host->data_shift);
    fifo_depth = host->fifo_depth;

    if (blksz_depth > fifo_depth)
        goto disable;

    /*
     * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
     * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
     * Currently just choose blksz.
     */
    thld_size = blksz;
    mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
    return;

disable:
    mci_writel(host, CDTHRCTL, 0);
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
    unsigned long irqflags;
    int sg_len;
    u32 temp;

    host->using_dma = 0;

    /* If we don't have a channel, we can't do DMA */
    if (!host->use_dma)
        return -ENODEV;

    sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
    if (sg_len < 0) {
        host->dma_ops->stop(host);
        return sg_len;
    }

    host->using_dma = 1;

    if (host->use_dma == TRANS_MODE_IDMAC)
        dev_vdbg(host->dev,
             "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
             (unsigned long)host->sg_cpu,
             (unsigned long)host->sg_dma,
             sg_len);

    /*
     * Decide the MSIZE and RX/TX Watermark.
     * If current block size is same with previous size,
     * no need to update fifoth.
     */
    if (host->prev_blksz != data->blksz)
        dw_mci_adjust_fifoth(host, data);

    /* Enable the DMA interface */
    temp = mci_readl(host, CTRL);
    temp |= SDMMC_CTRL_DMA_ENABLE;
    mci_writel(host, CTRL, temp);

    /* Disable RX/TX IRQs, let DMA handle it */
    spin_lock_irqsave(&host->irq_lock, irqflags);
    temp = mci_readl(host, INTMASK);
    temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
    mci_writel(host, INTMASK, temp);
    spin_unlock_irqrestore(&host->irq_lock, irqflags);

    if (host->dma_ops->start(host, sg_len)) {
        host->dma_ops->stop(host);
        /* We can't do DMA, try PIO for this one */
        dev_dbg(host->dev,
            "%s: fall back to PIO mode for current transfer\n",
            __func__);
        return -ENODEV;
    }

    return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
    unsigned long irqflags;
    int flags = SG_MITER_ATOMIC;
    u32 temp;

    data->error = -EINPROGRESS;

    WARN_ON(host->data);
    host->sg = NULL;
    host->data = data;

    if (data->flags & MMC_DATA_READ)
        host->dir_status = DW_MCI_RECV_STATUS;
    else
        host->dir_status = DW_MCI_SEND_STATUS;

    dw_mci_ctrl_thld(host, data);

    if (dw_mci_submit_data_dma(host, data)) {
        if (host->data->flags & MMC_DATA_READ)
            flags |= SG_MITER_TO_SG;
        else
            flags |= SG_MITER_FROM_SG;

        sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
        host->sg = data->sg;
        host->part_buf_start = 0;
        host->part_buf_count = 0;

        mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

        spin_lock_irqsave(&host->irq_lock, irqflags);
        temp = mci_readl(host, INTMASK);
        temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
        mci_writel(host, INTMASK, temp);
        spin_unlock_irqrestore(&host->irq_lock, irqflags);

        temp = mci_readl(host, CTRL);
        temp &= ~SDMMC_CTRL_DMA_ENABLE;
        mci_writel(host, CTRL, temp);

        /*
         * Use the initial fifoth_val for PIO mode. If wm_aligned
         * is set, we set the watermark to be the same as the data
         * size. Since the next data may be transferred in DMA mode,
         * prev_blksz should be invalidated.
         */
        if (host->wm_aligned)
            dw_mci_adjust_fifoth(host, data);
        else
            mci_writel(host, FIFOTH, host->fifoth_val);
        host->prev_blksz = 0;
    } else {
        /*
         * Keep the current block size.
         * It will be used to decide whether to update
         * fifoth register next time.
         */
        host->prev_blksz = data->blksz;
    }
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
    struct dw_mci *host = slot->host;
    unsigned int clock = slot->clock;
    u32 div;
    u32 clk_en_a;
    u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

    /* We must continue to set bit 28 in CMD until the change is complete */
    if (host->state == STATE_WAITING_CMD11_DONE)
        sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

    slot->mmc->actual_clock = 0;

    if (!clock) {
        mci_writel(host, CLKENA, 0);
        mci_send_cmd(slot, sdmmc_cmd_bits, 0);
    } else if (clock != host->current_speed || force_clkinit) {
        div = host->bus_hz / clock;
        if (host->bus_hz % clock && host->bus_hz > clock)
            /*
             * move the + 1 after the divide to prevent
             * over-clocking the card.
             */
            div += 1;

        div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
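        /*
         * Worked example (illustrative numbers): bus_hz = 100 MHz and
         * a requested 400 kHz give div = 250, stored as CLKDIV = 125;
         * the card then sees 100 MHz / (2 * 125) = 400 kHz.  div = 0
         * bypasses the divider when the request matches bus_hz.
         */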

        if ((clock != slot->__clk_old &&
            !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
            force_clkinit) {
            /* Silence the verbose log if calling from PM context */
            if (!force_clkinit)
                dev_info(&slot->mmc->class_dev,
                     "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
                     slot->id, host->bus_hz, clock,
                     div ? ((host->bus_hz / div) >> 1) :
                     host->bus_hz, div);

            /*
             * If card is polling, display the message only
             * one time at boot time.
             */
            if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
                    slot->mmc->f_min == clock)
                set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
        }

        /* disable clock */
        mci_writel(host, CLKENA, 0);
        mci_writel(host, CLKSRC, 0);

        /* inform CIU */
        mci_send_cmd(slot, sdmmc_cmd_bits, 0);

        /* set clock to desired speed */
        mci_writel(host, CLKDIV, div);

        /* inform CIU */
        mci_send_cmd(slot, sdmmc_cmd_bits, 0);

        /* enable clock; only low power if no SDIO */
        clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
        if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
            clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
        mci_writel(host, CLKENA, clk_en_a);

        /* inform CIU */
        mci_send_cmd(slot, sdmmc_cmd_bits, 0);

        /* keep the last clock value that was requested from core */
        slot->__clk_old = clock;
        slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
                      host->bus_hz;
    }

    host->current_speed = clock;

    /* Set the current slot bus width */
    mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void dw_mci_set_data_timeout(struct dw_mci *host,
                    unsigned int timeout_ns)
{
    const struct dw_mci_drv_data *drv_data = host->drv_data;
    u32 clk_div, tmout;
    u64 tmp;

    if (drv_data && drv_data->set_data_timeout)
        return drv_data->set_data_timeout(host, timeout_ns);

    clk_div = (mci_readl(host, CLKDIV) & 0xFF) * 2;
    if (clk_div == 0)
        clk_div = 1;

    tmp = DIV_ROUND_UP_ULL((u64)timeout_ns * host->bus_hz, NSEC_PER_SEC);
    tmp = DIV_ROUND_UP_ULL(tmp, clk_div);
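    /*
     * Worked example (illustrative numbers): timeout_ns = 100 ms at
     * bus_hz = 50 MHz with CLKDIV = 1 (clk_div = 2) gives
     * 5000000 / 2 = 2500000 card clocks, which fits in the 24-bit
     * DATA_TIMEOUT field programmed below.
     */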

    /* TMOUT[7:0] (RESPONSE_TIMEOUT) */
    tmout = 0xFF; /* Set maximum */

    /* TMOUT[31:8] (DATA_TIMEOUT) */
    if (!tmp || tmp > 0xFFFFFF)
        tmout |= (0xFFFFFF << 8);
    else
        tmout |= (tmp & 0xFFFFFF) << 8;

    mci_writel(host, TMOUT, tmout);
    dev_dbg(host->dev, "timeout_ns: %u => TMOUT[31:8]: %#08x",
        timeout_ns, tmout >> 8);
}

static void __dw_mci_start_request(struct dw_mci *host,
                   struct dw_mci_slot *slot,
                   struct mmc_command *cmd)
{
    struct mmc_request *mrq;
    struct mmc_data *data;
    u32 cmdflags;

    mrq = slot->mrq;

    host->mrq = mrq;

    host->pending_events = 0;
    host->completed_events = 0;
    host->cmd_status = 0;
    host->data_status = 0;
    host->dir_status = 0;

    data = cmd->data;
    if (data) {
        dw_mci_set_data_timeout(host, data->timeout_ns);
        mci_writel(host, BYTCNT, data->blksz * data->blocks);
        mci_writel(host, BLKSIZ, data->blksz);
    }

    cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

    /* this is the first command, send the initialization clock */
    if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
        cmdflags |= SDMMC_CMD_INIT;

    if (data) {
        dw_mci_submit_data(host, data);
        wmb(); /* drain writebuffer */
    }

    dw_mci_start_command(host, cmd, cmdflags);

    if (cmd->opcode == SD_SWITCH_VOLTAGE) {
        unsigned long irqflags;

        /*
         * Databook says to fail after 2ms w/ no response, but evidence
         * shows that sometimes the cmd11 interrupt takes over 130ms.
         * We'll set to 500ms, plus an extra jiffy just in case jiffies
         * is just about to roll over.
         *
1365          * We do this whole thing under spinlock and only if the
1366          * command hasn't already completed (indicating the the irq
1367          * already ran so we don't want the timeout).
         */
        spin_lock_irqsave(&host->irq_lock, irqflags);
        if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
            mod_timer(&host->cmd11_timer,
                jiffies + msecs_to_jiffies(500) + 1);
        spin_unlock_irqrestore(&host->irq_lock, irqflags);
    }

    host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
                 struct dw_mci_slot *slot)
{
    struct mmc_request *mrq = slot->mrq;
    struct mmc_command *cmd;

    cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
    __dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
                 struct mmc_request *mrq)
{
    dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
         host->state);

    slot->mrq = mrq;

    if (host->state == STATE_WAITING_CMD11_DONE) {
        dev_warn(&slot->mmc->class_dev,
             "Voltage change didn't complete\n");
        /*
         * this case isn't expected to happen, so we can
         * either crash here or just try to continue on
         * in the closest possible state
         */
        host->state = STATE_IDLE;
    }

    if (host->state == STATE_IDLE) {
        host->state = STATE_SENDING_CMD;
        dw_mci_start_request(host, slot);
    } else {
        list_add_tail(&slot->queue_node, &host->queue);
    }
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
    struct dw_mci_slot *slot = mmc_priv(mmc);
    struct dw_mci *host = slot->host;

    WARN_ON(slot->mrq);

    /*
     * The check for card presence and queueing of the request must be
     * atomic, otherwise the card could be removed in between and the
     * request wouldn't fail until another card was inserted.
     */

    if (!dw_mci_get_cd(mmc)) {
        mrq->cmd->error = -ENOMEDIUM;
        mmc_request_done(mmc, mrq);
        return;
    }

    spin_lock_bh(&host->lock);

    dw_mci_queue_request(host, slot, mrq);

    spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
    struct dw_mci_slot *slot = mmc_priv(mmc);
    const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
    u32 regs;
    int ret;

    switch (ios->bus_width) {
    case MMC_BUS_WIDTH_4:
        slot->ctype = SDMMC_CTYPE_4BIT;
        break;
    case MMC_BUS_WIDTH_8:
        slot->ctype = SDMMC_CTYPE_8BIT;
        break;
    default:
        /* set default 1 bit mode */
        slot->ctype = SDMMC_CTYPE_1BIT;
    }

    regs = mci_readl(slot->host, UHS_REG);

    /* DDR mode set */
    if (ios->timing == MMC_TIMING_MMC_DDR52 ||
        ios->timing == MMC_TIMING_UHS_DDR50 ||
        ios->timing == MMC_TIMING_MMC_HS400)
        regs |= ((0x1 << slot->id) << 16);
    else
        regs &= ~((0x1 << slot->id) << 16);

    mci_writel(slot->host, UHS_REG, regs);
    slot->host->timing = ios->timing;

    /*
     * Use mirror of ios->clock to prevent race with mmc
     * core ios update when finding the minimum.
     */
    slot->clock = ios->clock;

    if (drv_data && drv_data->set_ios)
        drv_data->set_ios(slot->host, ios);

    switch (ios->power_mode) {
    case MMC_POWER_UP:
        if (!IS_ERR(mmc->supply.vmmc)) {
            ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
                    ios->vdd);
            if (ret) {
                dev_err(slot->host->dev,
                    "failed to enable vmmc regulator\n");
                /* return if we failed to turn on vmmc */
1493                 return;
1494             }
1495         }
1496         set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1497         regs = mci_readl(slot->host, PWREN);
1498         regs |= (1 << slot->id);
1499         mci_writel(slot->host, PWREN, regs);
1500         break;
1501     case MMC_POWER_ON:
1502         if (!slot->host->vqmmc_enabled) {
1503             if (!IS_ERR(mmc->supply.vqmmc)) {
1504                 ret = regulator_enable(mmc->supply.vqmmc);
1505                 if (ret < 0)
1506                     dev_err(slot->host->dev,
1507                         "failed to enable vqmmc\n");
1508                 else
1509                     slot->host->vqmmc_enabled = true;
1510 
1511             } else {
1512                 /* Keep track so we don't reset again */
1513                 slot->host->vqmmc_enabled = true;
1514             }
1515 
1516             /* Reset our state machine after powering on */
1517             dw_mci_ctrl_reset(slot->host,
1518                       SDMMC_CTRL_ALL_RESET_FLAGS);
1519         }
1520 
1521         /* Adjust clock / bus width after power is up */
1522         dw_mci_setup_bus(slot, false);
1523 
1524         break;
1525     case MMC_POWER_OFF:
1526         /* Turn clock off before power goes down */
1527         dw_mci_setup_bus(slot, false);
1528 
1529         if (!IS_ERR(mmc->supply.vmmc))
1530             mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1531 
1532         if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
1533             regulator_disable(mmc->supply.vqmmc);
1534         slot->host->vqmmc_enabled = false;
1535 
1536         regs = mci_readl(slot->host, PWREN);
1537         regs &= ~(1 << slot->id);
1538         mci_writel(slot->host, PWREN, regs);
1539         break;
1540     default:
1541         break;
1542     }
1543 
1544     if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
1545         slot->host->state = STATE_IDLE;
1546 }
1547 
1548 static int dw_mci_card_busy(struct mmc_host *mmc)
1549 {
1550     struct dw_mci_slot *slot = mmc_priv(mmc);
1551     u32 status;
1552 
1553     /*
1554      * Check the busy bit which is low when DAT[3:0]
1555      * (the data lines) are 0000
1556      */
1557     status = mci_readl(slot->host, STATUS);
1558 
1559     return !!(status & SDMMC_STATUS_BUSY);
1560 }
1561 
1562 static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1563 {
1564     struct dw_mci_slot *slot = mmc_priv(mmc);
1565     struct dw_mci *host = slot->host;
1566     const struct dw_mci_drv_data *drv_data = host->drv_data;
1567     u32 uhs;
1568     u32 v18 = SDMMC_UHS_18V << slot->id;
1569     int ret;
1570 
1571     if (drv_data && drv_data->switch_voltage)
1572         return drv_data->switch_voltage(mmc, ios);
1573 
1574     /*
1575      * Program the voltage.  Note that some instances of dw_mmc may use
1576      * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
1577      * does no harm but you need to set the regulator directly.  Try both.
1578      */
1579     uhs = mci_readl(host, UHS_REG);
1580     if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1581         uhs &= ~v18;
1582     else
1583         uhs |= v18;
1584 
1585     if (!IS_ERR(mmc->supply.vqmmc)) {
1586         ret = mmc_regulator_set_vqmmc(mmc, ios);
1587         if (ret < 0) {
1588             dev_dbg(&mmc->class_dev,
1589                      "Regulator set error %d - %s V\n",
1590                      ret, uhs & v18 ? "1.8" : "3.3");
1591             return ret;
1592         }
1593     }
1594     mci_writel(host, UHS_REG, uhs);
1595 
1596     return 0;
1597 }
1598 
1599 static int dw_mci_get_ro(struct mmc_host *mmc)
1600 {
1601     int read_only;
1602     struct dw_mci_slot *slot = mmc_priv(mmc);
1603     int gpio_ro = mmc_gpio_get_ro(mmc);
1604 
1605     /* Use platform get_ro function, else try on board write protect */
1606     if (gpio_ro >= 0)
1607         read_only = gpio_ro;
1608     else
1609         read_only =
1610             mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1611 
1612     dev_dbg(&mmc->class_dev, "card is %s\n",
1613         read_only ? "read-only" : "read-write");
1614 
1615     return read_only;
1616 }
1617 
1618 static void dw_mci_hw_reset(struct mmc_host *mmc)
1619 {
1620     struct dw_mci_slot *slot = mmc_priv(mmc);
1621     struct dw_mci *host = slot->host;
1622     int reset;
1623 
1624     if (host->use_dma == TRANS_MODE_IDMAC)
1625         dw_mci_idmac_reset(host);
1626 
1627     if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
1628                      SDMMC_CTRL_FIFO_RESET))
1629         return;
1630 
1631     /*
1632      * According to eMMC spec, card reset procedure:
1633      * tRstW >= 1us:   RST_n pulse width
1634      * tRSCA >= 200us: RST_n to Command time
1635      * tRSTH >= 1us:   RST_n high period
1636      */
1637     reset = mci_readl(host, RST_N);
1638     reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
1639     mci_writel(host, RST_N, reset);
1640     usleep_range(1, 2);
1641     reset |= SDMMC_RST_HWACTIVE << slot->id;
1642     mci_writel(host, RST_N, reset);
1643     usleep_range(200, 300);
1644 }
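/*
 * In the reset sequence above, usleep_range(1, 2) satisfies the
 * tRstW >= 1us pulse width while RST_n is held low, and
 * usleep_range(200, 300) covers tRSCA >= 200us before the next command;
 * tRSTH >= 1us is met because RST_n simply stays high afterwards.
 */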
1645 
1646 static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare)
1647 {
1648     struct dw_mci *host = slot->host;
1649     const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1650     u32 clk_en_a_old;
1651     u32 clk_en_a;
1652 
1653     /*
1654      * Low power mode will stop the card clock when idle.  According to the
1655      * description of the CLKENA register we should disable low power mode
1656      * for SDIO cards if we need SDIO interrupts to work.
1657      */
1658 
1659     clk_en_a_old = mci_readl(host, CLKENA);
1660     if (prepare) {
1661         set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1662         clk_en_a = clk_en_a_old & ~clken_low_pwr;
1663     } else {
1664         clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1665         clk_en_a = clk_en_a_old | clken_low_pwr;
1666     }
1667 
1668     if (clk_en_a != clk_en_a_old) {
1669         mci_writel(host, CLKENA, clk_en_a);
1670         mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT,
1671                  0);
1672     }
1673 }
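/*
 * As with any CLKENA change, the new low-power setting only takes effect
 * once it is latched into the card clock domain, which is what the
 * SDMMC_CMD_UPD_CLK command above is for.
 */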
1674 
1675 static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
1676 {
1677     struct dw_mci *host = slot->host;
1678     unsigned long irqflags;
1679     u32 int_mask;
1680 
1681     spin_lock_irqsave(&host->irq_lock, irqflags);
1682 
1683     /* Enable/disable Slot Specific SDIO interrupt */
1684     int_mask = mci_readl(host, INTMASK);
1685     if (enb)
1686         int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
1687     else
1688         int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
1689     mci_writel(host, INTMASK, int_mask);
1690 
1691     spin_unlock_irqrestore(&host->irq_lock, irqflags);
1692 }
1693 
1694 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1695 {
1696     struct dw_mci_slot *slot = mmc_priv(mmc);
1697     struct dw_mci *host = slot->host;
1698 
1699     dw_mci_prepare_sdio_irq(slot, enb);
1700     __dw_mci_enable_sdio_irq(slot, enb);
1701 
1702     /* Avoid runtime suspending the device when SDIO IRQ is enabled */
1703     if (enb)
1704         pm_runtime_get_noresume(host->dev);
1705     else
1706         pm_runtime_put_noidle(host->dev);
1707 }
1708 
1709 static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
1710 {
1711     struct dw_mci_slot *slot = mmc_priv(mmc);
1712 
1713     __dw_mci_enable_sdio_irq(slot, 1);
1714 }
1715 
1716 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1717 {
1718     struct dw_mci_slot *slot = mmc_priv(mmc);
1719     struct dw_mci *host = slot->host;
1720     const struct dw_mci_drv_data *drv_data = host->drv_data;
1721     int err = -EINVAL;
1722 
1723     if (drv_data && drv_data->execute_tuning)
1724         err = drv_data->execute_tuning(slot, opcode);
1725     return err;
1726 }
1727 
1728 static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
1729                        struct mmc_ios *ios)
1730 {
1731     struct dw_mci_slot *slot = mmc_priv(mmc);
1732     struct dw_mci *host = slot->host;
1733     const struct dw_mci_drv_data *drv_data = host->drv_data;
1734 
1735     if (drv_data && drv_data->prepare_hs400_tuning)
1736         return drv_data->prepare_hs400_tuning(host, ios);
1737 
1738     return 0;
1739 }
1740 
1741 static bool dw_mci_reset(struct dw_mci *host)
1742 {
1743     u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
1744     bool ret = false;
1745     u32 status = 0;
1746 
1747     /*
1748      * Resetting generates a block interrupt, hence setting
1749      * the scatter-gather pointer to NULL.
1750      */
1751     if (host->sg) {
1752         sg_miter_stop(&host->sg_miter);
1753         host->sg = NULL;
1754     }
1755 
1756     if (host->use_dma)
1757         flags |= SDMMC_CTRL_DMA_RESET;
1758 
1759     if (dw_mci_ctrl_reset(host, flags)) {
1760         /*
1761          * In all cases we clear the RAWINTS
1762          * register to clear any interrupts.
1763          */
1764         mci_writel(host, RINTSTS, 0xFFFFFFFF);
1765 
1766         if (!host->use_dma) {
1767             ret = true;
1768             goto ciu_out;
1769         }
1770 
1771         /* Wait for dma_req to be cleared */
1772         if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
1773                           status,
1774                           !(status & SDMMC_STATUS_DMA_REQ),
1775                           1, 500 * USEC_PER_MSEC)) {
1776             dev_err(host->dev,
1777                 "%s: Timeout waiting for dma_req to be cleared\n",
1778                 __func__);
1779             goto ciu_out;
1780         }
1781 
1782         /* when using DMA, reset the FIFO again next */
1783         if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
1784             goto ciu_out;
1785     } else {
1786         /* if the controller reset bit did clear, then set clock regs */
1787         if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
1788             dev_err(host->dev,
1789                 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
1790                 __func__);
1791             goto ciu_out;
1792         }
1793     }
1794 
1795     if (host->use_dma == TRANS_MODE_IDMAC)
1796         /* It is also required that we reinit idmac */
1797         dw_mci_idmac_init(host);
1798 
1799     ret = true;
1800 
1801 ciu_out:
1802     /* After a CTRL reset we need to have CIU set clock registers */
1803     mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);
1804 
1805     return ret;
1806 }
1807 
1808 static const struct mmc_host_ops dw_mci_ops = {
1809     .request        = dw_mci_request,
1810     .pre_req        = dw_mci_pre_req,
1811     .post_req       = dw_mci_post_req,
1812     .set_ios        = dw_mci_set_ios,
1813     .get_ro         = dw_mci_get_ro,
1814     .get_cd         = dw_mci_get_cd,
1815     .card_hw_reset          = dw_mci_hw_reset,
1816     .enable_sdio_irq    = dw_mci_enable_sdio_irq,
1817     .ack_sdio_irq       = dw_mci_ack_sdio_irq,
1818     .execute_tuning     = dw_mci_execute_tuning,
1819     .card_busy      = dw_mci_card_busy,
1820     .start_signal_voltage_switch = dw_mci_switch_voltage,
1821     .prepare_hs400_tuning   = dw_mci_prepare_hs400_tuning,
1822 };
1823 
1824 #ifdef CONFIG_FAULT_INJECTION
1825 static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
1826 {
1827     struct dw_mci *host = container_of(t, struct dw_mci, fault_timer);
1828     unsigned long flags;
1829 
1830     spin_lock_irqsave(&host->irq_lock, flags);
1831 
1832     /*
1833      * Only inject an error if we haven't already got an error or data over
1834      * interrupt.
1835      */
1836     if (!host->data_status) {
1837         host->data_status = SDMMC_INT_DCRC;
1838         set_bit(EVENT_DATA_ERROR, &host->pending_events);
1839         tasklet_schedule(&host->tasklet);
1840     }
1841 
1842     spin_unlock_irqrestore(&host->irq_lock, flags);
1843 
1844     return HRTIMER_NORESTART;
1845 }
1846 
1847 static void dw_mci_start_fault_timer(struct dw_mci *host)
1848 {
1849     struct mmc_data *data = host->data;
1850 
1851     if (!data || data->blocks <= 1)
1852         return;
1853 
1854     if (!should_fail(&host->fail_data_crc, 1))
1855         return;
1856 
1857     /*
1858      * Try to inject the error at random points during the data transfer.
1859      */
1860     hrtimer_start(&host->fault_timer,
1861               ms_to_ktime(prandom_u32() % 25),
1862               HRTIMER_MODE_REL);
1863 }
1864 
1865 static void dw_mci_stop_fault_timer(struct dw_mci *host)
1866 {
1867     hrtimer_cancel(&host->fault_timer);
1868 }
1869 
1870 static void dw_mci_init_fault(struct dw_mci *host)
1871 {
1872     host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;
1873 
1874     hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1875     host->fault_timer.function = dw_mci_fault_timer;
1876 }
1877 #else
1878 static void dw_mci_init_fault(struct dw_mci *host)
1879 {
1880 }
1881 
1882 static void dw_mci_start_fault_timer(struct dw_mci *host)
1883 {
1884 }
1885 
1886 static void dw_mci_stop_fault_timer(struct dw_mci *host)
1887 {
1888 }
1889 #endif
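/*
 * With CONFIG_FAULT_INJECTION enabled, the helpers above let should_fail()
 * pick multi-block transfers to sabotage: the hrtimer then fires at a
 * random point within 25 ms and reports a fake SDMMC_INT_DCRC so the
 * data-error paths below can be exercised.
 */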
1890 
1891 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1892     __releases(&host->lock)
1893     __acquires(&host->lock)
1894 {
1895     struct dw_mci_slot *slot;
1896     struct mmc_host *prev_mmc = host->slot->mmc;
1897 
1898     WARN_ON(host->cmd || host->data);
1899 
1900     host->slot->mrq = NULL;
1901     host->mrq = NULL;
1902     if (!list_empty(&host->queue)) {
1903         slot = list_entry(host->queue.next,
1904                   struct dw_mci_slot, queue_node);
1905         list_del(&slot->queue_node);
1906         dev_vdbg(host->dev, "list not empty: %s is next\n",
1907              mmc_hostname(slot->mmc));
1908         host->state = STATE_SENDING_CMD;
1909         dw_mci_start_request(host, slot);
1910     } else {
1911         dev_vdbg(host->dev, "list empty\n");
1912 
1913         if (host->state == STATE_SENDING_CMD11)
1914             host->state = STATE_WAITING_CMD11_DONE;
1915         else
1916             host->state = STATE_IDLE;
1917     }
1918 
1919     spin_unlock(&host->lock);
1920     mmc_request_done(prev_mmc, mrq);
1921     spin_lock(&host->lock);
1922 }
1923 
1924 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1925 {
1926     u32 status = host->cmd_status;
1927 
1928     host->cmd_status = 0;
1929 
1930     /* Read the response from the card (up to 16 bytes) */
1931     if (cmd->flags & MMC_RSP_PRESENT) {
1932         if (cmd->flags & MMC_RSP_136) {
1933             cmd->resp[3] = mci_readl(host, RESP0);
1934             cmd->resp[2] = mci_readl(host, RESP1);
1935             cmd->resp[1] = mci_readl(host, RESP2);
1936             cmd->resp[0] = mci_readl(host, RESP3);
1937         } else {
1938             cmd->resp[0] = mci_readl(host, RESP0);
1939             cmd->resp[1] = 0;
1940             cmd->resp[2] = 0;
1941             cmd->resp[3] = 0;
1942         }
1943     }
1944 
1945     if (status & SDMMC_INT_RTO)
1946         cmd->error = -ETIMEDOUT;
1947     else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1948         cmd->error = -EILSEQ;
1949     else if (status & SDMMC_INT_RESP_ERR)
1950         cmd->error = -EIO;
1951     else
1952         cmd->error = 0;
1953 
1954     return cmd->error;
1955 }
1956 
1957 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1958 {
1959     u32 status = host->data_status;
1960 
1961     if (status & DW_MCI_DATA_ERROR_FLAGS) {
1962         if (status & SDMMC_INT_DRTO) {
1963             data->error = -ETIMEDOUT;
1964         } else if (status & SDMMC_INT_DCRC) {
1965             data->error = -EILSEQ;
1966         } else if (status & SDMMC_INT_EBE) {
1967             if (host->dir_status ==
1968                 DW_MCI_SEND_STATUS) {
1969                 /*
1970                  * No data CRC status was returned.
1971                  * The number of bytes transferred
1972                  * will be exaggerated in PIO mode.
1973                  */
1974                 data->bytes_xfered = 0;
1975                 data->error = -ETIMEDOUT;
1976             } else if (host->dir_status ==
1977                     DW_MCI_RECV_STATUS) {
1978                 data->error = -EILSEQ;
1979             }
1980         } else {
1981             /* SDMMC_INT_SBE is included */
1982             data->error = -EILSEQ;
1983         }
1984 
1985         dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1986 
1987         /*
1988          * After an error, there may be data lingering
1989          * in the FIFO
1990          */
1991         dw_mci_reset(host);
1992     } else {
1993         data->bytes_xfered = data->blocks * data->blksz;
1994         data->error = 0;
1995     }
1996 
1997     return data->error;
1998 }
1999 
2000 static void dw_mci_set_drto(struct dw_mci *host)
2001 {
2002     const struct dw_mci_drv_data *drv_data = host->drv_data;
2003     unsigned int drto_clks;
2004     unsigned int drto_div;
2005     unsigned int drto_ms;
2006     unsigned long irqflags;
2007 
2008     if (drv_data && drv_data->get_drto_clks)
2009         drto_clks = drv_data->get_drto_clks(host);
2010     else
2011         drto_clks = mci_readl(host, TMOUT) >> 8;
2012     drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
2013     if (drto_div == 0)
2014         drto_div = 1;
2015 
2016     drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
2017                    host->bus_hz);
2018 
2019     dev_dbg(host->dev, "drto_ms: %u\n", drto_ms);
2020 
2021     /* add a bit of spare time */
2022     drto_ms += 10;
2023 
2024     spin_lock_irqsave(&host->irq_lock, irqflags);
2025     if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
2026         mod_timer(&host->dto_timer,
2027               jiffies + msecs_to_jiffies(drto_ms));
2028     spin_unlock_irqrestore(&host->irq_lock, irqflags);
2029 }
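/*
 * A worked example with hypothetical register values: TMOUT[31:8] =
 * 0xffffff (drto_clks = 16777215), CLKDIV[7:0] = 1 (so drto_div = 2) and
 * host->bus_hz = 50000000 give
 *
 *	drto_ms = DIV_ROUND_UP_ULL(1000ULL * 16777215 * 2, 50000000)
 *		= 672
 *
 * so the timer is armed roughly 682 ms out once the 10 ms margin is added.
 */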
2030 
2031 static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
2032 {
2033     if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2034         return false;
2035 
2036     /*
2037      * Really be certain that the timer has stopped.  This is a bit of
2038      * paranoia and could only really happen if we had really bad
2039      * interrupt latency and the interrupt routine and timeout were
2040      * running concurrently so that the del_timer() in the interrupt
2041      * handler couldn't run.
2042      */
2043     WARN_ON(del_timer_sync(&host->cto_timer));
2044     clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2045 
2046     return true;
2047 }
2048 
2049 static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
2050 {
2051     if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
2052         return false;
2053 
2054     /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
2055     WARN_ON(del_timer_sync(&host->dto_timer));
2056     clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2057 
2058     return true;
2059 }
2060 
2061 static void dw_mci_tasklet_func(struct tasklet_struct *t)
2062 {
2063     struct dw_mci *host = from_tasklet(host, t, tasklet);
2064     struct mmc_data *data;
2065     struct mmc_command *cmd;
2066     struct mmc_request *mrq;
2067     enum dw_mci_state state;
2068     enum dw_mci_state prev_state;
2069     unsigned int err;
2070 
2071     spin_lock(&host->lock);
2072 
2073     state = host->state;
2074     data = host->data;
2075     mrq = host->mrq;
2076 
2077     do {
2078         prev_state = state;
2079 
2080         switch (state) {
2081         case STATE_IDLE:
2082         case STATE_WAITING_CMD11_DONE:
2083             break;
2084 
2085         case STATE_SENDING_CMD11:
2086         case STATE_SENDING_CMD:
2087             if (!dw_mci_clear_pending_cmd_complete(host))
2088                 break;
2089 
2090             cmd = host->cmd;
2091             host->cmd = NULL;
2092             set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2093             err = dw_mci_command_complete(host, cmd);
2094             if (cmd == mrq->sbc && !err) {
2095                 __dw_mci_start_request(host, host->slot,
2096                                mrq->cmd);
2097                 goto unlock;
2098             }
2099 
2100             if (cmd->data && err) {
2101                 /*
2102                  * During UHS tuning sequence, sending the stop
2103                  * command after the response CRC error would
2104                  * throw the system into a confused state
2105                  * causing all future tuning phases to report
2106                  * failure.
2107                  *
2108                  * In such case controller will move into a data
2109                  * transfer state after a response error or
2110                  * response CRC error. Let's let that finish
2111                  * before trying to send a stop, so we'll go to
2112                  * STATE_SENDING_DATA.
2113                  *
2114                  * Although letting the data transfer take place
2115                  * will waste a bit of time (we already know
2116                  * the command was bad), it can't cause any
2117                  * errors since it's possible it would have
2118                  * taken place anyway if this tasklet got
2119                  * delayed. Allowing the transfer to take place
2120                  * avoids races and keeps things simple.
2121                  */
2122                 if (err != -ETIMEDOUT &&
2123                     host->dir_status == DW_MCI_RECV_STATUS) {
2124                     state = STATE_SENDING_DATA;
2125                     continue;
2126                 }
2127 
2128                 send_stop_abort(host, data);
2129                 dw_mci_stop_dma(host);
2130                 state = STATE_SENDING_STOP;
2131                 break;
2132             }
2133 
2134             if (!cmd->data || err) {
2135                 dw_mci_request_end(host, mrq);
2136                 goto unlock;
2137             }
2138 
2139             prev_state = state = STATE_SENDING_DATA;
2140             fallthrough;
2141 
2142         case STATE_SENDING_DATA:
2143             /*
2144              * We could get a data error and never a transfer
2145              * complete so we'd better check for it here.
2146              *
2147              * Note that we don't really care if we also got a
2148              * transfer complete; stopping the DMA and sending an
2149              * abort won't hurt.
2150              */
2151             if (test_and_clear_bit(EVENT_DATA_ERROR,
2152                            &host->pending_events)) {
2153                 if (!(host->data_status & (SDMMC_INT_DRTO |
2154                                SDMMC_INT_EBE)))
2155                     send_stop_abort(host, data);
2156                 dw_mci_stop_dma(host);
2157                 state = STATE_DATA_ERROR;
2158                 break;
2159             }
2160 
2161             if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2162                         &host->pending_events)) {
2163                 /*
2164                  * Arm the DRTO timer if the data-related interrupts
2165                  * don't arrive within the given time while reading data.
2166                  */
2167                 if (host->dir_status == DW_MCI_RECV_STATUS)
2168                     dw_mci_set_drto(host);
2169                 break;
2170             }
2171 
2172             set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2173 
2174             /*
2175              * Handle an EVENT_DATA_ERROR that might have shown up
2176              * before the transfer completed.  This might not have
2177              * been caught by the check above because the interrupt
2178              * could have gone off between the previous check and
2179              * the check for transfer complete.
2180              *
2181              * Technically this ought not be needed assuming we
2182              * get a DATA_COMPLETE eventually (we'll notice the
2183              * error and end the request), but it shouldn't hurt.
2184              *
2185              * This has the advantage of sending the stop command.
2186              */
2187             if (test_and_clear_bit(EVENT_DATA_ERROR,
2188                            &host->pending_events)) {
2189                 if (!(host->data_status & (SDMMC_INT_DRTO |
2190                                SDMMC_INT_EBE)))
2191                     send_stop_abort(host, data);
2192                 dw_mci_stop_dma(host);
2193                 state = STATE_DATA_ERROR;
2194                 break;
2195             }
2196             prev_state = state = STATE_DATA_BUSY;
2197 
2198             fallthrough;
2199 
2200         case STATE_DATA_BUSY:
2201             if (!dw_mci_clear_pending_data_complete(host)) {
2202                 /*
2203                  * If the data error interrupt comes but the data
2204                  * over interrupt doesn't arrive within the given
2205                  * time while reading data, arm the DRTO timer.
2206                  */
2207                 if (host->dir_status == DW_MCI_RECV_STATUS)
2208                     dw_mci_set_drto(host);
2209                 break;
2210             }
2211 
2212             dw_mci_stop_fault_timer(host);
2213             host->data = NULL;
2214             set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2215             err = dw_mci_data_complete(host, data);
2216 
2217             if (!err) {
2218                 if (!data->stop || mrq->sbc) {
2219                     if (mrq->sbc && data->stop)
2220                         data->stop->error = 0;
2221                     dw_mci_request_end(host, mrq);
2222                     goto unlock;
2223                 }
2224 
2225                 /* stop command for open-ended transfer */
2226                 if (data->stop)
2227                     send_stop_abort(host, data);
2228             } else {
2229                 /*
2230                  * If we don't have a command complete now we'll
2231                  * never get one since we just reset everything;
2232                  * better end the request.
2233                  *
2234                  * If we do have a command complete we'll fall
2235                  * through to the SENDING_STOP command and
2236                  * everything will be peachy keen.
2237                  */
2238                 if (!test_bit(EVENT_CMD_COMPLETE,
2239                           &host->pending_events)) {
2240                     host->cmd = NULL;
2241                     dw_mci_request_end(host, mrq);
2242                     goto unlock;
2243                 }
2244             }
2245 
2246             /*
2247              * If err is non-zero, the stop-abort
2248              * command has already been issued.
2249              */
2250             prev_state = state = STATE_SENDING_STOP;
2251 
2252             fallthrough;
2253 
2254         case STATE_SENDING_STOP:
2255             if (!dw_mci_clear_pending_cmd_complete(host))
2256                 break;
2257 
2258             /* CMD error in data command */
2259             if (mrq->cmd->error && mrq->data)
2260                 dw_mci_reset(host);
2261 
2262             dw_mci_stop_fault_timer(host);
2263             host->cmd = NULL;
2264             host->data = NULL;
2265 
2266             if (!mrq->sbc && mrq->stop)
2267                 dw_mci_command_complete(host, mrq->stop);
2268             else
2269                 host->cmd_status = 0;
2270 
2271             dw_mci_request_end(host, mrq);
2272             goto unlock;
2273 
2274         case STATE_DATA_ERROR:
2275             if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2276                         &host->pending_events))
2277                 break;
2278 
2279             state = STATE_DATA_BUSY;
2280             break;
2281         }
2282     } while (state != prev_state);
2283 
2284     host->state = state;
2285 unlock:
2286     spin_unlock(&host->lock);
2288 }
2289 
2290 /* push final bytes to part_buf, only use during push */
2291 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2292 {
2293     memcpy((void *)&host->part_buf, buf, cnt);
2294     host->part_buf_count = cnt;
2295 }
2296 
2297 /* append bytes to part_buf, only use during push */
2298 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2299 {
2300     cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2301     memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2302     host->part_buf_count += cnt;
2303     return cnt;
2304 }
2305 
2306 /* pull first bytes from part_buf, only use during pull */
2307 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2308 {
2309     cnt = min_t(int, cnt, host->part_buf_count);
2310     if (cnt) {
2311         memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2312                cnt);
2313         host->part_buf_count -= cnt;
2314         host->part_buf_start += cnt;
2315     }
2316     return cnt;
2317 }
2318 
2319 /* pull final bytes from the part_buf, assuming it's just been filled */
2320 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2321 {
2322     memcpy(buf, &host->part_buf, cnt);
2323     host->part_buf_start = cnt;
2324     host->part_buf_count = (1 << host->data_shift) - cnt;
2325 }
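/*
 * How these part_buf helpers cooperate, assuming a 32-bit FIFO
 * (host->data_shift == 2): pushing 7 bytes writes one full 4-byte word to
 * the FIFO and parks the 3-byte tail via dw_mci_set_part_bytes(); the next
 * push first tops part_buf up to a full word with dw_mci_push_part_bytes()
 * and flushes it before streaming whole words again.  The pull side
 * mirrors this with dw_mci_pull_part_bytes()/dw_mci_pull_final_bytes().
 */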
2326 
2327 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2328 {
2329     struct mmc_data *data = host->data;
2330     int init_cnt = cnt;
2331 
2332     /* try and push anything in the part_buf */
2333     if (unlikely(host->part_buf_count)) {
2334         int len = dw_mci_push_part_bytes(host, buf, cnt);
2335 
2336         buf += len;
2337         cnt -= len;
2338         if (host->part_buf_count == 2) {
2339             mci_fifo_writew(host->fifo_reg, host->part_buf16);
2340             host->part_buf_count = 0;
2341         }
2342     }
2343 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2344     if (unlikely((unsigned long)buf & 0x1)) {
2345         while (cnt >= 2) {
2346             u16 aligned_buf[64];
2347             int len = min(cnt & -2, (int)sizeof(aligned_buf));
2348             int items = len >> 1;
2349             int i;
2350             /* memcpy from input buffer into aligned buffer */
2351             memcpy(aligned_buf, buf, len);
2352             buf += len;
2353             cnt -= len;
2354             /* push data from aligned buffer into fifo */
2355             for (i = 0; i < items; ++i)
2356                 mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
2357         }
2358     } else
2359 #endif
2360     {
2361         u16 *pdata = buf;
2362 
2363         for (; cnt >= 2; cnt -= 2)
2364             mci_fifo_writew(host->fifo_reg, *pdata++);
2365         buf = pdata;
2366     }
2367     /* put anything remaining in the part_buf */
2368     if (cnt) {
2369         dw_mci_set_part_bytes(host, buf, cnt);
2370         /* Push data if we have reached the expected data length */
2371         if ((data->bytes_xfered + init_cnt) ==
2372             (data->blksz * data->blocks))
2373             mci_fifo_writew(host->fifo_reg, host->part_buf16);
2374     }
2375 }
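/*
 * Note that when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is not set, the
 * 16/32/64-bit push and pull helpers bounce misaligned buffers through a
 * small on-stack array so every FIFO access stays naturally aligned; the
 * memcpy() absorbs the unaligned side of the transfer.
 */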
2376 
2377 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2378 {
2379 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2380     if (unlikely((unsigned long)buf & 0x1)) {
2381         while (cnt >= 2) {
2382             /* pull data from fifo into aligned buffer */
2383             u16 aligned_buf[64];
2384             int len = min(cnt & -2, (int)sizeof(aligned_buf));
2385             int items = len >> 1;
2386             int i;
2387 
2388             for (i = 0; i < items; ++i)
2389                 aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
2390             /* memcpy from aligned buffer into output buffer */
2391             memcpy(buf, aligned_buf, len);
2392             buf += len;
2393             cnt -= len;
2394         }
2395     } else
2396 #endif
2397     {
2398         u16 *pdata = buf;
2399 
2400         for (; cnt >= 2; cnt -= 2)
2401             *pdata++ = mci_fifo_readw(host->fifo_reg);
2402         buf = pdata;
2403     }
2404     if (cnt) {
2405         host->part_buf16 = mci_fifo_readw(host->fifo_reg);
2406         dw_mci_pull_final_bytes(host, buf, cnt);
2407     }
2408 }
2409 
2410 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2411 {
2412     struct mmc_data *data = host->data;
2413     int init_cnt = cnt;
2414 
2415     /* try and push anything in the part_buf */
2416     if (unlikely(host->part_buf_count)) {
2417         int len = dw_mci_push_part_bytes(host, buf, cnt);
2418 
2419         buf += len;
2420         cnt -= len;
2421         if (host->part_buf_count == 4) {
2422             mci_fifo_writel(host->fifo_reg, host->part_buf32);
2423             host->part_buf_count = 0;
2424         }
2425     }
2426 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2427     if (unlikely((unsigned long)buf & 0x3)) {
2428         while (cnt >= 4) {
2429             u32 aligned_buf[32];
2430             int len = min(cnt & -4, (int)sizeof(aligned_buf));
2431             int items = len >> 2;
2432             int i;
2433             /* memcpy from input buffer into aligned buffer */
2434             memcpy(aligned_buf, buf, len);
2435             buf += len;
2436             cnt -= len;
2437             /* push data from aligned buffer into fifo */
2438             for (i = 0; i < items; ++i)
2439                 mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
2440         }
2441     } else
2442 #endif
2443     {
2444         u32 *pdata = buf;
2445 
2446         for (; cnt >= 4; cnt -= 4)
2447             mci_fifo_writel(host->fifo_reg, *pdata++);
2448         buf = pdata;
2449     }
2450     /* put anything remaining in the part_buf */
2451     if (cnt) {
2452         dw_mci_set_part_bytes(host, buf, cnt);
2453         /* Push data if we have reached the expected data length */
2454         if ((data->bytes_xfered + init_cnt) ==
2455             (data->blksz * data->blocks))
2456             mci_fifo_writel(host->fifo_reg, host->part_buf32);
2457     }
2458 }
2459 
2460 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2461 {
2462 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2463     if (unlikely((unsigned long)buf & 0x3)) {
2464         while (cnt >= 4) {
2465             /* pull data from fifo into aligned buffer */
2466             u32 aligned_buf[32];
2467             int len = min(cnt & -4, (int)sizeof(aligned_buf));
2468             int items = len >> 2;
2469             int i;
2470 
2471             for (i = 0; i < items; ++i)
2472                 aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
2473             /* memcpy from aligned buffer into output buffer */
2474             memcpy(buf, aligned_buf, len);
2475             buf += len;
2476             cnt -= len;
2477         }
2478     } else
2479 #endif
2480     {
2481         u32 *pdata = buf;
2482 
2483         for (; cnt >= 4; cnt -= 4)
2484             *pdata++ = mci_fifo_readl(host->fifo_reg);
2485         buf = pdata;
2486     }
2487     if (cnt) {
2488         host->part_buf32 = mci_fifo_readl(host->fifo_reg);
2489         dw_mci_pull_final_bytes(host, buf, cnt);
2490     }
2491 }
2492 
2493 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2494 {
2495     struct mmc_data *data = host->data;
2496     int init_cnt = cnt;
2497 
2498     /* try and push anything in the part_buf */
2499     if (unlikely(host->part_buf_count)) {
2500         int len = dw_mci_push_part_bytes(host, buf, cnt);
2501 
2502         buf += len;
2503         cnt -= len;
2504 
2505         if (host->part_buf_count == 8) {
2506             mci_fifo_writeq(host->fifo_reg, host->part_buf);
2507             host->part_buf_count = 0;
2508         }
2509     }
2510 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2511     if (unlikely((unsigned long)buf & 0x7)) {
2512         while (cnt >= 8) {
2513             u64 aligned_buf[16];
2514             int len = min(cnt & -8, (int)sizeof(aligned_buf));
2515             int items = len >> 3;
2516             int i;
2517             /* memcpy from input buffer into aligned buffer */
2518             memcpy(aligned_buf, buf, len);
2519             buf += len;
2520             cnt -= len;
2521             /* push data from aligned buffer into fifo */
2522             for (i = 0; i < items; ++i)
2523                 mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
2524         }
2525     } else
2526 #endif
2527     {
2528         u64 *pdata = buf;
2529 
2530         for (; cnt >= 8; cnt -= 8)
2531             mci_fifo_writeq(host->fifo_reg, *pdata++);
2532         buf = pdata;
2533     }
2534     /* put anything remaining in the part_buf */
2535     if (cnt) {
2536         dw_mci_set_part_bytes(host, buf, cnt);
2537         /* Push data if we have reached the expected data length */
2538         if ((data->bytes_xfered + init_cnt) ==
2539             (data->blksz * data->blocks))
2540             mci_fifo_writeq(host->fifo_reg, host->part_buf);
2541     }
2542 }
2543 
2544 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2545 {
2546 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2547     if (unlikely((unsigned long)buf & 0x7)) {
2548         while (cnt >= 8) {
2549             /* pull data from fifo into aligned buffer */
2550             u64 aligned_buf[16];
2551             int len = min(cnt & -8, (int)sizeof(aligned_buf));
2552             int items = len >> 3;
2553             int i;
2554 
2555             for (i = 0; i < items; ++i)
2556                 aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
2557 
2558             /* memcpy from aligned buffer into output buffer */
2559             memcpy(buf, aligned_buf, len);
2560             buf += len;
2561             cnt -= len;
2562         }
2563     } else
2564 #endif
2565     {
2566         u64 *pdata = buf;
2567 
2568         for (; cnt >= 8; cnt -= 8)
2569             *pdata++ = mci_fifo_readq(host->fifo_reg);
2570         buf = pdata;
2571     }
2572     if (cnt) {
2573         host->part_buf = mci_fifo_readq(host->fifo_reg);
2574         dw_mci_pull_final_bytes(host, buf, cnt);
2575     }
2576 }
2577 
2578 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2579 {
2580     int len;
2581 
2582     /* get remaining partial bytes */
2583     len = dw_mci_pull_part_bytes(host, buf, cnt);
2584     if (unlikely(len == cnt))
2585         return;
2586     buf += len;
2587     cnt -= len;
2588 
2589     /* get the rest of the data */
2590     host->pull_data(host, buf, cnt);
2591 }
2592 
2593 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2594 {
2595     struct sg_mapping_iter *sg_miter = &host->sg_miter;
2596     void *buf;
2597     unsigned int offset;
2598     struct mmc_data *data = host->data;
2599     int shift = host->data_shift;
2600     u32 status;
2601     unsigned int len;
2602     unsigned int remain, fcnt;
2603 
2604     do {
2605         if (!sg_miter_next(sg_miter))
2606             goto done;
2607 
2608         host->sg = sg_miter->piter.sg;
2609         buf = sg_miter->addr;
2610         remain = sg_miter->length;
2611         offset = 0;
2612 
2613         do {
2614             fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2615                     << shift) + host->part_buf_count;
2616             len = min(remain, fcnt);
2617             if (!len)
2618                 break;
2619             dw_mci_pull_data(host, (void *)(buf + offset), len);
2620             data->bytes_xfered += len;
2621             offset += len;
2622             remain -= len;
2623         } while (remain);
2624 
2625         sg_miter->consumed = offset;
2626         status = mci_readl(host, MINTSTS);
2627         mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2628     /* if the RXDR is ready, read again */
2629     } while ((status & SDMMC_INT_RXDR) ||
2630          (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2631 
2632     if (!remain) {
2633         if (!sg_miter_next(sg_miter))
2634             goto done;
2635         sg_miter->consumed = 0;
2636     }
2637     sg_miter_stop(sg_miter);
2638     return;
2639 
2640 done:
2641     sg_miter_stop(sg_miter);
2642     host->sg = NULL;
2643     smp_wmb(); /* drain writebuffer */
2644     set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2645 }
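/*
 * Example of the fcnt arithmetic above, assuming a 32-bit FIFO (shift ==
 * 2): if SDMMC_GET_FCNT() reports 4 words in the FIFO and part_buf holds
 * 2 leftover bytes, then fcnt = (4 << 2) + 2 = 18 bytes can be pulled on
 * this pass.
 */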
2646 
2647 static void dw_mci_write_data_pio(struct dw_mci *host)
2648 {
2649     struct sg_mapping_iter *sg_miter = &host->sg_miter;
2650     void *buf;
2651     unsigned int offset;
2652     struct mmc_data *data = host->data;
2653     int shift = host->data_shift;
2654     u32 status;
2655     unsigned int len;
2656     unsigned int fifo_depth = host->fifo_depth;
2657     unsigned int remain, fcnt;
2658 
2659     do {
2660         if (!sg_miter_next(sg_miter))
2661             goto done;
2662 
2663         host->sg = sg_miter->piter.sg;
2664         buf = sg_miter->addr;
2665         remain = sg_miter->length;
2666         offset = 0;
2667 
2668         do {
2669             fcnt = ((fifo_depth -
2670                  SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2671                     << shift) - host->part_buf_count;
2672             len = min(remain, fcnt);
2673             if (!len)
2674                 break;
2675             host->push_data(host, (void *)(buf + offset), len);
2676             data->bytes_xfered += len;
2677             offset += len;
2678             remain -= len;
2679         } while (remain);
2680 
2681         sg_miter->consumed = offset;
2682         status = mci_readl(host, MINTSTS);
2683         mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2684     } while (status & SDMMC_INT_TXDR); /* if TXDR, write again */
2685 
2686     if (!remain) {
2687         if (!sg_miter_next(sg_miter))
2688             goto done;
2689         sg_miter->consumed = 0;
2690     }
2691     sg_miter_stop(sg_miter);
2692     return;
2693 
2694 done:
2695     sg_miter_stop(sg_miter);
2696     host->sg = NULL;
2697     smp_wmb(); /* drain writebuffer */
2698     set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2699 }
2700 
2701 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2702 {
2703     del_timer(&host->cto_timer);
2704 
2705     if (!host->cmd_status)
2706         host->cmd_status = status;
2707 
2708     smp_wmb(); /* drain writebuffer */
2709 
2710     set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2711     tasklet_schedule(&host->tasklet);
2712 
2713     dw_mci_start_fault_timer(host);
2714 }
2715 
2716 static void dw_mci_handle_cd(struct dw_mci *host)
2717 {
2718     struct dw_mci_slot *slot = host->slot;
2719 
2720     mmc_detect_change(slot->mmc,
2721         msecs_to_jiffies(host->pdata->detect_delay_ms));
2722 }
2723 
2724 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2725 {
2726     struct dw_mci *host = dev_id;
2727     u32 pending;
2728     struct dw_mci_slot *slot = host->slot;
2729 
2730     pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2731 
2732     if (pending) {
2733         /* Check volt switch first, since it can look like an error */
2734         if ((host->state == STATE_SENDING_CMD11) &&
2735             (pending & SDMMC_INT_VOLT_SWITCH)) {
2736             mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2737             pending &= ~SDMMC_INT_VOLT_SWITCH;
2738 
2739             /*
2740              * Hold the lock; we know cmd11_timer can't be kicked
2741              * off after the lock is released, so safe to delete.
2742              */
2743             spin_lock(&host->irq_lock);
2744             dw_mci_cmd_interrupt(host, pending);
2745             spin_unlock(&host->irq_lock);
2746 
2747             del_timer(&host->cmd11_timer);
2748         }
2749 
2750         if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2751             spin_lock(&host->irq_lock);
2752 
2753             del_timer(&host->cto_timer);
2754             mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2755             host->cmd_status = pending;
2756             smp_wmb(); /* drain writebuffer */
2757             set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2758 
2759             spin_unlock(&host->irq_lock);
2760         }
2761 
2762         if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2763             spin_lock(&host->irq_lock);
2764 
2765             if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
2766                 del_timer(&host->dto_timer);
2767 
2768             /* if there is an error, report DATA_ERROR */
2769             mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2770             host->data_status = pending;
2771             smp_wmb(); /* drain writebuffer */
2772             set_bit(EVENT_DATA_ERROR, &host->pending_events);
2773 
2774             if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
2775                 /* In case of error, we cannot expect a DTO */
2776                 set_bit(EVENT_DATA_COMPLETE,
2777                     &host->pending_events);
2778 
2779             tasklet_schedule(&host->tasklet);
2780 
2781             spin_unlock(&host->irq_lock);
2782         }
2783 
2784         if (pending & SDMMC_INT_DATA_OVER) {
2785             spin_lock(&host->irq_lock);
2786 
2787             del_timer(&host->dto_timer);
2788 
2789             mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2790             if (!host->data_status)
2791                 host->data_status = pending;
2792             smp_wmb(); /* drain writebuffer */
2793             if (host->dir_status == DW_MCI_RECV_STATUS) {
2794                 if (host->sg != NULL)
2795                     dw_mci_read_data_pio(host, true);
2796             }
2797             set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2798             tasklet_schedule(&host->tasklet);
2799 
2800             spin_unlock(&host->irq_lock);
2801         }
2802 
2803         if (pending & SDMMC_INT_RXDR) {
2804             mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2805             if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2806                 dw_mci_read_data_pio(host, false);
2807         }
2808 
2809         if (pending & SDMMC_INT_TXDR) {
2810             mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2811             if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2812                 dw_mci_write_data_pio(host);
2813         }
2814 
2815         if (pending & SDMMC_INT_CMD_DONE) {
2816             spin_lock(&host->irq_lock);
2817 
2818             mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2819             dw_mci_cmd_interrupt(host, pending);
2820 
2821             spin_unlock(&host->irq_lock);
2822         }
2823 
2824         if (pending & SDMMC_INT_CD) {
2825             mci_writel(host, RINTSTS, SDMMC_INT_CD);
2826             dw_mci_handle_cd(host);
2827         }
2828 
2829         if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2830             mci_writel(host, RINTSTS,
2831                    SDMMC_INT_SDIO(slot->sdio_id));
2832             __dw_mci_enable_sdio_irq(slot, 0);
2833             sdio_signal_irq(slot->mmc);
2834         }
2836     }
2837 
2838     if (host->use_dma != TRANS_MODE_IDMAC)
2839         return IRQ_HANDLED;
2840 
2841     /* Handle IDMA interrupts */
2842     if (host->dma_64bit_address == 1) {
2843         pending = mci_readl(host, IDSTS64);
2844         if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2845             mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2846                             SDMMC_IDMAC_INT_RI);
2847             mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2848             if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2849                 host->dma_ops->complete((void *)host);
2850         }
2851     } else {
2852         pending = mci_readl(host, IDSTS);
2853         if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2854             mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2855                             SDMMC_IDMAC_INT_RI);
2856             mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2857             if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2858                 host->dma_ops->complete((void *)host);
2859         }
2860     }
2861 
2862     return IRQ_HANDLED;
2863 }
2864 
2865 static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
2866 {
2867     struct dw_mci *host = slot->host;
2868     const struct dw_mci_drv_data *drv_data = host->drv_data;
2869     struct mmc_host *mmc = slot->mmc;
2870     int ctrl_id;
2871 
2872     if (host->pdata->caps)
2873         mmc->caps = host->pdata->caps;
2874 
2875     if (host->pdata->pm_caps)
2876         mmc->pm_caps = host->pdata->pm_caps;
2877 
2878     if (drv_data)
2879         mmc->caps |= drv_data->common_caps;
2880 
2881     if (host->dev->of_node) {
2882         ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2883         if (ctrl_id < 0)
2884             ctrl_id = 0;
2885     } else {
2886         ctrl_id = to_platform_device(host->dev)->id;
2887     }
2888 
2889     if (drv_data && drv_data->caps) {
2890         if (ctrl_id >= drv_data->num_caps) {
2891             dev_err(host->dev, "invalid controller id %d\n",
2892                 ctrl_id);
2893             return -EINVAL;
2894         }
2895         mmc->caps |= drv_data->caps[ctrl_id];
2896     }
2897 
2898     if (host->pdata->caps2)
2899         mmc->caps2 = host->pdata->caps2;
2900 
2901     /* if host has set a minimum_freq, we should respect it */
2902     if (host->minimum_speed)
2903         mmc->f_min = host->minimum_speed;
2904     else
2905         mmc->f_min = DW_MCI_FREQ_MIN;
2906 
2907     if (!mmc->f_max)
2908         mmc->f_max = DW_MCI_FREQ_MAX;
2909 
2910     /* Process SDIO IRQs through the sdio_irq_work. */
2911     if (mmc->caps & MMC_CAP_SDIO_IRQ)
2912         mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
2913 
2914     return 0;
2915 }
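/*
 * The capability mask is thus layered in order: pdata->caps, the variant
 * driver's common_caps, the per-controller drv_data->caps[ctrl_id] entry
 * and finally pdata->caps2, with f_min/f_max falling back to the
 * DW_MCI_FREQ_MIN/DW_MCI_FREQ_MAX defaults when nothing else set them.
 */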
2916 
2917 static int dw_mci_init_slot(struct dw_mci *host)
2918 {
2919     struct mmc_host *mmc;
2920     struct dw_mci_slot *slot;
2921     int ret;
2922 
2923     mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2924     if (!mmc)
2925         return -ENOMEM;
2926 
2927     slot = mmc_priv(mmc);
2928     slot->id = 0;
2929     slot->sdio_id = host->sdio_id0 + slot->id;
2930     slot->mmc = mmc;
2931     slot->host = host;
2932     host->slot = slot;
2933 
2934     mmc->ops = &dw_mci_ops;
2935 
2936     /* if there are external regulators, get them */
2937     ret = mmc_regulator_get_supply(mmc);
2938     if (ret)
2939         goto err_host_allocated;
2940 
2941     if (!mmc->ocr_avail)
2942         mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2943 
2944     ret = mmc_of_parse(mmc);
2945     if (ret)
2946         goto err_host_allocated;
2947 
2948     ret = dw_mci_init_slot_caps(slot);
2949     if (ret)
2950         goto err_host_allocated;
2951 
2952     /* Useful defaults if platform data is unset. */
2953     if (host->use_dma == TRANS_MODE_IDMAC) {
2954         mmc->max_segs = host->ring_size;
2955         mmc->max_blk_size = 65535;
2956         mmc->max_seg_size = 0x1000;
2957         mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2958         mmc->max_blk_count = mmc->max_req_size / 512;
2959     } else if (host->use_dma == TRANS_MODE_EDMAC) {
2960         mmc->max_segs = 64;
2961         mmc->max_blk_size = 65535;
2962         mmc->max_blk_count = 65535;
2963         mmc->max_req_size =
2964                 mmc->max_blk_size * mmc->max_blk_count;
2965         mmc->max_seg_size = mmc->max_req_size;
2966     } else {
2967         /* TRANS_MODE_PIO */
2968         mmc->max_segs = 64;
2969         mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
2970         mmc->max_blk_count = 512;
2971         mmc->max_req_size = mmc->max_blk_size *
2972                     mmc->max_blk_count;
2973         mmc->max_seg_size = mmc->max_req_size;
2974     }
2975 
2976     dw_mci_get_cd(mmc);
2977 
2978     ret = mmc_add_host(mmc);
2979     if (ret)
2980         goto err_host_allocated;
2981 
2982 #if defined(CONFIG_DEBUG_FS)
2983     dw_mci_init_debugfs(slot);
2984 #endif
2985 
2986     return 0;
2987 
2988 err_host_allocated:
2989     mmc_free_host(mmc);
2990     return ret;
2991 }
2992 
2993 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
2994 {
2995     /* Debugfs stuff is cleaned up by mmc core */
2996     mmc_remove_host(slot->mmc);
2997     slot->host->slot = NULL;
2998     mmc_free_host(slot->mmc);
2999 }
3000 
3001 static void dw_mci_init_dma(struct dw_mci *host)
3002 {
3003     int addr_config;
3004     struct device *dev = host->dev;
3005 
3006     /*
3007      * Check transfer mode from HCON[17:16].
3008      * This clarifies the ambiguous description in the dw_mmc databook:
3009      * 2b'00: No DMA Interface -> Actually means using Internal DMA block
3010      * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
3011      * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
3012      * 2b'11: Non DW DMA Interface -> PIO only
3013      * Compared to the DesignWare DMA Interface, the Generic DMA Interface
3014      * has a simpler request/acknowledge handshake mechanism and both are
3015      * regarded as external DMA masters by dw_mmc.
3016      */
3017     host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
3018     if (host->use_dma == DMA_INTERFACE_IDMA) {
3019         host->use_dma = TRANS_MODE_IDMAC;
3020     } else if (host->use_dma == DMA_INTERFACE_DWDMA ||
3021            host->use_dma == DMA_INTERFACE_GDMA) {
3022         host->use_dma = TRANS_MODE_EDMAC;
3023     } else {
3024         goto no_dma;
3025     }
3026 
3027     /* Determine which DMA interface to use */
3028     if (host->use_dma == TRANS_MODE_IDMAC) {
3029         /*
3030          * Check the ADDR_CONFIG bit in HCON to find
3031          * the IDMAC address bus width.
3032          */
3033         addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
3034 
3035         if (addr_config == 1) {
3036             /* host supports IDMAC in 64-bit address mode */
3037             host->dma_64bit_address = 1;
3038             dev_info(host->dev,
3039                  "IDMAC supports 64-bit address mode.\n");
3040             if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
3041                 dma_set_coherent_mask(host->dev,
3042                               DMA_BIT_MASK(64));
3043         } else {
3044             /* host supports IDMAC in 32-bit address mode */
3045             host->dma_64bit_address = 0;
3046             dev_info(host->dev,
3047                  "IDMAC supports 32-bit address mode.\n");
3048         }
3049 
3050         /* Alloc memory for sg translation */
3051         host->sg_cpu = dmam_alloc_coherent(host->dev,
3052                            DESC_RING_BUF_SZ,
3053                            &host->sg_dma, GFP_KERNEL);
3054         if (!host->sg_cpu) {
3055             dev_err(host->dev,
3056                 "%s: could not alloc DMA memory\n",
3057                 __func__);
3058             goto no_dma;
3059         }
3060 
3061         host->dma_ops = &dw_mci_idmac_ops;
3062         dev_info(host->dev, "Using internal DMA controller.\n");
3063     } else {
3064         /* TRANS_MODE_EDMAC: check dma bindings again */
3065         if ((device_property_string_array_count(dev, "dma-names") < 0) ||
3066             !device_property_present(dev, "dmas")) {
3067             goto no_dma;
3068         }
3069         host->dma_ops = &dw_mci_edmac_ops;
3070         dev_info(host->dev, "Using external DMA controller.\n");
3071     }
3072 
3073     if (host->dma_ops->init && host->dma_ops->start &&
3074         host->dma_ops->stop && host->dma_ops->cleanup) {
3075         if (host->dma_ops->init(host)) {
3076             dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
3077                 __func__);
3078             goto no_dma;
3079         }
3080     } else {
3081         dev_err(host->dev, "DMA initialization not found.\n");
3082         goto no_dma;
3083     }
3084 
3085     return;
3086 
3087 no_dma:
3088     dev_info(host->dev, "Using PIO mode.\n");
3089     host->use_dma = TRANS_MODE_PIO;
3090 }
3091 
3092 static void dw_mci_cmd11_timer(struct timer_list *t)
3093 {
3094     struct dw_mci *host = from_timer(host, t, cmd11_timer);
3095 
3096     if (host->state != STATE_SENDING_CMD11) {
3097         dev_warn(host->dev, "Unexpected CMD11 timeout\n");
3098         return;
3099     }
3100 
3101     host->cmd_status = SDMMC_INT_RTO;
3102     set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3103     tasklet_schedule(&host->tasklet);
3104 }
3105 
3106 static void dw_mci_cto_timer(struct timer_list *t)
3107 {
3108     struct dw_mci *host = from_timer(host, t, cto_timer);
3109     unsigned long irqflags;
3110     u32 pending;
3111 
3112     spin_lock_irqsave(&host->irq_lock, irqflags);
3113 
3114     /*
3115      * If somehow we have very bad interrupt latency it's remotely possible
3116      * that the timer could fire while the interrupt is still pending or
3117      * while the interrupt is midway through running.  Let's be paranoid
3118      * and detect those two cases.  Note that this paranoia is somewhat
3119      * justified because in this function we don't actually cancel the
3120      * pending command in the controller--we just assume it will never come.
3121      */
3122     pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3123     if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
3124         /* The interrupt should fire; no need to act but we can warn */
3125         dev_warn(host->dev, "Unexpected interrupt latency\n");
3126         goto exit;
3127     }
3128     if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
3129         /* Presumably interrupt handler couldn't delete the timer */
3130         dev_warn(host->dev, "CTO timeout when already completed\n");
3131         goto exit;
3132     }
3133 
3134     /*
3135      * Continued paranoia to make sure we're in the state we expect.
3136      * This paranoia isn't really justified but it seems good to be safe.
3137      */
3138     switch (host->state) {
3139     case STATE_SENDING_CMD11:
3140     case STATE_SENDING_CMD:
3141     case STATE_SENDING_STOP:
3142         /*
3143          * If CMD_DONE interrupt does NOT come in sending command
3144          * state, we should notify the driver to terminate current
3145          * transfer and report a command timeout to the core.
3146          */
3147         host->cmd_status = SDMMC_INT_RTO;
3148         set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3149         tasklet_schedule(&host->tasklet);
3150         break;
3151     default:
3152         dev_warn(host->dev, "Unexpected command timeout, state %d\n",
3153              host->state);
3154         break;
3155     }
3156 
3157 exit:
3158     spin_unlock_irqrestore(&host->irq_lock, irqflags);
3159 }
3160 
3161 static void dw_mci_dto_timer(struct timer_list *t)
3162 {
3163     struct dw_mci *host = from_timer(host, t, dto_timer);
3164     unsigned long irqflags;
3165     u32 pending;
3166 
3167     spin_lock_irqsave(&host->irq_lock, irqflags);
3168 
3169     /*
3170      * The DTO timer is much longer than the CTO timer, so it's even less
3171      * likely that we'll hit these cases, but it pays to be paranoid.
3172      */
3173     pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3174     if (pending & SDMMC_INT_DATA_OVER) {
3175         /* The interrupt should fire; no need to act but we can warn */
3176         dev_warn(host->dev, "Unexpected data interrupt latency\n");
3177         goto exit;
3178     }
3179     if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
3180         /* Presumably interrupt handler couldn't delete the timer */
3181         dev_warn(host->dev, "DTO timeout when already completed\n");
3182         goto exit;
3183     }
3184 
3185     /*
3186      * Continued paranoia to make sure we're in the state we expect.
3187      * This paranoia isn't really justified but it seems good to be safe.
3188      */
3189     switch (host->state) {
3190     case STATE_SENDING_DATA:
3191     case STATE_DATA_BUSY:
3192         /*
3193          * If DTO interrupt does NOT come in sending data state,
3194          * we should notify the driver to terminate current transfer
3195          * and report a data timeout to the core.
3196          */
3197         host->data_status = SDMMC_INT_DRTO;
3198         set_bit(EVENT_DATA_ERROR, &host->pending_events);
3199         set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3200         tasklet_schedule(&host->tasklet);
3201         break;
3202     default:
3203         dev_warn(host->dev, "Unexpected data timeout, state %d\n",
3204              host->state);
3205         break;
3206     }
3207 
3208 exit:
3209     spin_unlock_irqrestore(&host->irq_lock, irqflags);
3210 }
3211 
3212 #ifdef CONFIG_OF
3213 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3214 {
3215     struct dw_mci_board *pdata;
3216     struct device *dev = host->dev;
3217     const struct dw_mci_drv_data *drv_data = host->drv_data;
3218     int ret;
3219     u32 clock_frequency;
3220 
3221     pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3222     if (!pdata)
3223         return ERR_PTR(-ENOMEM);
3224 
3225     /* find the reset controller, if one exists */
3226     pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
3227     if (IS_ERR(pdata->rstc))
3228         return ERR_CAST(pdata->rstc);
3229 
3230     if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
3231         dev_info(dev,
3232              "fifo-depth property not found, using value of FIFOTH register as default\n");
3233 
3234     device_property_read_u32(dev, "card-detect-delay",
3235                  &pdata->detect_delay_ms);
3236 
3237     device_property_read_u32(dev, "data-addr", &host->data_addr_override);
3238 
3239     if (device_property_present(dev, "fifo-watermark-aligned"))
3240         host->wm_aligned = true;
3241 
3242     if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
3243         pdata->bus_hz = clock_frequency;
3244 
3245     if (drv_data && drv_data->parse_dt) {
3246         ret = drv_data->parse_dt(host);
3247         if (ret)
3248             return ERR_PTR(ret);
3249     }
3250 
3251     return pdata;
3252 }
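/*
 * For illustration, a hypothetical device-tree node exercising the
 * properties parsed above (node name, unit address and values are made
 * up; the property names follow the synopsys,dw-mshc binding):
 *
 *	mmc@fe320000 {
 *		compatible = "snps,dw-mshc";
 *		fifo-depth = <0x100>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <50000000>;
 *		fifo-watermark-aligned;
 *		resets = <&cru 1>;
 *		reset-names = "reset";
 *	};
 */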
3253 
3254 #else /* CONFIG_OF */
3255 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3256 {
3257     return ERR_PTR(-EINVAL);
3258 }
3259 #endif /* CONFIG_OF */
3260 
3261 static void dw_mci_enable_cd(struct dw_mci *host)
3262 {
3263     unsigned long irqflags;
3264     u32 temp;
3265 
3266     /*
3267      * The CD interrupt is only needed when the slot has no usable
3268      * card-detect GPIO and the core is not polling for card presence.
3269      */
3270     if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
3271         return;
3272 
3273     if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
3274         spin_lock_irqsave(&host->irq_lock, irqflags);
3275         temp = mci_readl(host, INTMASK);
3276         temp  |= SDMMC_INT_CD;
3277         mci_writel(host, INTMASK, temp);
3278         spin_unlock_irqrestore(&host->irq_lock, irqflags);
3279     }
3280 }
3281 
3282 int dw_mci_probe(struct dw_mci *host)
3283 {
3284     const struct dw_mci_drv_data *drv_data = host->drv_data;
3285     int width, i, ret = 0;
3286     u32 fifo_size;
3287 
3288     if (!host->pdata) {
3289         host->pdata = dw_mci_parse_dt(host);
3290         if (IS_ERR(host->pdata))
3291             return dev_err_probe(host->dev, PTR_ERR(host->pdata),
3292                          "platform data not available\n");
3293     }
3294 
3295     host->biu_clk = devm_clk_get(host->dev, "biu");
3296     if (IS_ERR(host->biu_clk)) {
3297         dev_dbg(host->dev, "biu clock not available\n");
3298     } else {
3299         ret = clk_prepare_enable(host->biu_clk);
3300         if (ret) {
3301             dev_err(host->dev, "failed to enable biu clock\n");
3302             return ret;
3303         }
3304     }
3305 
3306     host->ciu_clk = devm_clk_get(host->dev, "ciu");
3307     if (IS_ERR(host->ciu_clk)) {
3308         dev_dbg(host->dev, "ciu clock not available\n");
3309         host->bus_hz = host->pdata->bus_hz;
3310     } else {
3311         ret = clk_prepare_enable(host->ciu_clk);
3312         if (ret) {
3313             dev_err(host->dev, "failed to enable ciu clock\n");
3314             goto err_clk_biu;
3315         }
3316 
3317         if (host->pdata->bus_hz) {
3318             ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
3319             if (ret)
3320                 dev_warn(host->dev,
3321                      "Unable to set bus rate to %uHz\n",
3322                      host->pdata->bus_hz);
3323         }
3324         host->bus_hz = clk_get_rate(host->ciu_clk);
3325     }
3326 
3327     if (!host->bus_hz) {
3328         dev_err(host->dev,
3329             "Platform data must supply bus speed\n");
3330         ret = -ENODEV;
3331         goto err_clk_ciu;
3332     }
3333 
3334     if (host->pdata->rstc) {
3335         reset_control_assert(host->pdata->rstc);
3336         usleep_range(10, 50);
3337         reset_control_deassert(host->pdata->rstc);
3338     }
3339 
3340     if (drv_data && drv_data->init) {
3341         ret = drv_data->init(host);
3342         if (ret) {
3343             dev_err(host->dev,
3344                 "implementation-specific init failed\n");
3345             goto err_clk_ciu;
3346         }
3347     }
3348 
3349     timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
3350     timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
3351     timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
3352 
3353     spin_lock_init(&host->lock);
3354     spin_lock_init(&host->irq_lock);
3355     INIT_LIST_HEAD(&host->queue);
3356 
3357     dw_mci_init_fault(host);
3358 
3359     /*
3360      * Get the host data width - this assumes that HCON has been set with
3361      * the correct values.
3362      */
3363     i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
3364     if (!i) {
3365         host->push_data = dw_mci_push_data16;
3366         host->pull_data = dw_mci_pull_data16;
3367         width = 16;
3368         host->data_shift = 1;
3369     } else if (i == 2) {
3370         host->push_data = dw_mci_push_data64;
3371         host->pull_data = dw_mci_pull_data64;
3372         width = 64;
3373         host->data_shift = 3;
3374     } else {
3375         /* Check for a reserved value, and warn if it is */
3376         WARN((i != 1),
3377              "HCON reports a reserved host data width!\n"
3378              "Defaulting to 32-bit access.\n");
3379         host->push_data = dw_mci_push_data32;
3380         host->pull_data = dw_mci_pull_data32;
3381         width = 32;
3382         host->data_shift = 2;
3383     }
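/*
 * Worked example, assuming the HCON layout used by dw_mmc.h (the
 * H_DATA_WIDTH field in bits [9:7], with 0 = 16-bit, 1 = 32-bit and
 * 2 = 64-bit): HCON = 0x00000081 decodes to i = (0x81 >> 7) & 0x7 = 1,
 * so the 32-bit accessors are chosen and data_shift = 2 means every
 * FIFO access moves 1 << 2 = 4 bytes.
 */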
3384 
3385     /* Reset all blocks */
3386     if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3387         ret = -ENODEV;
3388         goto err_clk_ciu;
3389     }
3390 
3391     host->dma_ops = host->pdata->dma_ops;
3392     dw_mci_init_dma(host);
3393 
3394     /* Clear the interrupts for the host controller */
3395     mci_writel(host, RINTSTS, 0xFFFFFFFF);
3396     mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
3397 
3398     /* Put in max timeout */
3399     mci_writel(host, TMOUT, 0xFFFFFFFF);
3400 
3401     /*
3402      * FIFO threshold settings: RX_WMark = fifo_size / 2 - 1,
3403      * TX_WMark = fifo_size / 2, DMA burst size (MSize) = 8.
3404      */
3405     if (!host->pdata->fifo_depth) {
3406         /*
3407          * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3408          * have been overwritten by the bootloader, just like we're
3409          * about to do, so if you know the value for your hardware, you
3410          * should put it in the platform data.
3411          */
3412         fifo_size = mci_readl(host, FIFOTH);
3413         fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3414     } else {
3415         fifo_size = host->pdata->fifo_depth;
3416     }
3417     host->fifo_depth = fifo_size;
3418     host->fifoth_val =
3419         SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3420     mci_writel(host, FIFOTH, host->fifoth_val);
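/*
 * Worked example, assuming the SDMMC_SET_FIFOTH() layout from dw_mmc.h
 * (MSize in bits [30:28], RX_WMark in [27:16], TX_WMark in [11:0]):
 * a 32-word FIFO gives RX_WMark = 15, TX_WMark = 16 and MSize = 0x2
 * (an 8-transfer burst), i.e. FIFOTH = 0x200f0010.
 */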
3421 
3422     /* disable clock to CIU */
3423     mci_writel(host, CLKENA, 0);
3424     mci_writel(host, CLKSRC, 0);
3425 
3426     /*
3427      * The DATA register offset changed in the 2.40a spec, so check the
3428      * version ID and pick the matching FIFO offset.
3429      */
3430     host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3431     dev_info(host->dev, "Version ID is %04x\n", host->verid);
3432 
3433     if (host->data_addr_override)
3434         host->fifo_reg = host->regs + host->data_addr_override;
3435     else if (host->verid < DW_MMC_240A)
3436         host->fifo_reg = host->regs + DATA_OFFSET;
3437     else
3438         host->fifo_reg = host->regs + DATA_240A_OFFSET;
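/*
 * For reference (offsets as defined in dw_mmc.h): pre-2.40a controllers
 * expose the FIFO at DATA_OFFSET (0x100), while 2.40a and later use
 * DATA_240A_OFFSET (0x200), unless the "data-addr" property parsed
 * earlier overrides the offset entirely.
 */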
3439 
3440     tasklet_setup(&host->tasklet, dw_mci_tasklet_func);
3441     ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3442                    host->irq_flags, "dw-mci", host);
3443     if (ret)
3444         goto err_dmaunmap;
3445 
3446     /*
3447      * Enable interrupts for command done, data over, data empty,
3448      * receive ready and error such as transmit, receive timeout, crc error
3449      */
3450     mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3451            SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3452            DW_MCI_ERROR_FLAGS);
3453     /* Enable mci interrupt */
3454     mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
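/*
 * Interrupt delivery is gated twice here: INTMASK selects which sources
 * may assert the interrupt line, while SDMMC_CTRL_INT_ENABLE in CTRL is
 * the global switch; both must be set before any interrupt reaches the
 * CPU.
 */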
3455 
3456     dev_info(host->dev,
3457          "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
3458          host->irq, width, fifo_size);
3459 
3460     /* We need at least one slot to succeed */
3461     ret = dw_mci_init_slot(host);
3462     if (ret) {
3463         dev_dbg(host->dev, "slot init failed\n");
3464         goto err_dmaunmap;
3465     }
3466 
3467     /* Now that slots are all setup, we can enable card detect */
3468     dw_mci_enable_cd(host);
3469 
3470     return 0;
3471 
3472 err_dmaunmap:
3473     if (host->use_dma && host->dma_ops->exit)
3474         host->dma_ops->exit(host);
3475 
3476     reset_control_assert(host->pdata->rstc);
3477 
3478 err_clk_ciu:
3479     clk_disable_unprepare(host->ciu_clk);
3480 
3481 err_clk_biu:
3482     clk_disable_unprepare(host->biu_clk);
3483 
3484     return ret;
3485 }
3486 EXPORT_SYMBOL(dw_mci_probe);
3487 
3488 void dw_mci_remove(struct dw_mci *host)
3489 {
3490     dev_dbg(host->dev, "remove slot\n");
3491     if (host->slot)
3492         dw_mci_cleanup_slot(host->slot);
3493 
3494     mci_writel(host, RINTSTS, 0xFFFFFFFF);
3495     mci_writel(host, INTMASK, 0); /* disable all mmc interrupts first */
3496 
3497     /* disable clock to CIU */
3498     mci_writel(host, CLKENA, 0);
3499     mci_writel(host, CLKSRC, 0);
3500 
3501     if (host->use_dma && host->dma_ops->exit)
3502         host->dma_ops->exit(host);
3503 
3504     reset_control_assert(host->pdata->rstc);
3505 
3506     clk_disable_unprepare(host->ciu_clk);
3507     clk_disable_unprepare(host->biu_clk);
3508 }
3509 EXPORT_SYMBOL(dw_mci_remove);
3510 
3513 #ifdef CONFIG_PM
3514 int dw_mci_runtime_suspend(struct device *dev)
3515 {
3516     struct dw_mci *host = dev_get_drvdata(dev);
3517 
3518     if (host->use_dma && host->dma_ops->exit)
3519         host->dma_ops->exit(host);
3520 
3521     clk_disable_unprepare(host->ciu_clk);
3522 
3523     if (host->slot &&
3524         (mmc_can_gpio_cd(host->slot->mmc) ||
3525          !mmc_card_is_removable(host->slot->mmc)))
3526         clk_disable_unprepare(host->biu_clk);
3527 
3528     return 0;
3529 }
3530 EXPORT_SYMBOL(dw_mci_runtime_suspend);
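/*
 * Note on the clock handling above: the BIU clock is only gated when
 * card detection does not depend on the controller (a dedicated CD GPIO
 * or a non-removable card); otherwise it is left running so the internal
 * card-detect logic keeps working while runtime suspended.
 * dw_mci_runtime_resume() below applies the same condition in reverse.
 */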
3531 
3532 int dw_mci_runtime_resume(struct device *dev)
3533 {
3534     int ret = 0;
3535     struct dw_mci *host = dev_get_drvdata(dev);
3536 
3537     if (host->slot &&
3538         (mmc_can_gpio_cd(host->slot->mmc) ||
3539          !mmc_card_is_removable(host->slot->mmc))) {
3540         ret = clk_prepare_enable(host->biu_clk);
3541         if (ret)
3542             return ret;
3543     }
3544 
3545     ret = clk_prepare_enable(host->ciu_clk);
3546     if (ret)
3547         goto err;
3548 
3549     if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3550         clk_disable_unprepare(host->ciu_clk);
3551         ret = -ENODEV;
3552         goto err;
3553     }
3554 
3555     if (host->use_dma && host->dma_ops->init)
3556         host->dma_ops->init(host);
3557 
3558     /*
3559      * Restore the initial value of the FIFOTH register,
3560      * and invalidate prev_blksz by resetting it to zero.
3561      */
3562     mci_writel(host, FIFOTH, host->fifoth_val);
3563     host->prev_blksz = 0;
3564 
3565     /* Put in max timeout */
3566     mci_writel(host, TMOUT, 0xFFFFFFFF);
3567 
3568     mci_writel(host, RINTSTS, 0xFFFFFFFF);
3569     mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3570            SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3571            DW_MCI_ERROR_FLAGS);
3572     mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3573 
3575     if (host->slot && host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
3576         dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
3577 
3578     /* Force a bus setup to guarantee an available clock output */
3579     dw_mci_setup_bus(host->slot, true);
3580 
3581     /* Re-enable SDIO interrupts. */
3582     if (sdio_irq_claimed(host->slot->mmc))
3583         __dw_mci_enable_sdio_irq(host->slot, 1);
3584 
3585     /* Now that slots are all setup, we can enable card detect */
3586     dw_mci_enable_cd(host);
3587 
3588     return 0;
3589 
3590 err:
3591     if (host->slot &&
3592         (mmc_can_gpio_cd(host->slot->mmc) ||
3593          !mmc_card_is_removable(host->slot->mmc)))
3594         clk_disable_unprepare(host->biu_clk);
3595 
3596     return ret;
3597 }
3598 EXPORT_SYMBOL(dw_mci_runtime_resume);
3599 #endif /* CONFIG_PM */
3600 
3601 static int __init dw_mci_init(void)
3602 {
3603     pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
3604     return 0;
3605 }
3606 
3607 static void __exit dw_mci_exit(void)
3608 {
3609 }
3610 
3611 module_init(dw_mci_init);
3612 module_exit(dw_mci_exit);
3613 
3614 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3615 MODULE_AUTHOR("NXP Semiconductor VietNam");
3616 MODULE_AUTHOR("Imagination Technologies Ltd");
3617 MODULE_LICENSE("GPL v2");