// SPDX-License-Identifier: GPL-2.0
/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 */

/*
 * The MMCIF driver processes MMC requests asynchronously, as the Linux MMC
 * API requires. A request consists of up to three stages: command, optional
 * data and optional stop. Each stage is split into a top half, which programs
 * the hardware and arms the timeout work, and a bottom half, which runs from
 * the threaded interrupt handler (or the DMA completion callback, or the
 * timeout work) and either advances to the next stage or completes the
 * request via mmc_request_done().
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_data/sh_mmcif.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME	"sh_mmcif"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22))
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22))
#define CMD_SET_RBSY		(1 << 21)
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19)
#define CMD_SET_DWEN		(1 << 18)
#define CMD_SET_CMLTE		(1 << 17)
#define CMD_SET_CMD12EN		(1 << 16)
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14))
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14))
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14))
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12))
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12))
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12))
#define CMD_SET_CRC16C		(1 << 10)
#define CMD_SET_CRCSTE		(1 << 8)
#define CMD_SET_TBIT		(1 << 7)
#define CMD_SET_OPDM		(1 << 6)
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DARS		(1 << 2)
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0))
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0))
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0))

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

#define INT_ALL			(INT_RBSYE | INT_CRSPE | INT_BUFREN |	 \
				 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
				 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)

#define INT_CCS			(INT_CCSTO | INT_CCSRCV | INT_CCSDE)

/* CE_INT_MASK */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |	\
				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |	\
				 MASK_MCRCSTO | MASK_MWDATTO |			\
				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

#define MASK_CLEAN		(INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |	\
				 MASK_MBUFREN | MASK_MBUFWEN |			\
				 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |	\
				 MASK_MCMD12RBE | MASK_MCMD12CRE)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000	/* 52 MHz */
#define CLKDEV_MMC_DATA		20000000	/* 20 MHz */
#define CLKDEV_INIT		400000		/* 400 kHz */

enum sh_mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
	STATE_TIMEOUT,
};

enum sh_mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,
	MMCIF_WAIT_FOR_MWRITE,
	MMCIF_WAIT_FOR_READ,
	MMCIF_WAIT_FOR_WRITE,
	MMCIF_WAIT_FOR_READ_END,
	MMCIF_WAIT_FOR_WRITE_END,
	MMCIF_WAIT_FOR_STOP,
};

/*
 * Per-instance host controller state.
 */
struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct clk *clk;
	int bus_width;
	unsigned char timing;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum sh_mmcif_state state;
	enum sh_mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool ccs_enable;		/* Command Completion Signal support */
	bool clk_ctrl2_enable;
	struct mutex thread_lock;
	u32 clkdiv_map;			/* see CE_CLK_CTRL::CLKDIV */

	/* DMA support */
	struct dma_chan *chan_rx;
	struct dma_chan *chan_tx;
	struct completion dma_complete;
	bool dma_active;
};

static const struct of_device_id sh_mmcif_of_match[] = {
	{ .compatible = "renesas,sh-mmcif" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_mmcif_of_match);

#define sh_mmcif_host_to_dev(host) (&host->pd->dev)

static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
				   unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}

static void sh_mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);

	dev_dbg(dev, "Command completed\n");

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
		 dev_name(dev)))
		return;

	complete(&host->dma_complete);
}

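/*
 * Map the request's scatterlist and start a dmaengine transfer from the
 * MMCIF data FIFO to memory. If mapping or descriptor preparation fails,
 * both DMA channels are released and the request falls back to PIO.
 */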
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

static struct dma_chan *
sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	if (slave_id <= 0)
		return NULL;

	return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
}

static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
				     struct dma_chan *chan,
				     enum dma_transfer_direction direction)
{
	struct resource *res;
	struct dma_slave_config cfg = { 0, };

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	cfg.direction = direction;

	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	return dmaengine_slave_config(chan, &cfg);
}

static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	host->dma_active = false;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) {
		struct sh_mmcif_plat_data *pdata = dev->platform_data;

		host->chan_tx = sh_mmcif_request_dma_pdata(host,
							   pdata->slave_id_tx);
		host->chan_rx = sh_mmcif_request_dma_pdata(host,
							   pdata->slave_id_rx);
	} else {
		host->chan_tx = dma_request_chan(dev, "tx");
		if (IS_ERR(host->chan_tx))
			host->chan_tx = NULL;
		host->chan_rx = dma_request_chan(dev, "rx");
		if (IS_ERR(host->chan_rx))
			host->chan_rx = NULL;
	}
	dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
		host->chan_rx);

	if (!host->chan_tx || !host->chan_rx ||
	    sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
	    sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
		goto error;

	return;

error:
	if (host->chan_tx)
		dma_release_channel(host->chan_tx);
	if (host->chan_rx)
		dma_release_channel(host->chan_rx);
	host->chan_tx = host->chan_rx = NULL;
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);

	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}

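/*
 * Program CE_CLK_CTRL for the requested card clock: either pick the best
 * divider from clkdiv_map (also adjusting the parent clock rate), pass the
 * peripheral clock through (sup_pclk), or derive a power-of-two divider
 * from the current parent rate. A requested clock of 0 leaves the clock
 * disabled.
 */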
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *p = dev->platform_data;
	bool sup_pclk = p ? p->sup_pclk : false;
	unsigned int current_clk = clk_get_rate(host->clk);
	unsigned int clkdiv;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;

	if (host->clkdiv_map) {
		unsigned int freq, best_freq, myclk, div, diff_min, diff;
		int i;

		clkdiv = 0;
		diff_min = ~0;
		best_freq = 0;
		for (i = 31; i >= 0; i--) {
			if (!((1 << i) & host->clkdiv_map))
				continue;

			/*
			 * clk = parent_freq / div
			 * -> parent_freq = clk x div
			 */

			div = 1 << (i + 1);
			freq = clk_round_rate(host->clk, clk * div);
			myclk = freq / div;
			diff = (myclk > clk) ? myclk - clk : clk - myclk;

			if (diff <= diff_min) {
				best_freq = freq;
				clkdiv = i;
				diff_min = diff;
			}
		}

		dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
			(best_freq >> (clkdiv + 1)), clk, best_freq, clkdiv);

		clk_set_rate(host->clk, best_freq);
		clkdiv = clkdiv << 16;
	} else if (sup_pclk && clk == current_clk) {
		clkdiv = CLK_SUP_PCLK;
	} else {
		clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16;
	}

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

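/*
 * Soft-reset the controller while preserving the current clock enable and
 * divider bits of CE_CLK_CTRL, then program the response/busy/write-data
 * timeouts, optional CCS and CLK_CTRL2 settings, and byte-swapping access.
 */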
static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	if (host->ccs_enable)
		tmp |= SCCSTO_29;
	if (host->clk_ctrl2_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

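/*
 * Inspect CE_HOST_STS1/2 after an error interrupt, force-terminate a stuck
 * command sequence if necessary, and translate the status bits into an
 * errno for the MMC core.
 */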
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_err(dev, " CRC error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_err(dev, " Timeout: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(dev, " End/Index error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	}
	return ret;
}

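/*
 * Advance the PIO pointer by one block within the current scatterlist
 * entry, moving to the next entry when the current one is exhausted.
 * Returns true while more data remains to be transferred.
 */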
static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	return host->sg_idx != data->sg_len;
}

static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_READ;

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}

static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}

static void sh_mmcif_single_write(struct sh_mmcif_host *host,
				  struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}

static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
				  struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
				       struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

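/*
 * Translate the mmc_command/mmc_data into a CE_CMD_SET value: response type,
 * busy signalling, data direction and bus width, DDR mode, multi-block
 * handling and index/CRC7 checking, with the opcode placed in bits 29:24.
 */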
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R1B:
		tmp |= CMD_SET_RBSY | CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(dev, "Unsupported response type.\n");
		break;
	}

	/* WDAT / DATW */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(dev, "Unsupported bus width.\n");
			break;
		}
		switch (host->timing) {
		case MMC_TIMING_MMC_DDR52:
			/*
			 * The MMC core only selects this timing when the
			 * host advertises MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR,
			 * which MMCIF implementations with DDR support set
			 * via their platform data, so DARS can be enabled
			 * here.
			 */
			tmp |= CMD_SET_DARS;
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_multi_read(host, mrq);
		return 0;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_multi_write(host, mrq);
		return 0;
	case MMC_WRITE_BLOCK:
		sh_mmcif_single_write(host, mrq);
		return 0;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		sh_mmcif_single_read(host, mrq);
		return 0;
	default:
		dev_err(dev, "Unsupported CMD%d\n", opc);
		return -EINVAL;
	}
}

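/*
 * Top half of the command stage: build the CE_CMD_SET value, program block
 * size, interrupt flags and mask, write the argument and the command, and
 * arm the timeout work before handing over to the IRQ thread.
 */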
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc;
	u32 mask = 0;
	unsigned long flags;

	if (cmd->flags & MMC_RSP_BUSY)
		mask = MASK_START_CMD | MASK_MRBSYE;
	else
		mask = MASK_START_CMD | MASK_MCRSPE;

	if (host->ccs_enable)
		mask |= MASK_MCCSTO;

	if (mrq->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	spin_lock_irqsave(&host->lock, flags);
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
		break;
	default:
		dev_err(dev, "unsupported stop cmd\n");
		mrq->stop->error = sh_mmcif_error_manage(host);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_STOP;
}

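/* The MMC core's .request callback: only accepted when the host is idle. */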
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	host->mrq = mrq;

	sh_mmcif_start_cmd(host, mrq);
}

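/*
 * Derive mmc->f_max/f_min from the interface clock. With an explicit f_max
 * (e.g. from DT max-frequency) the clkdiv_map-based limits are used;
 * otherwise the limits default to parent/2 and parent/512.
 */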
static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	if (host->mmc->f_max) {
		unsigned int f_max, f_min = 0, f_min_old;

		f_max = host->mmc->f_max;
		for (f_min_old = f_max; f_min_old > 2;) {
			f_min = clk_round_rate(host->clk, f_min_old / 2);
			if (f_min == f_min_old)
				break;
			f_min_old = f_min;
		}

		/*
		 * This driver assumes this SoC is R-Car Gen2 or later
		 */
		host->clkdiv_map = 0x3ff;

		host->mmc->f_max = f_max >> ffs(host->clkdiv_map);
		host->mmc->f_min = f_min >> fls(host->clkdiv_map);
	} else {
		unsigned int clk = clk_get_rate(host->clk);

		host->mmc->f_max = clk / 2;
		host->mmc->f_min = clk / 512;
	}

	dev_dbg(dev, "clk max/min = %d/%d\n",
		host->mmc->f_max, host->mmc->f_min);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		if (!host->power) {
			clk_prepare_enable(host->clk);
			pm_runtime_get_sync(dev);
			sh_mmcif_sync_reset(host);
			sh_mmcif_request_dma(host);
			host->power = true;
		}
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		if (host->power) {
			sh_mmcif_clock_control(host, 0);
			sh_mmcif_release_dma(host);
			pm_runtime_put(dev);
			clk_disable_unprepare(host->clk);
			host->power = false;
		}
		break;
	case MMC_POWER_ON:
		sh_mmcif_clock_control(host, ios->clock);
		break;
	}

	host->timing = ios->timing;
	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}

static const struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= mmc_gpio_get_cd,
};

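/*
 * Bottom half of the command stage: collect the response, then either kick
 * off DMA (and wait for its completion here, in the IRQ thread) or set up
 * the PIO state machine. Returns true if further interrupts are expected.
 */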
static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;
	struct device *dev = sh_mmcif_host_to_dev(host);
	long time;

	if (host->sd_error) {
		switch (cmd->opcode) {
		case MMC_ALL_SEND_CID:
		case MMC_SELECT_CARD:
		case MMC_APP_CMD:
			cmd->error = -ETIMEDOUT;
			break;
		default:
			cmd->error = sh_mmcif_error_manage(host);
			break;
		}
		dev_dbg(dev, "CMD%d error %d\n",
			cmd->opcode, cmd->error);
		host->sd_error = false;
		return false;
	}
	if (!(cmd->flags & MMC_RSP_PRESENT)) {
		cmd->error = 0;
		return false;
	}

	sh_mmcif_get_response(host, cmd);

	if (!data)
		return false;

	/*
	 * Completion can be signalled from DMA callback and error, so, have to
	 * reset here, before setting .dma_active
	 */
	init_completion(&host->dma_complete);

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx)
			sh_mmcif_start_dma_rx(host);
	} else {
		if (host->chan_tx)
			sh_mmcif_start_dma_tx(host);
	}

	if (!host->dma_active) {
		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
		return !data->error;
	}

	/* Running in the IRQ thread, can sleep */
	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
							 host->timeout);

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     data->sg, data->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     data->sg, data->sg_len,
			     DMA_TO_DEVICE);

	if (host->sd_error) {
		dev_err(host->mmc->parent,
			"Error IRQ while waiting for DMA completion!\n");
		/* Woken up by an error IRQ: abort DMA */
		data->error = sh_mmcif_error_manage(host);
	} else if (!time) {
		dev_err(host->mmc->parent, "DMA timeout!\n");
		data->error = -ETIMEDOUT;
	} else if (time < 0) {
		dev_err(host->mmc->parent,
			"wait_for_completion_...() error %ld!\n", time);
		data->error = time;
	}
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	host->dma_active = false;

	if (data->error) {
		data->bytes_xfered = 0;
		/* Abort DMA */
		if (data->flags & MMC_DATA_READ)
			dmaengine_terminate_sync(host->chan_rx);
		else
			dmaengine_terminate_sync(host->chan_tx);
	}

	return false;
}

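/*
 * Threaded interrupt handler: runs the bottom half matching host->wait_for,
 * re-arms the timeout while a transfer is still in progress, issues the
 * stop command when needed, and otherwise completes the request.
 */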
static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct mmc_request *mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);
	bool wait = false;
	unsigned long flags;
	int wait_work;

	spin_lock_irqsave(&host->lock, flags);
	wait_work = host->wait_for;
	spin_unlock_irqrestore(&host->lock, flags);

	cancel_delayed_work_sync(&host->timeout_work);

	mutex_lock(&host->thread_lock);

	mrq = host->mrq;
	if (!mrq) {
		dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
			host->state, host->wait_for);
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	/*
	 * All handlers return true, if processing continues, and false, if the
	 * request has to be completed - successfully or not
	 */
	switch (wait_work) {
	case MMCIF_WAIT_FOR_REQUEST:
		/* We're too late, the timeout has already kicked in */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	case MMCIF_WAIT_FOR_CMD:
		/* Wait for data? */
		wait = sh_mmcif_end_cmd(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
		/* Wait for more data */
		wait = sh_mmcif_mread_block(host);
		break;
	case MMCIF_WAIT_FOR_READ:
		/* Wait for data end */
		wait = sh_mmcif_read_block(host);
		break;
	case MMCIF_WAIT_FOR_MWRITE:
		/* Wait for more data to write */
		wait = sh_mmcif_mwrite_block(host);
		break;
	case MMCIF_WAIT_FOR_WRITE:
		/* Wait for data end */
		wait = sh_mmcif_write_block(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		if (host->sd_error) {
			mrq->stop->error = sh_mmcif_error_manage(host);
			dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error);
			break;
		}
		sh_mmcif_get_cmd12response(host, mrq->stop);
		mrq->stop->error = 0;
		break;
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		if (host->sd_error) {
			mrq->data->error = sh_mmcif_error_manage(host);
			dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error);
		}
		break;
	default:
		BUG();
	}

	if (wait) {
		schedule_delayed_work(&host->timeout_work, host->timeout);
		/* Wait for more data */
		mutex_unlock(&host->thread_lock);
		return IRQ_HANDLED;
	}

	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
		struct mmc_data *data = mrq->data;
		if (!mrq->cmd->error && data && !data->error)
			data->bytes_xfered =
				data->blocks * data->blksz;

		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
			sh_mmcif_stop_cmd(host, mrq);
			if (!mrq->stop->error) {
				schedule_delayed_work(&host->timeout_work, host->timeout);
				mutex_unlock(&host->thread_lock);
				return IRQ_HANDLED;
			}
		}
	}

	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->state = STATE_IDLE;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);

	mutex_unlock(&host->thread_lock);

	return IRQ_HANDLED;
}

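/*
 * Hard interrupt handler: acknowledge and mask the raised bits, flag
 * errors, and wake the IRQ thread (or complete the DMA wait on error).
 */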
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
	struct sh_mmcif_host *host = dev_id;
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state, mask;

	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);

	if (state & ~MASK_CLEAN)
		dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n",
			state);

	if (state & INT_ERR_STS || state & ~INT_ALL) {
		host->sd_error = true;
		dev_dbg(dev, "int err state = 0x%08x\n", state);
	}
	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
		if (!host->mrq)
			dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state);
		if (!host->dma_active)
			return IRQ_WAKE_THREAD;
		else if (host->sd_error)
			sh_mmcif_dma_complete(host);
	} else {
		dev_dbg(dev, "Unexpected IRQ 0x%x\n", state);
	}

	return IRQ_HANDLED;
}

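/*
 * Delayed-work timeout handler: if a stage never completed, report the
 * error for the pending command/stop/data and finish the request.
 */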
static void sh_mmcif_timeout_work(struct work_struct *work)
{
	struct delayed_work *d = to_delayed_work(work);
	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host,
						  timeout_work);
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	if (host->dying)
		/* Don't run after mmc_remove_host() */
		return;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state == STATE_IDLE) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_err(dev, "Timeout waiting for %u on CMD%u\n",
		host->wait_for, mrq->cmd->opcode);

	host->state = STATE_TIMEOUT;
	spin_unlock_irqrestore(&host->lock, flags);

	/*
	 * Handle races with cancel_delayed_work(), unless
	 * cancel_delayed_work_sync() is used
	 */
	switch (host->wait_for) {
	case MMCIF_WAIT_FOR_CMD:
		mrq->cmd->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_STOP:
		mrq->stop->error = sh_mmcif_error_manage(host);
		break;
	case MMCIF_WAIT_FOR_MREAD:
	case MMCIF_WAIT_FOR_MWRITE:
	case MMCIF_WAIT_FOR_READ:
	case MMCIF_WAIT_FOR_WRITE:
	case MMCIF_WAIT_FOR_READ_END:
	case MMCIF_WAIT_FOR_WRITE_END:
		mrq->data->error = sh_mmcif_error_manage(host);
		break;
	default:
		BUG();
	}

	host->state = STATE_IDLE;
	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *pd = dev->platform_data;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	if (!pd)
		return;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = pd->ocr;
	else if (pd->ocr)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}

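/*
 * Probe: map registers, set up the mmc_host (caps, limits, OCR), prepare the
 * interface clock, register the threaded IRQ handler(s) and add the host.
 * The clock and the runtime PM reference taken here are dropped again at the
 * end; they are re-acquired from .set_ios() on MMC_POWER_UP.
 */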
static int sh_mmcif_probe(struct platform_device *pdev)
{
	int ret = 0, irq[2];
	struct mmc_host *mmc;
	struct sh_mmcif_host *host;
	struct device *dev = &pdev->dev;
	struct sh_mmcif_plat_data *pd = dev->platform_data;
	void __iomem *reg;
	const char *name;

	irq[0] = platform_get_irq(pdev, 0);
	irq[1] = platform_get_irq_optional(pdev, 1);
	if (irq[0] < 0)
		return -ENXIO;

	reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev);
	if (!mmc)
		return -ENOMEM;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		goto err_host;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->addr = reg;
	host->timeout = msecs_to_jiffies(10000);
	host->ccs_enable = true;
	host->clk_ctrl2_enable = false;

	host->pd = pdev;

	spin_lock_init(&host->lock);

	mmc->ops = &sh_mmcif_ops;
	sh_mmcif_init_ocr(host);

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
	mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
	mmc->max_busy_timeout = 10000;

	if (pd && pd->caps)
		mmc->caps |= pd->caps;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
	mmc->max_seg_size = mmc->max_req_size;

	platform_set_drvdata(pdev, host);

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(dev, "cannot get clock: %d\n", ret);
		goto err_host;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret < 0)
		goto err_host;

	sh_mmcif_clk_setup(host);

	pm_runtime_enable(dev);
	host->power = false;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_clk;

	INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work);

	sh_mmcif_sync_reset(host);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	name = irq[1] < 0 ? dev_name(dev) : "sh_mmc:error";
	ret = devm_request_threaded_irq(dev, irq[0], sh_mmcif_intr,
					sh_mmcif_irqt, 0, name, host);
	if (ret) {
		dev_err(dev, "request_irq error (%s)\n", name);
		goto err_clk;
	}
	if (irq[1] >= 0) {
		ret = devm_request_threaded_irq(dev, irq[1],
						sh_mmcif_intr, sh_mmcif_irqt,
						0, "sh_mmc:int", host);
		if (ret) {
			dev_err(dev, "request_irq error (sh_mmc:int)\n");
			goto err_clk;
		}
	}

	mutex_init(&host->thread_lock);

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto err_clk;

	dev_pm_qos_expose_latency_limit(dev, 100);

	dev_info(dev, "Chip version 0x%04x, clock rate %luMHz\n",
		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
		 clk_get_rate(host->clk) / 1000000UL);

	pm_runtime_put(dev);
	clk_disable_unprepare(host->clk);
	return ret;

err_clk:
	clk_disable_unprepare(host->clk);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
err_host:
	mmc_free_host(mmc);
	return ret;
}

static int sh_mmcif_remove(struct platform_device *pdev)
{
	struct sh_mmcif_host *host = platform_get_drvdata(pdev);

	host->dying = true;
	clk_prepare_enable(host->clk);
	pm_runtime_get_sync(&pdev->dev);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(host->mmc);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

	/*
	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
	 * mmc_remove_host() call above. But swapping order doesn't seem to
	 * improve much, because the controller has to be synchronously
	 * stopped, which would not be done by free_irq().
	 */
	cancel_delayed_work_sync(&host->timeout_work);

	clk_disable_unprepare(host->clk);
	mmc_free_host(host->mmc);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sh_mmcif_suspend(struct device *dev)
{
	struct sh_mmcif_host *host = dev_get_drvdata(dev);

	pm_runtime_get_sync(dev);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
	pm_runtime_put(dev);

	return 0;
}

static int sh_mmcif_resume(struct device *dev)
{
	return 0;
}
#endif

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
};

static struct platform_driver sh_mmcif_driver = {
	.probe		= sh_mmcif_probe,
	.remove		= sh_mmcif_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm	= &sh_mmcif_dev_pm_ops,
		.of_match_table = sh_mmcif_of_match,
	},
};

module_platform_driver(sh_mmcif_driver);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");