// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver
 * (4 bit support).
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"

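/*
 * Thin wrappers around the optional host->dma_ops: each one degrades to a
 * no-op (plain PIO operation) when the platform glue registers no DMA ops.
 */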
static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
				      struct mmc_data *data)
{
	if (host->dma_ops)
		host->dma_ops->start(host, data);
}

static inline void tmio_mmc_end_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops && host->dma_ops->end)
		host->dma_ops->end(host);
}

static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (host->dma_ops)
		host->dma_ops->enable(host, enable);
}

static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
					struct tmio_mmc_data *pdata)
{
	if (host->dma_ops) {
		host->dma_ops->request(host, pdata);
	} else {
		host->chan_tx = NULL;
		host->chan_rx = NULL;
	}
}

static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->release(host);
}

static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->abort(host);
}

static inline void tmio_mmc_dataend_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->dataend(host);
}

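/*
 * host->sdcard_irq_mask mirrors the CTL_IRQ_MASK register: a set bit masks
 * (disables) the corresponding interrupt source.
 */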
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_enable_mmc_irqs);

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_disable_mmc_irqs);

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
}

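/* Scatter-gather iteration state used by the PIO transfer path */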
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}

#define CMDREQ_TIMEOUT	5000	/* msec, used with msecs_to_jiffies() below */

static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		u16 sdio_status;

		/* Keep device active while SDIO irq is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));

		host->sdio_irq_enabled = true;
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ;

		/* Clear obsolete interrupts before enabling */
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
		if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
			sdio_status |= TMIO_SDIO_SETBITS_MASK;
		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}

static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				   unsigned char bus_width)
{
	u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
				& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);

	/* reg now applies to MMC_BUS_WIDTH_4 */
	if (bus_width == MMC_BUS_WIDTH_1)
		reg |= CARD_OPT_WIDTH;
	else if (bus_width == MMC_BUS_WIDTH_8)
		reg |= CARD_OPT_WIDTH8;

	sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}

static void tmio_mmc_reset(struct tmio_mmc_host *host, bool preserve)
{
	u16 card_opt, clk_ctrl, sdif_mode;

	if (preserve) {
		card_opt = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT);
		clk_ctrl = sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL);
		if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
			sdif_mode = sd_ctrl_read16(host, CTL_SDIF_MODE);
	}

	/* Assert the soft reset for ~10 ms, then release it and let the IP settle */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	usleep_range(10000, 11000);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	usleep_range(10000, 11000);

	tmio_mmc_abort_dma(host);

	if (host->reset)
		host->reset(host, preserve);

	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
	host->sdcard_irq_mask = host->sdcard_irq_mask_all;

	if (host->native_hotplug)
		tmio_mmc_enable_mmc_irqs(host,
				TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);

	tmio_mmc_set_bus_width(host, host->mmc->ios.bus_width);

	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
	}

	if (preserve) {
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, card_opt);
		sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk_ctrl);
		if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
			sd_ctrl_write16(host, CTL_SDIF_MODE, sdif_mode);
	}

	if (host->mmc->card)
		mmc_retune_needed(host->mmc);
}

static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq) ||
	    time_is_after_jiffies(host->last_req_ts +
				  msecs_to_jiffies(CMDREQ_TIMEOUT))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		 mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host, true);

	/* Ready for new calls */
	host->mrq = NULL;
	mmc_request_done(host->mmc, mrq);
}

/*
 * These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme.
 */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL; shares the SECURITY_CMD bit */

static int tmio_mmc_start_command(struct tmio_mmc_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:
	case MMC_RSP_R1_NO_CRC:
			   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

	/* FIXME - this seems to be ok commented out but the spec suggests
	 * this bit should be set when issuing app commands.
	 *	if (cmd->flags & MMC_FLAG_ACMD)
	 *		c |= APP_CMD;
	 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_SEC);
			c |= TRANSFER_MULTI;

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED and
			 * SET_BLOCK_COUNT when doing multiple block transfer
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED || host->mrq->sbc))
				c |= NO_CMD12_ISSUE;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
				   unsigned short *buf,
				   unsigned int count)
{
	int is_read = host->data->flags & MMC_DATA_READ;
	u8 *buf8;

	/*
	 * Transfer the data
	 */
	if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
		u32 data = 0;
		u32 *buf32 = (u32 *)buf;

		if (is_read)
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32,
					   count >> 2);
		else
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32,
					    count >> 2);

		/* if count was multiple of 4 */
		if (!(count & 0x3))
			return;

		buf32 += count >> 2;
		count %= 4;

		if (is_read) {
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1);
			memcpy(buf32, &data, count);
		} else {
			memcpy(&data, buf32, count);
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1);
		}

		return;
	}

	if (is_read)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was even number */
	if (!(count & 0x1))
		return;

	/* if count was odd number */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * FIXME
	 *
	 * driver and this function are assuming that
	 * it is used as little endian
	 */
	if (is_read)
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
	else
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}

/*
 * This chip always returns (at least?) as much data as you ask for. It is
 * unclear what happens if you ask for less than a block - a funny length
 * read could potentially confuse the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if (host->dma_on) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	tmio_mmc_transfer_data(host, buf, count);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}

static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);

		/* copy the DMA bounce buffer back into the original sg entry */
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}

/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we dont do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */
	if (data->flags & MMC_DATA_READ) {
		if (host->dma_on)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop && !host->mrq->sbc) {
		if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg)
			dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n",
				stop->opcode, stop->arg);

		/* fill in response from auto CMD12 */
		stop->resp[0] = sd_ctrl_read16_and_16_as_32(host, CTL_RESPONSE);

		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0);
	}

	schedule_work(&host->done);
}
EXPORT_SYMBOL_GPL(tmio_mmc_do_data_irq);

static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_data *data;

	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (stat & TMIO_STAT_DATATIMEOUT)
		data->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
		 stat & TMIO_STAT_TXUNDERRUN)
		data->error = -EILSEQ;
	if (host->dma_on && (data->flags & MMC_DATA_WRITE)) {
		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
		bool done = false;

		/*
		 * Has all data been written out yet? On controllers with the
		 * IDLE_WAIT feature, SCLKDIVEN signals completion; on the
		 * others we have to wait for CMD_BUSY to clear before the
		 * write can be considered finished.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_SCLKDIVEN)
				done = true;
		} else {
			if (!(status & TMIO_STAT_CMD_BUSY))
				done = true;
		}

		if (done) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_dataend_dma(host);
		}
	} else if (host->dma_on && (data->flags & MMC_DATA_READ)) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_dataend_dma(host);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	/*
	 * This controller needs the top 8 bits of the first response word
	 * dropped, and the order of the response words modified for short
	 * response command types.
	 */
	for (i = 3, addr = CTL_RESPONSE; i >= 0; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
		 stat & TMIO_STAT_STOPBIT_ERR ||
		 stat & TMIO_STAT_CMD_IDX_ERR)
		cmd->error = -EILSEQ;

	/*
	 * If there is data to handle we enable data IRQs here, and we will
	 * ultimately finish the request in the data end handler. If there is
	 * no data or we encountered an error, finish now.
	 */
	if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
		if (host->data->flags & MMC_DATA_READ) {
			if (!host->dma_on) {
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			} else {
				tmio_mmc_disable_mmc_irqs(host,
							  TMIO_MASK_READOP);
				tasklet_schedule(&host->dma_issue);
			}
		} else {
			if (!host->dma_on) {
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			} else {
				tmio_mmc_disable_mmc_irqs(host,
							  TMIO_MASK_WRITEOP);
				tasklet_schedule(&host->dma_issue);
			}
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}

static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
				       int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
				      TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}

static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
				  int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CMDRESPEND |
				      TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host, status);
		return true;
	}

	return false;
}

static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;
	unsigned int sdio_status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return false;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
		sdio_status |= TMIO_SDIO_SETBITS_MASK;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);

	return ireg;
}

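/*
 * Top-level interrupt handler: reads the latched status, then dispatches to
 * the card-detect, SD/MMC and SDIO sub-handlers in turn.
 */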
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
	ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	/* Clear the status except the interrupt status */
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);

	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	if (__tmio_mmc_sdio_irq(host))
		return IRQ_HANDLED;

	return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(tmio_mmc_irq);

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;
	host->dma_on = false;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	if (host->mmc->max_blk_count >= SZ_64K)
		sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks);
	else
		sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

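/* Issue a request: send SET_BLOCK_COUNT (sbc) first if present, then the main command */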
static void tmio_process_mrq(struct tmio_mmc_host *host,
			     struct mmc_request *mrq)
{
	struct mmc_command *cmd;
	int ret;

	if (mrq->sbc && host->cmd != mrq->sbc) {
		cmd = mrq->sbc;
	} else {
		cmd = mrq->cmd;
		if (mrq->data) {
			ret = tmio_mmc_start_data(host, mrq->data);
			if (ret)
				goto fail;
		}
	}

	ret = tmio_mmc_start_command(host, cmd);
	if (ret)
		goto fail;

	schedule_delayed_work(&host->delayed_reset_work,
			      msecs_to_jiffies(CMDREQ_TIMEOUT));
	return;

fail:
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(host->mmc, mrq);
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_process_mrq(host, mrq);
}

static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	tmio_mmc_end_dma(host);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	/* If not SET_BLOCK_COUNT, clear old data */
	if (host->cmd != mrq->sbc) {
		host->cmd = NULL;
		host->data = NULL;
		host->mrq = NULL;
	}

	cancel_delayed_work(&host->delayed_reset_work);

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_MASK_IRQ); /* Clear all */
		tmio_mmc_abort_dma(host);
	}

	/* Error means retune, but executed command was still successful */
	if (host->check_retune && host->check_retune(host, mrq))
		mmc_retune_needed(host->mmc);

	/* If SET_BLOCK_COUNT, continue with main command */
	if (host->mrq && !mrq->cmd->error) {
		tmio_process_mrq(host, mrq);
		return;
	}

	if (host->fixup_request)
		host->fixup_request(host, mrq);

	mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}

static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() is returning void, so, if we fail, report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empiric value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card detection, probing
		 * works with 100 us, but 200 us seems to be safer.
		 */
		usleep_range(200, 300);
	}

	/*
	 * It seems, VccQ should be switched on after Vcc, this is also what
	 * the omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		usleep_range(200, 300);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}

static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}

static unsigned int tmio_mmc_get_timeout_cycles(struct tmio_mmc_host *host)
{
	u16 val = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT);

	val = (val & CARD_OPT_TOP_MASK) >> CARD_OPT_TOP_SHIFT;
	return 1 << (13 + val);	/* timeout is 2^(13 + TOP field) SD clock cycles */
}

static void tmio_mmc_max_busy_timeout(struct tmio_mmc_host *host)
{
	unsigned int clk_rate = host->mmc->actual_clock ?: host->mmc->f_max;

	host->mmc->max_busy_timeout = host->get_timeout_cycles(host) /
				      (clk_rate / MSEC_PER_SEC);
}

/*
 * Set MMC clock and power.
 * Note: This controller uses a simple divider scheme, therefore it cannot run
 * an MMC card at full speed (20 MHz). The max clock is 24 MHz on SD, but as
 * MMC won't run that fast, it has to be clocked down to 12.5 MHz to protect
 * the card.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts,
				jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		tmio_mmc_power_off(host);
		/* For R-Car Gen2+, we need to reset SDHI specific SCC */
		if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
			tmio_mmc_reset(host, false);

		host->set_clock(host, 0);
		break;
	case MMC_POWER_UP:
		tmio_mmc_power_on(host, ios->vdd);
		host->set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	case MMC_POWER_ON:
		host->set_clock(host, ios->clock);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	}

	if (host->pdata->flags & TMIO_MMC_USE_BUSY_TIMEOUT)
		tmio_mmc_max_busy_timeout(host);

	/* Let things settle. Delay taken from a WinCE driver */
	usleep_range(140, 200);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u\n",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	host->clk_cache = ios->clock;

	mutex_unlock(&host->ios_lock);
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
		 TMIO_STAT_WRPROTECT);
}

static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	return !!(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) &
		  TMIO_STAT_SIGSTATE);
}

static int tmio_multi_io_quirk(struct mmc_card *card,
			       unsigned int direction, int blk_size)
{
	struct tmio_mmc_host *host = mmc_priv(card->host);

	if (host->multi_io_quirk)
		return host->multi_io_quirk(card, direction, blk_size);

	return blk_size;
}

static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
	.multi_io_quirk	= tmio_multi_io_quirk,
};

static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;
	int err;

	err = mmc_regulator_get_supply(mmc);
	if (err)
		return err;

	/* use ocr_mask if no regulator */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = pdata->ocr_mask;

	/*
	 * try again.
	 * There is a possibility that the regulator has not been probed yet.
	 */
	if (!mmc->ocr_avail)
		return -EPROBE_DEFER;

	return 0;
}

static void tmio_mmc_of_parse(struct platform_device *pdev,
			      struct mmc_host *mmc)
{
	const struct device_node *np = pdev->dev.of_node;

	if (!np)
		return;

	/*
	 * DEPRECATED:
	 * For new platforms, please use "disable-wp" instead of
	 * "toshiba,mmc-wrprotect-disable"
	 */
	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL))
		mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
}

struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
					  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;
	void __iomem *ctl;
	int ret;

	ctl = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ctl))
		return ERR_CAST(ctl);

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->ctl = ctl;
	host->mmc = mmc;
	host->pdev = pdev;
	host->pdata = pdata;
	host->ops = tmio_mmc_ops;
	mmc->ops = &host->ops;

	ret = mmc_of_parse(host->mmc);
	if (ret) {
		host = ERR_PTR(ret);
		goto free;
	}

	tmio_mmc_of_parse(pdev, mmc);

	platform_set_drvdata(pdev, host);

	return host;
free:
	mmc_free_host(mmc);

	return host;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_alloc);

void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);

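/*
 * Note: the platform glue must have filled in host->set_clock (and, where
 * used, host->clk_enable/clk_disable, host->reset and host->dma_ops) before
 * calling tmio_mmc_host_probe().
 */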
int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
{
	struct platform_device *pdev = _host->pdev;
	struct tmio_mmc_data *pdata = _host->pdata;
	struct mmc_host *mmc = _host->mmc;
	int ret;

	/*
	 * Check the sanity of mmc->f_min to prevent host->set_clock() from
	 * looping forever...
	 */
	if (mmc->f_min == 0)
		return -EINVAL;

	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
		_host->write16_hook = NULL;

	if (pdata->flags & TMIO_MMC_USE_BUSY_TIMEOUT && !_host->get_timeout_cycles)
		_host->get_timeout_cycles = tmio_mmc_get_timeout_cycles;

	_host->set_pwr = pdata->set_pwr;

	ret = tmio_mmc_init_ocr(_host);
	if (ret < 0)
		return ret;

	/*
	 * Look for a card detect GPIO, if it fails with anything
	 * else than a probe deferral, just live without it.
	 */
	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
	if (ret == -EPROBE_DEFER)
		return ret;

	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 |= pdata->capabilities2;
	mmc->max_segs = pdata->max_segs ?: 32;
	mmc->max_blk_size = TMIO_MAX_BLK_SIZE;
	mmc->max_blk_count = pdata->max_blk_count ?:
		(PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
	mmc->max_req_size = min_t(size_t,
				  mmc->max_blk_size * mmc->max_blk_count,
				  dma_max_mapping_size(&pdev->dev));
	mmc->max_seg_size = mmc->max_req_size;

	if (mmc_can_gpio_ro(mmc))
		_host->ops.get_ro = mmc_gpio_get_ro;

	if (mmc_can_gpio_cd(mmc))
		_host->ops.get_cd = mmc_gpio_get_cd;

	/* must be set before tmio_mmc_reset() */
	_host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
				  mmc->caps & MMC_CAP_NEEDS_POLL ||
				  !mmc_card_is_removable(mmc));

	/*
	 * While using internal tmio hardware logic for card detection, we need
	 * to ensure it stays powered for it to work.
	 */
	if (_host->native_hotplug)
		pm_runtime_get_noresume(&pdev->dev);

	_host->sdio_irq_enabled = false;
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		_host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;

	if (!_host->sdcard_irq_mask_all)
		_host->sdcard_irq_mask_all = TMIO_MASK_ALL;

	_host->set_clock(_host, 0);
	tmio_mmc_reset(_host, false);

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = mmc_add_host(mmc);
	if (ret)
		goto remove_host;

	dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
	pm_runtime_put(&pdev->dev);

	return 0;

remove_host:
	pm_runtime_put_noidle(&pdev->dev);
	tmio_mmc_host_remove(_host);
	return ret;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);

void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;
	struct mmc_host *mmc = host->mmc;

	pm_runtime_get_sync(&pdev->dev);

	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

	dev_pm_qos_hide_latency_limit(&pdev->dev);

	mmc_remove_host(mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);
	tmio_mmc_disable_mmc_irqs(host, host->sdcard_irq_mask_all);

	if (host->native_hotplug)
		pm_runtime_put_noidle(&pdev->dev);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);

#ifdef CONFIG_PM
static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
{
	if (!host->clk_enable)
		return -ENOTSUPP;

	return host->clk_enable(host);
}

static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
{
	if (host->clk_disable)
		host->clk_disable(host);
}

int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	struct tmio_mmc_host *host = dev_get_drvdata(dev);

	tmio_mmc_disable_mmc_irqs(host, host->sdcard_irq_mask_all);

	if (host->clk_cache)
		host->set_clock(host, 0);

	tmio_mmc_clk_disable(host);

	return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_suspend);

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct tmio_mmc_host *host = dev_get_drvdata(dev);

	tmio_mmc_clk_enable(host);
	tmio_mmc_reset(host, false);

	if (host->clk_cache)
		host->set_clock(host, host->clk_cache);

	tmio_mmc_enable_dma(host, true);

	return 0;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_runtime_resume);
#endif

MODULE_LICENSE("GPL v2");