// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
 *
 *  JZ4740 SD/MMC controller driver
 */

0009 #include <linux/bitops.h>
0010 #include <linux/clk.h>
0011 #include <linux/delay.h>
0012 #include <linux/dmaengine.h>
0013 #include <linux/dma-mapping.h>
0014 #include <linux/err.h>
0015 #include <linux/interrupt.h>
0016 #include <linux/io.h>
0017 #include <linux/irq.h>
0018 #include <linux/mmc/host.h>
0019 #include <linux/mmc/slot-gpio.h>
0020 #include <linux/module.h>
0021 #include <linux/of_device.h>
0022 #include <linux/pinctrl/consumer.h>
0023 #include <linux/platform_device.h>
0024 #include <linux/scatterlist.h>
0025
0026 #include <asm/cacheflush.h>
0027
0028 #define JZ_REG_MMC_STRPCL 0x00
0029 #define JZ_REG_MMC_STATUS 0x04
0030 #define JZ_REG_MMC_CLKRT 0x08
0031 #define JZ_REG_MMC_CMDAT 0x0C
0032 #define JZ_REG_MMC_RESTO 0x10
0033 #define JZ_REG_MMC_RDTO 0x14
0034 #define JZ_REG_MMC_BLKLEN 0x18
0035 #define JZ_REG_MMC_NOB 0x1C
0036 #define JZ_REG_MMC_SNOB 0x20
0037 #define JZ_REG_MMC_IMASK 0x24
0038 #define JZ_REG_MMC_IREG 0x28
0039 #define JZ_REG_MMC_CMD 0x2C
0040 #define JZ_REG_MMC_ARG 0x30
0041 #define JZ_REG_MMC_RESP_FIFO 0x34
0042 #define JZ_REG_MMC_RXFIFO 0x38
0043 #define JZ_REG_MMC_TXFIFO 0x3C
0044 #define JZ_REG_MMC_LPM 0x40
0045 #define JZ_REG_MMC_DMAC 0x44
0046
0047 #define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7)
0048 #define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6)
0049 #define JZ_MMC_STRPCL_START_READWAIT BIT(5)
0050 #define JZ_MMC_STRPCL_STOP_READWAIT BIT(4)
0051 #define JZ_MMC_STRPCL_RESET BIT(3)
0052 #define JZ_MMC_STRPCL_START_OP BIT(2)
0053 #define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0))
0054 #define JZ_MMC_STRPCL_CLOCK_STOP BIT(0)
0055 #define JZ_MMC_STRPCL_CLOCK_START BIT(1)
0056
0057
0058 #define JZ_MMC_STATUS_IS_RESETTING BIT(15)
0059 #define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14)
0060 #define JZ_MMC_STATUS_PRG_DONE BIT(13)
0061 #define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12)
0062 #define JZ_MMC_STATUS_END_CMD_RES BIT(11)
0063 #define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10)
0064 #define JZ_MMC_STATUS_IS_READWAIT BIT(9)
0065 #define JZ_MMC_STATUS_CLK_EN BIT(8)
0066 #define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7)
0067 #define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6)
0068 #define JZ_MMC_STATUS_CRC_RES_ERR BIT(5)
0069 #define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4)
0070 #define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3)
0071 #define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2)
0072 #define JZ_MMC_STATUS_TIMEOUT_RES BIT(1)
0073 #define JZ_MMC_STATUS_TIMEOUT_READ BIT(0)
0074
0075 #define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0))
0076 #define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2))
0077
0078
0079 #define JZ_MMC_CMDAT_IO_ABORT BIT(11)
0080 #define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10)
0081 #define JZ_MMC_CMDAT_BUS_WIDTH_8BIT (BIT(10) | BIT(9))
0082 #define JZ_MMC_CMDAT_BUS_WIDTH_MASK (BIT(10) | BIT(9))
0083 #define JZ_MMC_CMDAT_DMA_EN BIT(8)
0084 #define JZ_MMC_CMDAT_INIT BIT(7)
0085 #define JZ_MMC_CMDAT_BUSY BIT(6)
0086 #define JZ_MMC_CMDAT_STREAM BIT(5)
0087 #define JZ_MMC_CMDAT_WRITE BIT(4)
0088 #define JZ_MMC_CMDAT_DATA_EN BIT(3)
0089 #define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0))
0090 #define JZ_MMC_CMDAT_RSP_R1 1
0091 #define JZ_MMC_CMDAT_RSP_R2 2
0092 #define JZ_MMC_CMDAT_RSP_R3 3
0093
0094 #define JZ_MMC_IRQ_SDIO BIT(7)
0095 #define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6)
0096 #define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5)
0097 #define JZ_MMC_IRQ_END_CMD_RES BIT(2)
0098 #define JZ_MMC_IRQ_PRG_DONE BIT(1)
0099 #define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0)
0100
0101 #define JZ_MMC_DMAC_DMA_SEL BIT(1)
0102 #define JZ_MMC_DMAC_DMA_EN BIT(0)
0103
0104 #define JZ_MMC_LPM_DRV_RISING BIT(31)
0105 #define JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY BIT(31)
0106 #define JZ_MMC_LPM_DRV_RISING_1NS_DLY BIT(30)
0107 #define JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY BIT(29)
0108 #define JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0)
0109
0110 #define JZ_MMC_CLK_RATE 24000000
0111 #define JZ_MMC_REQ_TIMEOUT_MS 5000
0112
0113 enum jz4740_mmc_version {
0114 JZ_MMC_JZ4740,
0115 JZ_MMC_JZ4725B,
0116 JZ_MMC_JZ4760,
0117 JZ_MMC_JZ4780,
0118 JZ_MMC_X1000,
0119 };
0120
0121 enum jz4740_mmc_state {
0122 JZ4740_MMC_STATE_READ_RESPONSE,
0123 JZ4740_MMC_STATE_TRANSFER_DATA,
0124 JZ4740_MMC_STATE_SEND_STOP,
0125 JZ4740_MMC_STATE_DONE,
0126 };
0127
/*
 * The MMC core allows a mmc_request to be prepared while another request is
 * still in flight, via the pre_req/post_req hooks. This driver uses those
 * hooks to map/unmap the scatterlist for DMA and tracks the mapping state
 * with a cookie stored in data->host_cookie:
 *
 * COOKIE_UNMAPPED:  the scatterlist is not mapped
 * COOKIE_PREMAPPED: the scatterlist was mapped in pre_req
 * COOKIE_MAPPED:    the scatterlist was mapped in the request path
 */
0141 enum jz4780_cookie {
0142 COOKIE_UNMAPPED = 0,
0143 COOKIE_PREMAPPED,
0144 COOKIE_MAPPED,
0145 };
0146
0147 struct jz4740_mmc_host {
0148 struct mmc_host *mmc;
0149 struct platform_device *pdev;
0150 struct clk *clk;
0151
0152 enum jz4740_mmc_version version;
0153
0154 int irq;
0155
0156 void __iomem *base;
0157 struct resource *mem_res;
0158 struct mmc_request *req;
0159 struct mmc_command *cmd;
0160
0161 unsigned long waiting;
0162
0163 uint32_t cmdat;
0164
0165 uint32_t irq_mask;
0166
0167 spinlock_t lock;
0168
0169 struct timer_list timeout_timer;
0170 struct sg_mapping_iter miter;
0171 enum jz4740_mmc_state state;
0172
/* DMA support */
0174 struct dma_chan *dma_rx;
0175 struct dma_chan *dma_tx;
0176 bool use_dma;
0177
/*
 * The DMA request trigger level is half the data FIFO, i.e. 8 words;
 * use that as the DMA burst size.
 */
0182 #define JZ4740_MMC_FIFO_HALF_SIZE 8
0183 };
0184
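/*
 * The interrupt mask and status registers widened from 16 to 32 bits on
 * later SoC generations, so pick the MMIO access width from the detected
 * controller version.
 */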
0185 static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
0186 uint32_t val)
0187 {
0188 if (host->version >= JZ_MMC_JZ4725B)
0189 return writel(val, host->base + JZ_REG_MMC_IMASK);
0190 else
0191 return writew(val, host->base + JZ_REG_MMC_IMASK);
0192 }
0193
0194 static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
0195 uint32_t val)
0196 {
0197 if (host->version >= JZ_MMC_JZ4780)
0198 writel(val, host->base + JZ_REG_MMC_IREG);
0199 else
0200 writew(val, host->base + JZ_REG_MMC_IREG);
0201 }
0202
0203 static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
0204 {
0205 if (host->version >= JZ_MMC_JZ4780)
0206 return readl(host->base + JZ_REG_MMC_IREG);
0207 else
0208 return readw(host->base + JZ_REG_MMC_IREG);
0209 }
0210
0211
0212
0213
0214 static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
0215 {
0216 if (!host->use_dma)
0217 return;
0218
0219 dma_release_channel(host->dma_tx);
0220 if (host->dma_rx)
0221 dma_release_channel(host->dma_rx);
0222 }
0223
0224 static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
0225 {
0226 struct device *dev = mmc_dev(host->mmc);
0227
0228 host->dma_tx = dma_request_chan(dev, "tx-rx");
0229 if (!IS_ERR(host->dma_tx))
0230 return 0;
0231
0232 if (PTR_ERR(host->dma_tx) != -ENODEV) {
0233 dev_err(dev, "Failed to get dma tx-rx channel\n");
0234 return PTR_ERR(host->dma_tx);
0235 }
0236
0237 host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
0238 if (IS_ERR(host->dma_tx)) {
0239 dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
0240 return PTR_ERR(host->dma_tx);
0241 }
0242
0243 host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
0244 if (IS_ERR(host->dma_rx)) {
0245 dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
0246 dma_release_channel(host->dma_tx);
0247 return PTR_ERR(host->dma_rx);
0248 }
0249
/*
 * Limit the maximum segment size in any SG entry according to
 * the parameters of the DMA engine device.
 */
0254 if (host->dma_tx) {
0255 struct device *dev = host->dma_tx->device->dev;
0256 unsigned int max_seg_size = dma_get_max_seg_size(dev);
0257
0258 if (max_seg_size < host->mmc->max_seg_size)
0259 host->mmc->max_seg_size = max_seg_size;
0260 }
0261
0262 if (host->dma_rx) {
0263 struct device *dev = host->dma_rx->device->dev;
0264 unsigned int max_seg_size = dma_get_max_seg_size(dev);
0265
0266 if (max_seg_size < host->mmc->max_seg_size)
0267 host->mmc->max_seg_size = max_seg_size;
0268 }
0269
0270 return 0;
0271 }
0272
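/*
 * Reads use the dedicated RX channel when one was acquired; otherwise
 * (shared "tx-rx" channel, or writes) the TX channel is used.
 */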
0273 static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
0274 struct mmc_data *data)
0275 {
0276 if ((data->flags & MMC_DATA_READ) && host->dma_rx)
0277 return host->dma_rx;
0278 else
0279 return host->dma_tx;
0280 }
0281
0282 static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
0283 struct mmc_data *data)
0284 {
0285 struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
0286 enum dma_data_direction dir = mmc_get_dma_dir(data);
0287
0288 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
0289 data->host_cookie = COOKIE_UNMAPPED;
0290 }
0291
0292
0293
0294
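/*
 * Map the request's scatterlist for DMA unless it was already mapped in
 * the pre_req hook (COOKIE_PREMAPPED). Returns the number of mapped
 * segments, or a negative error code if the mapping failed.
 */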
0295 static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
0296 struct mmc_data *data,
0297 int cookie)
0298 {
0299 struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
0300 enum dma_data_direction dir = mmc_get_dma_dir(data);
0301 int sg_count;
0302
0303 if (data->host_cookie == COOKIE_PREMAPPED)
0304 return data->sg_count;
0305
0306 sg_count = dma_map_sg(chan->device->dev,
0307 data->sg,
0308 data->sg_len,
0309 dir);
0310
0311 if (sg_count <= 0) {
0312 dev_err(mmc_dev(host->mmc),
0313 "Failed to map scatterlist for DMA operation\n");
0314 return -EINVAL;
0315 }
0316
0317 data->sg_count = sg_count;
0318 data->host_cookie = cookie;
0319
0320 return data->sg_count;
0321 }
0322
0323 static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
0324 struct mmc_data *data)
0325 {
0326 struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
0327 struct dma_async_tx_descriptor *desc;
0328 struct dma_slave_config conf = {
0329 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
0330 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
0331 .src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
0332 .dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
0333 };
0334 int sg_count;
0335
0336 if (data->flags & MMC_DATA_WRITE) {
0337 conf.direction = DMA_MEM_TO_DEV;
0338 conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
0339 } else {
0340 conf.direction = DMA_DEV_TO_MEM;
0341 conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
0342 }
0343
0344 sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
0345 if (sg_count < 0)
0346 return sg_count;
0347
0348 dmaengine_slave_config(chan, &conf);
0349 desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
0350 conf.direction,
0351 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
0352 if (!desc) {
0353 dev_err(mmc_dev(host->mmc),
0354 "Failed to allocate DMA %s descriptor",
0355 conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
0356 goto dma_unmap;
0357 }
0358
0359 dmaengine_submit(desc);
0360 dma_async_issue_pending(chan);
0361
0362 return 0;
0363
0364 dma_unmap:
0365 if (data->host_cookie == COOKIE_MAPPED)
0366 jz4740_mmc_dma_unmap(host, data);
0367 return -ENOMEM;
0368 }
0369
0370 static void jz4740_mmc_pre_request(struct mmc_host *mmc,
0371 struct mmc_request *mrq)
0372 {
0373 struct jz4740_mmc_host *host = mmc_priv(mmc);
0374 struct mmc_data *data = mrq->data;
0375
0376 if (!host->use_dma)
0377 return;
0378
0379 data->host_cookie = COOKIE_UNMAPPED;
0380 if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
0381 data->host_cookie = COOKIE_UNMAPPED;
0382 }
0383
0384 static void jz4740_mmc_post_request(struct mmc_host *mmc,
0385 struct mmc_request *mrq,
0386 int err)
0387 {
0388 struct jz4740_mmc_host *host = mmc_priv(mmc);
0389 struct mmc_data *data = mrq->data;
0390
0391 if (data && data->host_cookie != COOKIE_UNMAPPED)
0392 jz4740_mmc_dma_unmap(host, data);
0393
0394 if (err) {
0395 struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
0396
0397 dmaengine_terminate_all(chan);
0398 }
0399 }
0400
0401
0402
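/*
 * A set bit in host->irq_mask masks (disables) the corresponding interrupt;
 * update the cached mask and the IMASK register under host->lock.
 */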
0403 static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
0404 unsigned int irq, bool enabled)
0405 {
0406 unsigned long flags;
0407
0408 spin_lock_irqsave(&host->lock, flags);
0409 if (enabled)
0410 host->irq_mask &= ~irq;
0411 else
0412 host->irq_mask |= irq;
0413
0414 jz4740_mmc_write_irq_mask(host, host->irq_mask);
0415 spin_unlock_irqrestore(&host->lock, flags);
0416 }
0417
0418 static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
0419 bool start_transfer)
0420 {
0421 uint16_t val = JZ_MMC_STRPCL_CLOCK_START;
0422
0423 if (start_transfer)
0424 val |= JZ_MMC_STRPCL_START_OP;
0425
0426 writew(val, host->base + JZ_REG_MMC_STRPCL);
0427 }
0428
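/*
 * Stop the card clock and poll, with a bounded retry count, until the
 * controller reports the clock as disabled.
 */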
0429 static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
0430 {
0431 uint32_t status;
0432 unsigned int timeout = 1000;
0433
0434 writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
0435 do {
0436 status = readl(host->base + JZ_REG_MMC_STATUS);
0437 } while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
0438 }
0439
0440 static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
0441 {
0442 uint32_t status;
0443 unsigned int timeout = 1000;
0444
0445 writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
0446 udelay(10);
0447 do {
0448 status = readl(host->base + JZ_REG_MMC_STATUS);
0449 } while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
0450 }
0451
0452 static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
0453 {
0454 struct mmc_request *req;
0455 struct mmc_data *data;
0456
0457 req = host->req;
0458 data = req->data;
0459 host->req = NULL;
0460
0461 if (data && data->host_cookie == COOKIE_MAPPED)
0462 jz4740_mmc_dma_unmap(host, data);
0463 mmc_request_done(host->mmc, req);
0464 }
0465
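/*
 * Busy-poll IREG for the given interrupt flag. If it does not assert within
 * the poll budget, arm the request timeout timer, unmask the interrupt so the
 * transfer can resume from interrupt context, and return true ("timed out");
 * return false once the flag is seen.
 */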
0466 static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
0467 unsigned int irq)
0468 {
0469 unsigned int timeout = 0x800;
0470 uint32_t status;
0471
0472 do {
0473 status = jz4740_mmc_read_irq_reg(host);
0474 } while (!(status & irq) && --timeout);
0475
0476 if (timeout == 0) {
0477 set_bit(0, &host->waiting);
0478 mod_timer(&host->timeout_timer,
0479 jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
0480 jz4740_mmc_set_irq_enabled(host, irq, true);
0481 return true;
0482 }
0483
0484 return false;
0485 }
0486
0487 static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
0488 struct mmc_data *data)
0489 {
0490 int status;
0491
0492 status = readl(host->base + JZ_REG_MMC_STATUS);
0493 if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
0494 if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
0495 host->req->cmd->error = -ETIMEDOUT;
0496 data->error = -ETIMEDOUT;
0497 } else {
0498 host->req->cmd->error = -EIO;
0499 data->error = -EIO;
0500 }
0501 } else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
0502 if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
0503 host->req->cmd->error = -ETIMEDOUT;
0504 data->error = -ETIMEDOUT;
0505 } else {
0506 host->req->cmd->error = -EIO;
0507 data->error = -EIO;
0508 }
0509 }
0510 }
0511
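/*
 * PIO write path: wait for the TX FIFO write-request flag, then feed the FIFO
 * in bursts of eight 32-bit words (half the FIFO). Returns true if polling
 * timed out and the transfer must be resumed from interrupt context.
 */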
0512 static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
0513 struct mmc_data *data)
0514 {
0515 struct sg_mapping_iter *miter = &host->miter;
0516 void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
0517 uint32_t *buf;
0518 bool timeout;
0519 size_t i, j;
0520
0521 while (sg_miter_next(miter)) {
0522 buf = miter->addr;
0523 i = miter->length / 4;
0524 j = i / 8;
0525 i = i & 0x7;
0526 while (j) {
0527 timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
0528 if (unlikely(timeout))
0529 goto poll_timeout;
0530
0531 writel(buf[0], fifo_addr);
0532 writel(buf[1], fifo_addr);
0533 writel(buf[2], fifo_addr);
0534 writel(buf[3], fifo_addr);
0535 writel(buf[4], fifo_addr);
0536 writel(buf[5], fifo_addr);
0537 writel(buf[6], fifo_addr);
0538 writel(buf[7], fifo_addr);
0539 buf += 8;
0540 --j;
0541 }
0542 if (unlikely(i)) {
0543 timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
0544 if (unlikely(timeout))
0545 goto poll_timeout;
0546
0547 while (i) {
0548 writel(*buf, fifo_addr);
0549 ++buf;
0550 --i;
0551 }
0552 }
0553 data->bytes_xfered += miter->length;
0554 }
0555 sg_miter_stop(miter);
0556
0557 return false;
0558
0559 poll_timeout:
0560 miter->consumed = (void *)buf - miter->addr;
0561 data->bytes_xfered += miter->consumed;
0562 sg_miter_stop(miter);
0563
0564 return true;
0565 }
0566
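/*
 * PIO read path, mirroring the write path: drain the RX FIFO in bursts of
 * eight words per 32 bytes of payload, handle the sub-word tail with a
 * memcpy(), and finally flush anything the controller left in the FIFO.
 */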
0567 static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
0568 struct mmc_data *data)
0569 {
0570 struct sg_mapping_iter *miter = &host->miter;
0571 void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
0572 uint32_t *buf;
0573 uint32_t d;
0574 uint32_t status;
0575 size_t i, j;
0576 unsigned int timeout;
0577
0578 while (sg_miter_next(miter)) {
0579 buf = miter->addr;
0580 i = miter->length;
0581 j = i / 32;
0582 i = i & 0x1f;
0583 while (j) {
0584 timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
0585 if (unlikely(timeout))
0586 goto poll_timeout;
0587
0588 buf[0] = readl(fifo_addr);
0589 buf[1] = readl(fifo_addr);
0590 buf[2] = readl(fifo_addr);
0591 buf[3] = readl(fifo_addr);
0592 buf[4] = readl(fifo_addr);
0593 buf[5] = readl(fifo_addr);
0594 buf[6] = readl(fifo_addr);
0595 buf[7] = readl(fifo_addr);
0596
0597 buf += 8;
0598 --j;
0599 }
0600
0601 if (unlikely(i)) {
0602 timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
0603 if (unlikely(timeout))
0604 goto poll_timeout;
0605
0606 while (i >= 4) {
0607 *buf++ = readl(fifo_addr);
0608 i -= 4;
0609 }
0610 if (unlikely(i > 0)) {
0611 d = readl(fifo_addr);
0612 memcpy(buf, &d, i);
0613 }
0614 }
0615 data->bytes_xfered += miter->length;
0616 }
0617 sg_miter_stop(miter);
0618
/*
 * Flush the RX FIFO: the controller sometimes leaves one more word in it
 * than was requested.
 */
0621 timeout = 1000;
0622 status = readl(host->base + JZ_REG_MMC_STATUS);
0623 while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
0624 d = readl(fifo_addr);
0625 status = readl(host->base + JZ_REG_MMC_STATUS);
0626 }
0627
0628 return false;
0629
0630 poll_timeout:
0631 miter->consumed = (void *)buf - miter->addr;
0632 data->bytes_xfered += miter->consumed;
0633 sg_miter_stop(miter);
0634
0635 return true;
0636 }
0637
0638 static void jz4740_mmc_timeout(struct timer_list *t)
0639 {
0640 struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);
0641
0642 if (!test_and_clear_bit(0, &host->waiting))
0643 return;
0644
0645 jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);
0646
0647 host->req->cmd->error = -ETIMEDOUT;
0648 jz4740_mmc_request_done(host);
0649 }
0650
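/*
 * The response FIFO is read 16 bits at a time; reassemble the halfwords into
 * the 32-bit resp[] words, shifting across reads for 136-bit (R2) responses.
 */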
0651 static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
0652 struct mmc_command *cmd)
0653 {
0654 int i;
0655 uint16_t tmp;
0656 void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;
0657
0658 if (cmd->flags & MMC_RSP_136) {
0659 tmp = readw(fifo_addr);
0660 for (i = 0; i < 4; ++i) {
0661 cmd->resp[i] = tmp << 24;
0662 tmp = readw(fifo_addr);
0663 cmd->resp[i] |= tmp << 8;
0664 tmp = readw(fifo_addr);
0665 cmd->resp[i] |= tmp >> 8;
0666 }
0667 } else {
0668 cmd->resp[0] = readw(fifo_addr) << 24;
0669 cmd->resp[0] |= readw(fifo_addr) << 8;
0670 cmd->resp[0] |= readw(fifo_addr) & 0xff;
0671 }
0672 }
0673
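/*
 * Program CMDAT, the block geometry and the DMA mode for the command, then
 * write opcode/argument and restart the clock with the START_OP bit to kick
 * off the transfer.
 */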
0674 static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
0675 struct mmc_command *cmd)
0676 {
0677 uint32_t cmdat = host->cmdat;
0678
0679 host->cmdat &= ~JZ_MMC_CMDAT_INIT;
0680 jz4740_mmc_clock_disable(host);
0681
0682 host->cmd = cmd;
0683
0684 if (cmd->flags & MMC_RSP_BUSY)
0685 cmdat |= JZ_MMC_CMDAT_BUSY;
0686
0687 switch (mmc_resp_type(cmd)) {
0688 case MMC_RSP_R1B:
0689 case MMC_RSP_R1:
0690 cmdat |= JZ_MMC_CMDAT_RSP_R1;
0691 break;
0692 case MMC_RSP_R2:
0693 cmdat |= JZ_MMC_CMDAT_RSP_R2;
0694 break;
0695 case MMC_RSP_R3:
0696 cmdat |= JZ_MMC_CMDAT_RSP_R3;
0697 break;
0698 default:
0699 break;
0700 }
0701
0702 if (cmd->data) {
0703 cmdat |= JZ_MMC_CMDAT_DATA_EN;
0704 if (cmd->data->flags & MMC_DATA_WRITE)
0705 cmdat |= JZ_MMC_CMDAT_WRITE;
0706 if (host->use_dma) {
/*
 * From the JZ4780 onwards the DMA control bits live in a separate
 * DMAC register: enable DMA there and set DMA_SEL to route the
 * transfer through the external DMA controller. Earlier SoCs only
 * have the single DMA enable bit in CMDAT.
 */
0716 if (host->version >= JZ_MMC_JZ4780) {
0717 writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL,
0718 host->base + JZ_REG_MMC_DMAC);
0719 } else {
0720 cmdat |= JZ_MMC_CMDAT_DMA_EN;
0721 }
0722 } else if (host->version >= JZ_MMC_JZ4780) {
0723 writel(0, host->base + JZ_REG_MMC_DMAC);
0724 }
0725
0726 writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
0727 writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
0728 }
0729
0730 writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
0731 writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
0732 writel(cmdat, host->base + JZ_REG_MMC_CMDAT);
0733
0734 jz4740_mmc_clock_enable(host, 1);
0735 }
0736
0737 static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
0738 {
0739 struct mmc_command *cmd = host->req->cmd;
0740 struct mmc_data *data = cmd->data;
0741 int direction;
0742
0743 if (data->flags & MMC_DATA_READ)
0744 direction = SG_MITER_TO_SG;
0745 else
0746 direction = SG_MITER_FROM_SG;
0747
0748 sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
0749 }
0750
0751
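/*
 * Threaded interrupt handler: runs the request state machine (read response,
 * transfer data, send stop). Each step may bail out with "timeout" set, which
 * means the needed interrupt was re-enabled and the state machine will be
 * re-entered from the next interrupt rather than completing the request now.
 */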
0752 static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
0753 {
0754 struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
0755 struct mmc_command *cmd = host->req->cmd;
0756 struct mmc_request *req = host->req;
0757 struct mmc_data *data = cmd->data;
0758 bool timeout = false;
0759
0760 if (cmd->error)
0761 host->state = JZ4740_MMC_STATE_DONE;
0762
0763 switch (host->state) {
0764 case JZ4740_MMC_STATE_READ_RESPONSE:
0765 if (cmd->flags & MMC_RSP_PRESENT)
0766 jz4740_mmc_read_response(host, cmd);
0767
0768 if (!data)
0769 break;
0770
0771 jz_mmc_prepare_data_transfer(host);
0772 fallthrough;
0773
0774 case JZ4740_MMC_STATE_TRANSFER_DATA:
0775 if (host->use_dma) {
/*
 * Use DMA when enabled. The transfer direction is derived from the
 * data flags in jz4740_mmc_prepare_dma_data() and
 * jz4740_mmc_start_dma_transfer().
 */
0782 timeout = jz4740_mmc_start_dma_transfer(host, data);
0783 data->bytes_xfered = data->blocks * data->blksz;
0784 } else if (data->flags & MMC_DATA_READ)
/*
 * Otherwise fall back to PIO; the sg_miter direction was already
 * set up in jz_mmc_prepare_data_transfer().
 */
0790 timeout = jz4740_mmc_read_data(host, data);
0791 else
0792 timeout = jz4740_mmc_write_data(host, data);
0793
0794 if (unlikely(timeout)) {
0795 host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
0796 break;
0797 }
0798
0799 jz4740_mmc_transfer_check_state(host, data);
0800
0801 timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
0802 if (unlikely(timeout)) {
0803 host->state = JZ4740_MMC_STATE_SEND_STOP;
0804 break;
0805 }
0806 jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
0807 fallthrough;
0808
0809 case JZ4740_MMC_STATE_SEND_STOP:
0810 if (!req->stop)
0811 break;
0812
0813 jz4740_mmc_send_command(host, req->stop);
0814
0815 if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
0816 timeout = jz4740_mmc_poll_irq(host,
0817 JZ_MMC_IRQ_PRG_DONE);
0818 if (timeout) {
0819 host->state = JZ4740_MMC_STATE_DONE;
0820 break;
0821 }
0822 }
0823 fallthrough;
0824
0825 case JZ4740_MMC_STATE_DONE:
0826 break;
0827 }
0828
0829 if (!timeout)
0830 jz4740_mmc_request_done(host);
0831
0832 return IRQ_HANDLED;
0833 }
0834
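/*
 * Hard interrupt handler: acknowledge status, latch command/data errors from
 * the STATUS register and wake the threaded handler for the actual request
 * processing. SDIO card interrupts are signalled directly from here.
 */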
0835 static irqreturn_t jz_mmc_irq(int irq, void *devid)
0836 {
0837 struct jz4740_mmc_host *host = devid;
0838 struct mmc_command *cmd = host->cmd;
0839 uint32_t irq_reg, status, tmp;
0840
0841 status = readl(host->base + JZ_REG_MMC_STATUS);
0842 irq_reg = jz4740_mmc_read_irq_reg(host);
0843
0844 tmp = irq_reg;
0845 irq_reg &= ~host->irq_mask;
0846
0847 tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
0848 JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);
0849
0850 if (tmp != irq_reg)
0851 jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg);
0852
0853 if (irq_reg & JZ_MMC_IRQ_SDIO) {
0854 jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO);
0855 mmc_signal_sdio_irq(host->mmc);
0856 irq_reg &= ~JZ_MMC_IRQ_SDIO;
0857 }
0858
0859 if (host->req && cmd && irq_reg) {
0860 if (test_and_clear_bit(0, &host->waiting)) {
0861 del_timer(&host->timeout_timer);
0862
0863 if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
0864 cmd->error = -ETIMEDOUT;
0865 } else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
0866 cmd->error = -EIO;
0867 } else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
0868 JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
0869 if (cmd->data)
0870 cmd->data->error = -EIO;
0871 cmd->error = -EIO;
0872 }
0873
0874 jz4740_mmc_set_irq_enabled(host, irq_reg, false);
0875 jz4740_mmc_write_irq_reg(host, irq_reg);
0876
0877 return IRQ_WAKE_THREAD;
0878 }
0879 }
0880
0881 return IRQ_HANDLED;
0882 }
0883
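/*
 * The controller divides the "mmc" clock by a power of two (CLKRT values
 * 0-7, i.e. up to /128). Set the parent clock to f_max and divide down until
 * the rate no longer exceeds the requested one; above 25 MHz also enable the
 * version-specific low-power/phase-delay settings.
 */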
0884 static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
0885 {
0886 int div = 0;
0887 int real_rate;
0888
0889 jz4740_mmc_clock_disable(host);
0890 clk_set_rate(host->clk, host->mmc->f_max);
0891
0892 real_rate = clk_get_rate(host->clk);
0893
0894 while (real_rate > rate && div < 7) {
0895 ++div;
0896 real_rate >>= 1;
0897 }
0898
0899 writew(div, host->base + JZ_REG_MMC_CLKRT);
0900
0901 if (real_rate > 25000000) {
0902 if (host->version >= JZ_MMC_JZ4780) {
0903 writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
0904 JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
0905 JZ_MMC_LPM_LOW_POWER_MODE_EN,
0906 host->base + JZ_REG_MMC_LPM);
0907 } else if (host->version >= JZ_MMC_JZ4760) {
0908 writel(JZ_MMC_LPM_DRV_RISING |
0909 JZ_MMC_LPM_LOW_POWER_MODE_EN,
0910 host->base + JZ_REG_MMC_LPM);
0911 } else if (host->version >= JZ_MMC_JZ4725B)
0912 writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
0913 host->base + JZ_REG_MMC_LPM);
0914 }
0915
0916 return real_rate;
0917 }
0918
0919 static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
0920 {
0921 struct jz4740_mmc_host *host = mmc_priv(mmc);
0922
0923 host->req = req;
0924
0925 jz4740_mmc_write_irq_reg(host, ~0);
0926 jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);
0927
0928 host->state = JZ4740_MMC_STATE_READ_RESPONSE;
0929 set_bit(0, &host->waiting);
0930 mod_timer(&host->timeout_timer,
0931 jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
0932 jz4740_mmc_send_command(host, req->cmd);
0933 }
0934
0935 static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
0936 {
0937 struct jz4740_mmc_host *host = mmc_priv(mmc);
0938 if (ios->clock)
0939 jz4740_mmc_set_clock_rate(host, ios->clock);
0940
0941 switch (ios->power_mode) {
0942 case MMC_POWER_UP:
0943 jz4740_mmc_reset(host);
0944 if (!IS_ERR(mmc->supply.vmmc))
0945 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
0946 host->cmdat |= JZ_MMC_CMDAT_INIT;
0947 clk_prepare_enable(host->clk);
0948 break;
0949 case MMC_POWER_ON:
0950 break;
0951 default:
0952 if (!IS_ERR(mmc->supply.vmmc))
0953 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
0954 clk_disable_unprepare(host->clk);
0955 break;
0956 }
0957
0958 switch (ios->bus_width) {
0959 case MMC_BUS_WIDTH_1:
0960 host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
0961 break;
0962 case MMC_BUS_WIDTH_4:
0963 host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
0964 host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
0965 break;
0966 case MMC_BUS_WIDTH_8:
0967 host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
0968 host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
0969 break;
0970 default:
0971 break;
0972 }
0973 }
0974
0975 static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
0976 {
0977 struct jz4740_mmc_host *host = mmc_priv(mmc);
0978 jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
0979 }
0980
0981 static const struct mmc_host_ops jz4740_mmc_ops = {
0982 .request = jz4740_mmc_request,
0983 .pre_req = jz4740_mmc_pre_request,
0984 .post_req = jz4740_mmc_post_request,
0985 .set_ios = jz4740_mmc_set_ios,
0986 .get_ro = mmc_gpio_get_ro,
0987 .get_cd = mmc_gpio_get_cd,
0988 .enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
0989 };
0990
0991 static const struct of_device_id jz4740_mmc_of_match[] = {
0992 { .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
0993 { .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
0994 { .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
0995 { .compatible = "ingenic,jz4775-mmc", .data = (void *) JZ_MMC_JZ4780 },
0996 { .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
0997 { .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
0998 {},
0999 };
1000 MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);
1001
1002 static int jz4740_mmc_probe(struct platform_device* pdev)
1003 {
1004 int ret;
1005 struct mmc_host *mmc;
1006 struct jz4740_mmc_host *host;
1007 const struct of_device_id *match;
1008
1009 mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
1010 if (!mmc) {
1011 dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
1012 return -ENOMEM;
1013 }
1014
1015 host = mmc_priv(mmc);
1016
1017 match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
1018 if (match) {
1019 host->version = (enum jz4740_mmc_version)match->data;
1020 } else {
/* No DT match: legacy probe, fall back to the original JZ4740 variant */
1022 host->version = JZ_MMC_JZ4740;
1023 }
1024
1025 ret = mmc_of_parse(mmc);
1026 if (ret) {
1027 dev_err_probe(&pdev->dev, ret, "could not parse device properties\n");
1028 goto err_free_host;
1029 }
1030
1031 mmc_regulator_get_supply(mmc);
1032
1033 host->irq = platform_get_irq(pdev, 0);
1034 if (host->irq < 0) {
1035 ret = host->irq;
1036 goto err_free_host;
1037 }
1038
1039 host->clk = devm_clk_get(&pdev->dev, "mmc");
1040 if (IS_ERR(host->clk)) {
1041 ret = PTR_ERR(host->clk);
1042 dev_err(&pdev->dev, "Failed to get mmc clock\n");
1043 goto err_free_host;
1044 }
1045
1046 host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1047 host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
1048 if (IS_ERR(host->base)) {
1049 ret = PTR_ERR(host->base);
1050 goto err_free_host;
1051 }
1052
1053 mmc->ops = &jz4740_mmc_ops;
1054 if (!mmc->f_max)
1055 mmc->f_max = JZ_MMC_CLK_RATE;
1056 mmc->f_min = mmc->f_max / 128;
1057 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1058
/*
 * Every request is aborted by the driver after JZ_MMC_REQ_TIMEOUT_MS
 * (5 s), so advertise that as the maximum busy timeout instead of
 * letting the core assume an unbounded wait.
 */
1063 mmc->max_busy_timeout = JZ_MMC_REQ_TIMEOUT_MS;
1064
1065 mmc->max_blk_size = (1 << 10) - 1;
1066 mmc->max_blk_count = (1 << 15) - 1;
1067 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1068
1069 mmc->max_segs = 128;
1070 mmc->max_seg_size = mmc->max_req_size;
1071
1072 host->mmc = mmc;
1073 host->pdev = pdev;
1074 spin_lock_init(&host->lock);
1075 host->irq_mask = ~0;
1076
1077 jz4740_mmc_reset(host);
1078
1079 ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
1080 dev_name(&pdev->dev), host);
1081 if (ret) {
1082 dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
1083 goto err_free_host;
1084 }
1085
1086 jz4740_mmc_clock_disable(host);
1087 timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);
1088
1089 ret = jz4740_mmc_acquire_dma_channels(host);
1090 if (ret == -EPROBE_DEFER)
1091 goto err_free_irq;
1092 host->use_dma = !ret;
1093
1094 platform_set_drvdata(pdev, host);
1095 ret = mmc_add_host(mmc);
1096
1097 if (ret) {
1098 dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
1099 goto err_release_dma;
1100 }
1101 dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");
1102
1103 dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
1104 host->use_dma ? "DMA" : "PIO",
1105 (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
1106 ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));
1107
1108 return 0;
1109
1110 err_release_dma:
1111 if (host->use_dma)
1112 jz4740_mmc_release_dma_channels(host);
1113 err_free_irq:
1114 free_irq(host->irq, host);
1115 err_free_host:
1116 mmc_free_host(mmc);
1117
1118 return ret;
1119 }
1120
1121 static int jz4740_mmc_remove(struct platform_device *pdev)
1122 {
1123 struct jz4740_mmc_host *host = platform_get_drvdata(pdev);
1124
1125 del_timer_sync(&host->timeout_timer);
1126 jz4740_mmc_set_irq_enabled(host, 0xff, false);
1127 jz4740_mmc_reset(host);
1128
1129 mmc_remove_host(host->mmc);
1130
1131 free_irq(host->irq, host);
1132
1133 if (host->use_dma)
1134 jz4740_mmc_release_dma_channels(host);
1135
1136 mmc_free_host(host->mmc);
1137
1138 return 0;
1139 }
1140
1141 static int jz4740_mmc_suspend(struct device *dev)
1142 {
1143 return pinctrl_pm_select_sleep_state(dev);
1144 }
1145
1146 static int jz4740_mmc_resume(struct device *dev)
1147 {
1148 return pinctrl_select_default_state(dev);
1149 }
1150
1151 static DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
1152 jz4740_mmc_resume);
1153
1154 static struct platform_driver jz4740_mmc_driver = {
1155 .probe = jz4740_mmc_probe,
1156 .remove = jz4740_mmc_remove,
1157 .driver = {
1158 .name = "jz4740-mmc",
1159 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
1160 .of_match_table = of_match_ptr(jz4740_mmc_of_match),
1161 .pm = pm_sleep_ptr(&jz4740_mmc_pm_ops),
1162 },
1163 };
1164
1165 module_platform_driver(jz4740_mmc_driver);
1166
1167 MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
1168 MODULE_LICENSE("GPL");
1169 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");