0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033 #include <linux/clk.h>
0034 #include <linux/module.h>
0035 #include <linux/init.h>
0036 #include <linux/platform_device.h>
0037 #include <linux/mm.h>
0038 #include <linux/interrupt.h>
0039 #include <linux/dma-mapping.h>
0040 #include <linux/scatterlist.h>
0041 #include <linux/highmem.h>
0042 #include <linux/leds.h>
0043 #include <linux/mmc/host.h>
0044 #include <linux/slab.h>
0045
0046 #include <asm/io.h>
0047 #include <asm/mach-au1x00/au1000.h>
0048 #include <asm/mach-au1x00/au1xxx_dbdma.h>
0049 #include <asm/mach-au1x00/au1100_mmc.h>
0050
0051 #define DRIVER_NAME "au1xxx-mmc"
0052
0053
0054
0055
/* Debug trace helper; compiled out entirely unless DEBUG is defined. */
#ifdef DEBUG
#define DBG(fmt, idx, args...) \
	pr_debug("au1xmmc(%d): DEBUG: " fmt, idx, ##args)
#else
#define DBG(fmt, idx, args...) do {} while (0)
#endif

/* One descriptor per DBDMA ring (see au1xmmc_dbdma_init). */
#define AU1XMMC_DESCRIPTOR_COUNT 1

/* Max bytes per DMA segment: 64KB-1 on Au1100, 4MB-1 on Au1200/1300. */
#define AU1100_MMC_DESCRIPTOR_SIZE 0x0000ffff
#define AU1200_MMC_DESCRIPTOR_SIZE 0x003fffff

/* Supported card supply voltage range: 2.7V .. 3.6V. */
#define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \
		     MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \
		     MMC_VDD_33_34 | MMC_VDD_34_35 | MMC_VDD_35_36)

/*
 * Pre-built SD_CMD word for a stop command: opcode 0xC (CMD12,
 * STOP_TRANSMISSION), R1b response, transfer type 7, GO bit set so it
 * launches as soon as it is written (see SEND_STOP).
 */
#define STOP_CMD \
	(SD_CMD_RT_1B | SD_CMD_CT_7 | (0xC << SD_CMD_CI_SHIFT) | SD_CMD_GO)

/* Interrupt sources always enabled in SD_CONFIG by the reset path. */
#define AU1XMMC_INTERRUPTS \
	(SD_CONFIG_SC | SD_CONFIG_DT | SD_CONFIG_RAT | \
	 SD_CONFIG_CR | SD_CONFIG_I)

/* NOTE(review): not referenced in this chunk — presumably a card-detect
 * poll period; confirm before removing. */
#define AU1XMMC_DETECT_TIMEOUT (HZ/2)
0087
/* Per-controller state, stored as the mmc_host's private data. */
struct au1xmmc_host {
	struct mmc_host *mmc;		/* MMC core host backing this slot */
	struct mmc_request *mrq;	/* request currently in flight (NULL if idle) */

	u32 flags;			/* HOST_F_* bits */
	void __iomem *iobase;		/* mapped SD controller registers */
	u32 clock;			/* last SD clock rate programmed via set_ios */
	u32 bus_width;
	u32 power_mode;

	int status;			/* HOST_S_* state machine position */

	struct {
		int len;		/* number of mapped scatterlist entries */
		int dir;		/* direction passed to dma_map_sg/dma_unmap_sg */
	} dma;

	struct {
		int index;		/* current scatterlist entry */
		int offset;		/* byte offset within that entry */
		int len;		/* bytes still to move in this transfer */
	} pio;

	u32 tx_chan;			/* DBDMA channel handles; valid only when */
	u32 rx_chan;			/* HOST_F_DBDMA is set (au1xmmc_dbdma_init) */

	int irq;

	struct tasklet_struct finish_task;	/* completes a request outside IRQ context */
	struct tasklet_struct data_task;	/* completes the data phase outside IRQ context */
	struct au1xmmc_platform_data *platdata;	/* optional board hooks (power/CD/RO/LED) */
	struct platform_device *pdev;
	struct resource *ioarea;		/* claimed MMIO region */
	struct clk *clk;			/* peripheral clock, divider source for SD clk */
};
0123
0124
/* host->flags bits */
#define HOST_F_XMIT	0x0001	/* current data phase is a write */
#define HOST_F_RECV	0x0002	/* current data phase is a read */
#define HOST_F_DMA	0x0010	/* DMA may be used for transfers */
#define HOST_F_DBDMA	0x0020	/* DBDMA channels were allocated */
#define HOST_F_ACTIVE	0x0100
#define HOST_F_STOP	0x1000	/* a stop command must follow the data phase */

/* host->status state machine values */
#define HOST_S_IDLE	0x0001
#define HOST_S_CMD	0x0002	/* command issued, awaiting response */
#define HOST_S_DATA	0x0003	/* data phase in progress */
#define HOST_S_STOP	0x0004	/* stop command issued (see SEND_STOP) */

/* Register accessors: controller base plus the SD_* register offsets. */
#define HOST_STATUS(h)	((h)->iobase + SD_STATUS)
#define HOST_CONFIG(h)	((h)->iobase + SD_CONFIG)
#define HOST_ENABLE(h)	((h)->iobase + SD_ENABLE)
#define HOST_TXPORT(h)	((h)->iobase + SD_TXPORT)
#define HOST_RXPORT(h)	((h)->iobase + SD_RXPORT)
#define HOST_CMDARG(h)	((h)->iobase + SD_CMDARG)
#define HOST_BLKSIZE(h)	((h)->iobase + SD_BLKSIZE)
#define HOST_CMD(h)	((h)->iobase + SD_CMD)
#define HOST_CONFIG2(h)	((h)->iobase + SD_CONFIG2)
#define HOST_TIMEOUT(h)	((h)->iobase + SD_TIMEOUT)
#define HOST_DEBUG(h)	((h)->iobase + SD_DEBUG)

/* DBDMA channel matching the direction of the current transfer. */
#define DMA_CHANNEL(h) \
	(((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan)
0152
0153 static inline int has_dbdma(void)
0154 {
0155 switch (alchemy_get_cputype()) {
0156 case ALCHEMY_CPU_AU1200:
0157 case ALCHEMY_CPU_AU1300:
0158 return 1;
0159 default:
0160 return 0;
0161 }
0162 }
0163
0164 static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
0165 {
0166 u32 val = __raw_readl(HOST_CONFIG(host));
0167 val |= mask;
0168 __raw_writel(val, HOST_CONFIG(host));
0169 wmb();
0170 }
0171
/*
 * Flush the controller FIFO by pulsing SD_CONFIG2_FF, then write the
 * cached config back with SD_CONFIG2_DF cleared — SEND_STOP sets DF,
 * so this re-arms data transfer for the next request.
 */
static inline void FLUSH_FIFO(struct au1xmmc_host *host)
{
	u32 val = __raw_readl(HOST_CONFIG2(host));

	__raw_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
	wmb();
	mdelay(1);	/* give the flush time to take effect */

	/* Undo the DF bit SEND_STOP may have set (val is the pre-flush
	 * config, so FF is not written back here). */
	val &= ~SD_CONFIG2_DF;

	__raw_writel(val, HOST_CONFIG2(host));
	wmb();
}
0186
0187 static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
0188 {
0189 u32 val = __raw_readl(HOST_CONFIG(host));
0190 val &= ~mask;
0191 __raw_writel(val, HOST_CONFIG(host));
0192 wmb();
0193 }
0194
/*
 * Issue a stop command (CMD12) after a data phase.  Sets SD_CONFIG2_DF
 * first (cleared again by FLUSH_FIFO on the next request) and advances
 * the state machine to HOST_S_STOP.
 */
static inline void SEND_STOP(struct au1xmmc_host *host)
{
	u32 config2;

	WARN_ON(host->status != HOST_S_DATA);	/* only legal from the data phase */
	host->status = HOST_S_STOP;

	config2 = __raw_readl(HOST_CONFIG2(host));
	__raw_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
	wmb();

	/* Fire off the pre-built stop command word (GO bit included). */
	__raw_writel(STOP_CMD, HOST_CMD(host));
	wmb();
}
0210
0211 static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
0212 {
0213 if (host->platdata && host->platdata->set_power)
0214 host->platdata->set_power(host->mmc, state);
0215 }
0216
0217 static int au1xmmc_card_inserted(struct mmc_host *mmc)
0218 {
0219 struct au1xmmc_host *host = mmc_priv(mmc);
0220
0221 if (host->platdata && host->platdata->card_inserted)
0222 return !!host->platdata->card_inserted(host->mmc);
0223
0224 return -ENOSYS;
0225 }
0226
0227 static int au1xmmc_card_readonly(struct mmc_host *mmc)
0228 {
0229 struct au1xmmc_host *host = mmc_priv(mmc);
0230
0231 if (host->platdata && host->platdata->card_readonly)
0232 return !!host->platdata->card_readonly(mmc);
0233
0234 return -ENOSYS;
0235 }
0236
/*
 * Reset per-request state and report completion to the MMC core.
 * Runs from the finish tasklet or directly on early errors.
 */
static void au1xmmc_finish_request(struct au1xmmc_host *host)
{
	struct mmc_request *mrq = host->mrq;

	host->mrq = NULL;
	/* Keep only the ACTIVE and DMA-capability bits; per-transfer
	 * flags (XMIT/RECV/STOP/DBDMA) are dropped between requests. */
	host->flags &= HOST_F_ACTIVE | HOST_F_DMA;

	host->dma.len = 0;
	host->dma.dir = 0;

	host->pio.index = 0;
	host->pio.offset = 0;
	host->pio.len = 0;

	host->status = HOST_S_IDLE;

	mmc_request_done(host->mmc, mrq);
}
0255
0256 static void au1xmmc_tasklet_finish(struct tasklet_struct *t)
0257 {
0258 struct au1xmmc_host *host = from_tasklet(host, t, finish_task);
0259 au1xmmc_finish_request(host);
0260 }
0261
/*
 * Build the SD_CMD word for @cmd, program the argument register and
 * launch the command.  @data (may be NULL) selects the controller
 * transfer type.  Returns 0 on success, -EINVAL for a response type
 * the hardware cannot generate.
 */
static int au1xmmc_send_command(struct au1xmmc_host *host,
				struct mmc_command *cmd, struct mmc_data *data)
{
	u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);

	/* Map the MMC core response type onto the SD_CMD_RT_* bits. */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
		mmccmd |= SD_CMD_RT_1;
		break;
	case MMC_RSP_R1B:
		mmccmd |= SD_CMD_RT_1B;
		break;
	case MMC_RSP_R2:
		mmccmd |= SD_CMD_RT_2;
		break;
	case MMC_RSP_R3:
		mmccmd |= SD_CMD_RT_3;
		break;
	default:
		pr_info("au1xmmc: unhandled response type %02x\n",
			mmc_resp_type(cmd));
		return -EINVAL;
	}

	if (data) {
		/* Transfer type: CT_2/CT_4 = single/multi-block read,
		 * CT_1/CT_3 = single/multi-block write. */
		if (data->flags & MMC_DATA_READ) {
			if (data->blocks > 1)
				mmccmd |= SD_CMD_CT_4;
			else
				mmccmd |= SD_CMD_CT_2;
		} else if (data->flags & MMC_DATA_WRITE) {
			if (data->blocks > 1)
				mmccmd |= SD_CMD_CT_3;
			else
				mmccmd |= SD_CMD_CT_1;
		}
	}

	__raw_writel(cmd->arg, HOST_CMDARG(host));
	wmb();

	/* Writing the GO bit starts the command engine. */
	__raw_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
	wmb();

	/* Wait for the command launch to be accepted (GO self-clears).
	 * NOTE(review): busy-wait with no timeout — a wedged controller
	 * would hang here; confirm this matches the hardware contract. */
	while (__raw_readl(HOST_CMD(host)) & SD_CMD_GO)
		;

	return 0;
}
0314
/*
 * Complete the data phase of the current request: drain the transmit
 * FIFO, check CRC status, unmap the scatterlist, compute bytes_xfered
 * and finish the request.  @status may be 0 to force a fresh read.
 */
static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data;
	u32 crc;

	WARN_ON((host->status != HOST_S_DATA) && (host->status != HOST_S_STOP));

	if (host->mrq == NULL)
		return;		/* request already torn down (e.g. timeout path) */

	data = mrq->cmd->data;

	if (status == 0)
		status = __raw_readl(HOST_STATUS(host));

	/* On writes, spin until the data-busy bit clears. */
	while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
		status = __raw_readl(HOST_STATUS(host));

	data->error = 0;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);

	/* Gather CRC error bits; on transmit also check the CRC response
	 * field in the low status bits (0x02 appears to mean "no error"
	 * — TODO confirm against the databook). */
	crc = (status & (SD_STATUS_WC | SD_STATUS_RC));
	if (host->flags & HOST_F_XMIT)
		crc |= ((status & 0x07) == 0x02) ? 0 : 1;

	if (crc)
		data->error = -EILSEQ;

	/* Acknowledge (clear) the CRC status bits. */
	__raw_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));

	data->bytes_xfered = 0;

	if (!data->error) {
		if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
			u32 chan = DMA_CHANNEL(host);

			/* DBDMA keeps the transferred byte count itself. */
			chan_tab_t *c = *((chan_tab_t **)chan);
			au1x_dma_chan_t *cp = c->chan_ptr;
			data->bytes_xfered = cp->ddma_bytecnt;
		} else
			/* PIO: whatever wasn't left over got transferred. */
			data->bytes_xfered =
				(data->blocks * data->blksz) - host->pio.len;
	}

	au1xmmc_finish_request(host);
}
0365
0366 static void au1xmmc_tasklet_data(struct tasklet_struct *t)
0367 {
0368 struct au1xmmc_host *host = from_tasklet(host, t, data_task);
0369
0370 u32 status = __raw_readl(HOST_STATUS(host));
0371 au1xmmc_data_complete(host, status);
0372 }
0373
/* Bytes moved per PIO interrupt at most. */
#define AU1XMMC_MAX_TRANSFER 8

/*
 * PIO transmit path: feed up to AU1XMMC_MAX_TRANSFER bytes from the
 * current scatterlist entry into the TX FIFO while it has room.
 * Called from the IRQ handler while SD_CONFIG_TH is enabled.
 */
static void au1xmmc_send_pio(struct au1xmmc_host *host)
{
	struct mmc_data *data;
	int sg_len, max, count;
	unsigned char *sg_ptr, val;
	u32 status;
	struct scatterlist *sg;

	data = host->mrq->data;

	if (!(host->flags & HOST_F_XMIT))
		return;		/* spurious: not a transmit request */

	/* Map the current scatterlist page and seek to our offset. */
	sg = &data->sg[host->pio.index];
	sg_ptr = kmap_atomic(sg_page(sg)) + sg->offset + host->pio.offset;

	/* Bytes left in this scatterlist entry. */
	sg_len = data->sg[host->pio.index].length - host->pio.offset;

	/* Cap by the bytes remaining overall and the per-IRQ budget. */
	max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
	if (max > AU1XMMC_MAX_TRANSFER)
		max = AU1XMMC_MAX_TRANSFER;

	for (count = 0; count < max; count++) {
		status = __raw_readl(HOST_STATUS(host));

		if (!(status & SD_STATUS_TH))
			break;	/* TX FIFO no longer has room */

		val = sg_ptr[count];

		__raw_writel((unsigned long)val, HOST_TXPORT(host));
		wmb();
	}
	kunmap_atomic(sg_ptr);

	host->pio.len -= count;
	host->pio.offset += count;

	/* Entry exhausted: advance to the next scatterlist element. */
	if (count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		/* Transfer done: stop TX-threshold IRQs, send the stop
		 * command if one is pending, and defer completion. */
		IRQ_OFF(host, SD_CONFIG_TH);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
	}
}
0431
/*
 * PIO receive path: drain up to AU1XMMC_MAX_TRANSFER bytes from the RX
 * FIFO into the current scatterlist entry.  Bytes read past the end of
 * the scatterlist (sg_ptr == NULL) are popped and discarded.  Called
 * from the IRQ handler while SD_CONFIG_NE is enabled.
 */
static void au1xmmc_receive_pio(struct au1xmmc_host *host)
{
	struct mmc_data *data;
	int max, count, sg_len = 0;
	unsigned char *sg_ptr = NULL;
	u32 status, val;
	struct scatterlist *sg;

	data = host->mrq->data;

	if (!(host->flags & HOST_F_RECV))
		return;		/* spurious: not a receive request */

	max = host->pio.len;

	if (host->pio.index < host->dma.len) {
		sg = &data->sg[host->pio.index];
		sg_ptr = kmap_atomic(sg_page(sg)) + sg->offset + host->pio.offset;

		/* Bytes of room left in this scatterlist entry. */
		sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;

		/* Don't overrun the entry. */
		if (sg_len < max)
			max = sg_len;
	}

	if (max > AU1XMMC_MAX_TRANSFER)
		max = AU1XMMC_MAX_TRANSFER;

	for (count = 0; count < max; count++) {
		status = __raw_readl(HOST_STATUS(host));

		if (!(status & SD_STATUS_NE))
			break;	/* RX FIFO empty */

		if (status & SD_STATUS_RC) {
			DBG("RX CRC Error [%d + %d].\n", host->pdev->id,
					host->pio.len, count);
			break;
		}

		if (status & SD_STATUS_RO) {
			DBG("RX Overrun [%d + %d]\n", host->pdev->id,
					host->pio.len, count);
			break;
		} else if (status & SD_STATUS_RU) {
			DBG("RX Underrun [%d + %d]\n", host->pdev->id,
					host->pio.len, count);
			break;
		}

		val = __raw_readl(HOST_RXPORT(host));

		if (sg_ptr)
			sg_ptr[count] = (unsigned char)(val & 0xFF);
	}
	if (sg_ptr)
		kunmap_atomic(sg_ptr);

	host->pio.len -= count;
	host->pio.offset += count;

	/* Entry full: advance to the next scatterlist element. */
	if (sg_len && count == sg_len) {
		host->pio.index++;
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		/* Transfer done: stop not-empty IRQs, send the stop
		 * command if one is pending, and defer completion. */
		IRQ_OFF(host, SD_CONFIG_NE);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
	}
}
0511
0512
0513
0514
/*
 * Command-response handler: read back the response registers, record
 * any CRC error, then either finish the request (no data phase or
 * error) or move to the data phase and, for DMA, start the channel.
 */
static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd;
	u32 r[4];
	int i, trans;

	if (!host->mrq)
		return;		/* request already torn down */

	cmd = mrq->cmd;
	cmd->error = 0;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* Read high word first: RESP3 holds the most
			 * significant bits of the 136-bit response. */
			r[0] = __raw_readl(host->iobase + SD_RESP3);
			r[1] = __raw_readl(host->iobase + SD_RESP2);
			r[2] = __raw_readl(host->iobase + SD_RESP1);
			r[3] = __raw_readl(host->iobase + SD_RESP0);

			/* The response words in the registers are offset
			 * by 8 bits: each resp[i] takes the low 24 bits
			 * of r[i] plus the top byte of the next word. */
			for (i = 0; i < 4; i++) {
				cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
				if (i != 3)
					cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;
			}
		} else {
			/* 48-bit responses fit in a single register. */
			cmd->resp[0] = __raw_readl(host->iobase + SD_RESP0);
		}
	}

	/* Response or CRC trouble on the command itself. */
	if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
		cmd->error = -EILSEQ;

	trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);

	if (!trans || cmd->error) {
		/* No data phase (or it's pointless now): finish up. */
		IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
		tasklet_schedule(&host->finish_task);
		return;
	}

	host->status = HOST_S_DATA;

	if ((host->flags & (HOST_F_DMA | HOST_F_DBDMA))) {
		u32 channel = DMA_CHANNEL(host);

		/* Before starting a receive DMA, wait until the FIFO
		 * actually has data (DB and NE both set). */
		if (host->flags & HOST_F_RECV) {
			u32 mask = SD_STATUS_DB | SD_STATUS_NE;

			while((status & mask) != mask)
				status = __raw_readl(HOST_STATUS(host));
		}

		au1xxx_dbdma_start(channel);
	}
}
0585
0586 static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
0587 {
0588 unsigned int pbus = clk_get_rate(host->clk);
0589 unsigned int divisor = ((pbus / rate) / 2) - 1;
0590 u32 config;
0591
0592 config = __raw_readl(HOST_CONFIG(host));
0593
0594 config &= ~(SD_CONFIG_DIV);
0595 config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;
0596
0597 __raw_writel(config, HOST_CONFIG(host));
0598 wmb();
0599 }
0600
/*
 * Set up the data phase for @data: record direction/stop flags, map the
 * scatterlist, program the block size, and either queue DBDMA
 * descriptors or arm the PIO state machine.  Returns 0 on success,
 * -ETIMEDOUT on mapping/descriptor failure (error the core retries).
 */
static int au1xmmc_prepare_data(struct au1xmmc_host *host,
				struct mmc_data *data)
{
	int datalen = data->blocks * data->blksz;

	if (data->flags & MMC_DATA_READ)
		host->flags |= HOST_F_RECV;
	else
		host->flags |= HOST_F_XMIT;

	if (host->mrq->stop)
		host->flags |= HOST_F_STOP;

	/* NOTE(review): mapped bidirectional regardless of transfer
	 * direction — presumably intentional for this platform; confirm. */
	host->dma.dir = DMA_BIDIRECTIONAL;

	host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
				   data->sg_len, host->dma.dir);

	if (host->dma.len == 0)
		return -ETIMEDOUT;

	/* Hardware takes block size minus one. */
	__raw_writel(data->blksz - 1, HOST_BLKSIZE(host));

	if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
		int i;
		u32 channel = DMA_CHANNEL(host);

		au1xxx_dbdma_stop(channel);

		/* Queue one descriptor per mapped segment; only the last
		 * one raises an interrupt (DDMA_FLAGS_IE). */
		for (i = 0; i < host->dma.len; i++) {
			u32 ret = 0, flags = DDMA_FLAGS_NOIE;
			struct scatterlist *sg = &data->sg[i];
			int sg_len = sg->length;

			int len = (datalen > sg_len) ? sg_len : datalen;

			if (i == host->dma.len - 1)
				flags = DDMA_FLAGS_IE;

			if (host->flags & HOST_F_XMIT) {
				ret = au1xxx_dbdma_put_source(channel,
					sg_phys(sg), len, flags);
			} else {
				ret = au1xxx_dbdma_put_dest(channel,
					sg_phys(sg), len, flags);
			}

			if (!ret)
				goto dataerr;	/* descriptor queue full */

			datalen -= len;
		}
	} else {
		/* PIO: start at the first scatterlist entry and enable
		 * the appropriate FIFO-threshold interrupt. */
		host->pio.index = 0;
		host->pio.offset = 0;
		host->pio.len = datalen;

		if (host->flags & HOST_F_XMIT)
			IRQ_ON(host, SD_CONFIG_TH);
		else
			IRQ_ON(host, SD_CONFIG_NE);

	}

	return 0;

dataerr:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
		     host->dma.dir);
	return -ETIMEDOUT;
}
0672
0673
/* The MMC core's .request hook: start processing @mrq. */
static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
{
	struct au1xmmc_host *host = mmc_priv(mmc);
	int ret = 0;

	WARN_ON(irqs_disabled());		/* must be called in process context */
	WARN_ON(host->status != HOST_S_IDLE);	/* one request at a time */

	host->mrq = mrq;
	host->status = HOST_S_CMD;

	/* Fail fast when the board hook reports no card (a hook that
	 * returns -ENOSYS falls through and the request proceeds). */
	if (0 == au1xmmc_card_inserted(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		au1xmmc_finish_request(host);
		return;
	}

	if (mrq->data) {
		FLUSH_FIFO(host);
		ret = au1xmmc_prepare_data(host, mrq->data);
	}

	if (!ret)
		ret = au1xmmc_send_command(host, mrq->cmd, mrq->data);

	if (ret) {
		mrq->cmd->error = ret;
		au1xmmc_finish_request(host);
	}
}
0705
/*
 * Bring the controller to a known state: pulse the reset bit, clear all
 * status, set a maximal timeout, flush the FIFO and enable the
 * driver's standard interrupt set.  Used at probe and resume.
 */
static void au1xmmc_reset_controller(struct au1xmmc_host *host)
{
	/* Enable the clock/controller, then assert and release reset. */
	__raw_writel(SD_ENABLE_CE, HOST_ENABLE(host));
	wmb();
	mdelay(1);

	__raw_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
	wmb();
	mdelay(5);

	/* Clear every status bit. */
	__raw_writel(~0, HOST_STATUS(host));
	wmb();

	__raw_writel(0, HOST_BLKSIZE(host));
	__raw_writel(0x001fffff, HOST_TIMEOUT(host));	/* max timeout value */
	wmb();

	__raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
	wmb();

	/* Pulse the FIFO-flush bit, then leave just the enable bit set. */
	__raw_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
	wmb();
	mdelay(1);

	__raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
	wmb();

	/* Arm the interrupt sources the driver always wants. */
	__raw_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
	wmb();
}
0738
0739
/* The MMC core's .set_ios hook: apply power, clock and bus width. */
static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct au1xmmc_host *host = mmc_priv(mmc);
	u32 config2;

	if (ios->power_mode == MMC_POWER_OFF)
		au1xmmc_set_power(host, 0);
	else if (ios->power_mode == MMC_POWER_ON) {
		au1xmmc_set_power(host, 1);
	}

	/* Reprogram the divider only on an actual rate change. */
	if (ios->clock && ios->clock != host->clock) {
		au1xmmc_set_clock(host, ios->clock);
		host->clock = ios->clock;
	}

	/* Bus width: BB selects 8-bit, WB selects 4-bit, neither = 1-bit. */
	config2 = __raw_readl(HOST_CONFIG2(host));
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		config2 |= SD_CONFIG2_BB;
		break;
	case MMC_BUS_WIDTH_4:
		config2 &= ~SD_CONFIG2_BB;
		config2 |= SD_CONFIG2_WB;
		break;
	case MMC_BUS_WIDTH_1:
		config2 &= ~(SD_CONFIG2_WB | SD_CONFIG2_BB);
		break;
	}
	__raw_writel(config2, HOST_CONFIG2(host));
	wmb();
}
0772
#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)	/* response/data timeout */
#define STATUS_DATA_IN (SD_STATUS_NE)			/* RX FIFO not empty */
#define STATUS_DATA_OUT (SD_STATUS_TH)			/* TX FIFO has room */

/*
 * Interrupt handler: dispatch on the status register.  Priority order:
 * SDIO card interrupt, timeouts, command completion, PIO data movement.
 * All handled status bits are acknowledged by the final write-back.
 */
static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
{
	struct au1xmmc_host *host = dev_id;
	u32 status;

	status = __raw_readl(HOST_STATUS(host));

	if (!(status & SD_STATUS_I))
		return IRQ_NONE;	/* not ours (the line may be shared) */

	if (status & SD_STATUS_SI)	/* SDIO card interrupt */
		mmc_signal_sdio_irq(host->mmc);

	if (host->mrq && (status & STATUS_TIMEOUT)) {
		/* Response or data timeout: flag the error on the
		 * appropriate part of the request... */
		if (status & SD_STATUS_RAT)
			host->mrq->cmd->error = -ETIMEDOUT;
		else if (status & SD_STATUS_DT)
			host->mrq->data->error = -ETIMEDOUT;

		/* ...stop any PIO interrupts... */
		IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);

		/* ...and finish the request from the tasklet. */
		tasklet_schedule(&host->finish_task);
	}
#if 0
	else if (status & SD_STATUS_DD) {
		/* Disabled: data-done handling (kept for reference). */
		if (!(host->flags & HOST_F_DMA) && (status & SD_STATUS_NE))
			au1xmmc_receive_pio(host);
		else {
			au1xmmc_data_complete(host, status);

		}
	}
#endif
	else if (status & SD_STATUS_CR) {
		/* Command response ready. */
		if (host->status == HOST_S_CMD)
			au1xmmc_cmd_complete(host, status);

	} else if (!(host->flags & HOST_F_DMA)) {
		/* PIO mode: service the FIFO in the right direction. */
		if ((host->flags & HOST_F_XMIT) && (status & STATUS_DATA_OUT))
			au1xmmc_send_pio(host);
		else if ((host->flags & HOST_F_RECV) && (status & STATUS_DATA_IN))
			au1xmmc_receive_pio(host);

	} else if (status & 0x203F3C70) {
		DBG("Unhandled status %8.8x\n", host->pdev->id,
			status);
	}

	/* Acknowledge everything we saw. */
	__raw_writel(status, HOST_STATUS(host));
	wmb();

	return IRQ_HANDLED;
}
0833
0834
/* 8-bit-wide "always ready" memory endpoint used as the memory side of
 * the MMC DBDMA channels (registered in au1xmmc_init). */
static dbdev_tab_t au1xmmc_mem_dbdev = {
	.dev_id		= DSCR_CMD0_ALWAYS,
	.dev_flags	= DEV_FLAGS_ANYUSE,
	.dev_tsize	= 0,
	.dev_devwidth	= 8,		/* byte-wide to match the SD FIFO ports */
	.dev_physaddr	= 0x00000000,
	.dev_intlevel	= 0,
	.dev_intpolarity = 0,
};
/* Device id from au1xxx_ddma_add_device(); 0 means DBDMA unavailable. */
static int memid;
0845
0846 static void au1xmmc_dbdma_callback(int irq, void *dev_id)
0847 {
0848 struct au1xmmc_host *host = (struct au1xmmc_host *)dev_id;
0849
0850
0851 if (!host->mrq)
0852 return;
0853
0854 if (host->flags & HOST_F_STOP)
0855 SEND_STOP(host);
0856
0857 tasklet_schedule(&host->data_task);
0858 }
0859
/*
 * Allocate and configure the TX and RX DBDMA channels from the device
 * ids in the platform DMA resources.  On success sets HOST_F_DMA and
 * HOST_F_DBDMA; on any failure returns -ENODEV and the driver falls
 * back to PIO.
 */
static int au1xmmc_dbdma_init(struct au1xmmc_host *host)
{
	struct resource *res;
	int txid, rxid;

	res = platform_get_resource(host->pdev, IORESOURCE_DMA, 0);
	if (!res)
		return -ENODEV;
	txid = res->start;

	res = platform_get_resource(host->pdev, IORESOURCE_DMA, 1);
	if (!res)
		return -ENODEV;
	rxid = res->start;

	if (!memid)
		return -ENODEV;	/* scratch memory device wasn't registered */

	/* TX: memory -> controller; RX: controller -> memory. */
	host->tx_chan = au1xxx_dbdma_chan_alloc(memid, txid,
			au1xmmc_dbdma_callback, (void *)host);
	if (!host->tx_chan) {
		dev_err(&host->pdev->dev, "cannot allocate TX DMA\n");
		return -ENODEV;
	}

	host->rx_chan = au1xxx_dbdma_chan_alloc(rxid, memid,
			au1xmmc_dbdma_callback, (void *)host);
	if (!host->rx_chan) {
		dev_err(&host->pdev->dev, "cannot allocate RX DMA\n");
		au1xxx_dbdma_chan_free(host->tx_chan);
		return -ENODEV;
	}

	/* Byte-wide transfers to match the 8-bit FIFO ports. */
	au1xxx_dbdma_set_devwidth(host->tx_chan, 8);
	au1xxx_dbdma_set_devwidth(host->rx_chan, 8);

	au1xxx_dbdma_ring_alloc(host->tx_chan, AU1XMMC_DESCRIPTOR_COUNT);
	au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT);

	/* This driver can do DMA. */
	host->flags |= HOST_F_DMA | HOST_F_DBDMA;

	return 0;
}
0904
0905 static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host)
0906 {
0907 if (host->flags & HOST_F_DMA) {
0908 host->flags &= ~HOST_F_DMA;
0909 au1xxx_dbdma_chan_free(host->tx_chan);
0910 au1xxx_dbdma_chan_free(host->rx_chan);
0911 }
0912 }
0913
0914 static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en)
0915 {
0916 struct au1xmmc_host *host = mmc_priv(mmc);
0917
0918 if (en)
0919 IRQ_ON(host, SD_CONFIG_SI);
0920 else
0921 IRQ_OFF(host, SD_CONFIG_SI);
0922 }
0923
/* Hooks the MMC core uses to drive this controller. */
static const struct mmc_host_ops au1xmmc_ops = {
	.request	= au1xmmc_request,
	.set_ios	= au1xmmc_set_ios,
	.get_ro		= au1xmmc_card_readonly,
	.get_cd		= au1xmmc_card_inserted,
	.enable_sdio_irq = au1xmmc_enable_sdio_irq,
};
0931
/*
 * Probe: map the controller registers, acquire IRQ/clock, set the MMC
 * core capabilities per SoC, set up card-detect, tasklets, optional
 * DBDMA and LED, reset the controller and register the host.  Error
 * paths unwind everything in reverse order through the goto chain.
 */
static int au1xmmc_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct au1xmmc_host *host;
	struct resource *r;
	int ret, iflag;

	mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "no memory for mmc_host\n");
		ret = -ENOMEM;
		goto out0;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->platdata = pdev->dev.platform_data;
	host->pdev = pdev;

	ret = -ENODEV;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no mmio defined\n");
		goto out1;
	}

	host->ioarea = request_mem_region(r->start, resource_size(r),
					  pdev->name);
	if (!host->ioarea) {
		dev_err(&pdev->dev, "mmio already in use\n");
		goto out1;
	}

	/* Only the first 0x3c bytes of SD registers are used. */
	host->iobase = ioremap(r->start, 0x3c);
	if (!host->iobase) {
		dev_err(&pdev->dev, "cannot remap mmio\n");
		goto out2;
	}

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		ret = host->irq;
		goto out3;
	}

	mmc->ops = &au1xmmc_ops;

	mmc->f_min = 450000;
	mmc->f_max = 24000000;

	mmc->max_blk_size = 2048;
	mmc->max_blk_count = 512;

	mmc->ocr_avail = AU1XMMC_OCR;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
	mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;

	iflag = IRQF_SHARED;	/* NOTE(review): IRQ line presumably shared
				 * between slots on Au1100/Au1200 — confirm */

	switch (alchemy_get_cputype()) {
	case ALCHEMY_CPU_AU1100:
		mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE;
		break;
	case ALCHEMY_CPU_AU1200:
		mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
		break;
	case ALCHEMY_CPU_AU1300:
		iflag = 0;	/* exclusive IRQ on Au1300 */
		mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
		mmc->f_max = 52000000;
		/* The SD0 controller gets the wider bus. */
		if (host->ioarea->start == AU1100_SD0_PHYS_ADDR)
			mmc->caps |= MMC_CAP_8_BIT_DATA;
		break;
	}

	ret = request_irq(host->irq, au1xmmc_irq, iflag, DRIVER_NAME, host);
	if (ret) {
		dev_err(&pdev->dev, "cannot grab IRQ\n");
		goto out3;
	}

	host->clk = clk_get(&pdev->dev, ALCHEMY_PERIPH_CLK);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "cannot find clock\n");
		ret = PTR_ERR(host->clk);
		goto out_irq;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable clock\n");
		goto out_clk;
	}

	host->status = HOST_S_IDLE;

	/* Board-specific card detect; fall back to polling on failure
	 * or when no hook is provided. */
	if (host->platdata && host->platdata->cd_setup) {
		ret = host->platdata->cd_setup(mmc, 1);
		if (ret) {
			dev_warn(&pdev->dev, "board CD setup failed\n");
			mmc->caps |= MMC_CAP_NEEDS_POLL;
		}
	} else
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	/* Let the board mask off capabilities its wiring can't support. */
	if (host->platdata)
		mmc->caps &= ~(host->platdata->mask_host_caps);

	tasklet_setup(&host->data_task, au1xmmc_tasklet_data);

	tasklet_setup(&host->finish_task, au1xmmc_tasklet_finish);

	if (has_dbdma()) {
		ret = au1xmmc_dbdma_init(host);
		if (ret)
			pr_info(DRIVER_NAME ": DBDMA init failed; using PIO\n");
	}

#ifdef CONFIG_LEDS_CLASS
	if (host->platdata && host->platdata->led) {
		struct led_classdev *led = host->platdata->led;
		led->name = mmc_hostname(mmc);
		led->brightness = LED_OFF;
		led->default_trigger = mmc_hostname(mmc);
		ret = led_classdev_register(mmc_dev(mmc), led);
		if (ret)
			goto out5;
	}
#endif

	au1xmmc_reset_controller(host);

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(&pdev->dev, "cannot add mmc host\n");
		goto out6;
	}

	platform_set_drvdata(pdev, host);

	pr_info(DRIVER_NAME ": MMC Controller %d set up at %p"
		" (mode=%s)\n", pdev->id, host->iobase,
		host->flags & HOST_F_DMA ? "dma" : "pio");

	return 0;	/* all good */

out6:
#ifdef CONFIG_LEDS_CLASS
	if (host->platdata && host->platdata->led)
		led_classdev_unregister(host->platdata->led);
out5:
#endif
	/* Quiesce the controller before tearing everything down. */
	__raw_writel(0, HOST_ENABLE(host));
	__raw_writel(0, HOST_CONFIG(host));
	__raw_writel(0, HOST_CONFIG2(host));
	wmb();

	if (host->flags & HOST_F_DBDMA)
		au1xmmc_dbdma_shutdown(host);

	tasklet_kill(&host->data_task);
	tasklet_kill(&host->finish_task);

	if (host->platdata && host->platdata->cd_setup &&
	    !(mmc->caps & MMC_CAP_NEEDS_POLL))
		host->platdata->cd_setup(mmc, 0);
out_clk:
	clk_disable_unprepare(host->clk);
	clk_put(host->clk);
out_irq:
	free_irq(host->irq, host);
out3:
	iounmap((void *)host->iobase);
out2:
	release_resource(host->ioarea);
	kfree(host->ioarea);
out1:
	mmc_free_host(mmc);
out0:
	return ret;
}
1115
/* Teardown: undo everything probe did, in reverse order. */
static int au1xmmc_remove(struct platform_device *pdev)
{
	struct au1xmmc_host *host = platform_get_drvdata(pdev);

	if (host) {
		mmc_remove_host(host->mmc);

#ifdef CONFIG_LEDS_CLASS
		if (host->platdata && host->platdata->led)
			led_classdev_unregister(host->platdata->led);
#endif

		/* Release board card-detect resources (only set up when
		 * the host is not in polling mode). */
		if (host->platdata && host->platdata->cd_setup &&
		    !(host->mmc->caps & MMC_CAP_NEEDS_POLL))
			host->platdata->cd_setup(host->mmc, 0);

		/* Quiesce the controller. */
		__raw_writel(0, HOST_ENABLE(host));
		__raw_writel(0, HOST_CONFIG(host));
		__raw_writel(0, HOST_CONFIG2(host));
		wmb();

		tasklet_kill(&host->data_task);
		tasklet_kill(&host->finish_task);

		if (host->flags & HOST_F_DBDMA)
			au1xmmc_dbdma_shutdown(host);

		au1xmmc_set_power(host, 0);

		clk_disable_unprepare(host->clk);
		clk_put(host->clk);

		free_irq(host->irq, host);
		iounmap((void *)host->iobase);
		release_resource(host->ioarea);
		kfree(host->ioarea);

		mmc_free_host(host->mmc);
	}
	return 0;
}
1157
#ifdef CONFIG_PM
/* Suspend: silence the controller; state is rebuilt on resume. */
static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct au1xmmc_host *host = platform_get_drvdata(pdev);

	__raw_writel(0, HOST_CONFIG2(host));
	__raw_writel(0, HOST_CONFIG(host));
	__raw_writel(0xffffffff, HOST_STATUS(host));	/* clear all status */
	__raw_writel(0, HOST_ENABLE(host));
	wmb();

	return 0;
}

/* Resume: a full controller reset restores all programmed state. */
static int au1xmmc_resume(struct platform_device *pdev)
{
	struct au1xmmc_host *host = platform_get_drvdata(pdev);

	au1xmmc_reset_controller(host);

	return 0;
}
#else
#define au1xmmc_suspend NULL
#define au1xmmc_resume NULL
#endif
1184
/* Platform driver glue; matched by name against the board device. */
static struct platform_driver au1xmmc_driver = {
	.probe         = au1xmmc_probe,
	.remove        = au1xmmc_remove,
	.suspend       = au1xmmc_suspend,
	.resume        = au1xmmc_resume,
	.driver        = {
		.name  = DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
1195
static int __init au1xmmc_init(void)
{
	if (has_dbdma()) {
		/* Register our own byte-wide scratch memory device
		 * (au1xmmc_mem_dbdev, dev_devwidth = 8) as the memory
		 * endpoint for the MMC DBDMA channels, rather than
		 * sharing a system-wide one. */
		memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);
		if (!memid)
			pr_err("au1xmmc: cannot add memory dbdma\n");
	}
	return platform_driver_register(&au1xmmc_driver);
}
1209
1210 static void __exit au1xmmc_exit(void)
1211 {
1212 if (has_dbdma() && memid)
1213 au1xxx_ddma_del_device(memid);
1214
1215 platform_driver_unregister(&au1xmmc_driver);
1216 }
1217
/* Module entry points and metadata. */
module_init(au1xmmc_init);
module_exit(au1xmmc_exit);

MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:au1xxx-mmc");