/*
 * Access SD/MMC cards through SPI master controllers.
 *
 * See the MODULE_AUTHOR/MODULE_LICENSE tags at the end of this file for
 * authorship and licensing information.
 */
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/dma-mapping.h>
#include <linux/crc7.h>
#include <linux/crc-itu-t.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>		/* for R1_SPI_* bit values */
#include <linux/mmc/slot-gpio.h>

#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>

#include <asm/unaligned.h>

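/*
 * Card's data-response token, sent after each written data block.  The
 * meaningful part is the low five bits ("0sss1"); SPI_RESPONSE_* below
 * are the codes for "accepted", "CRC error" and "write error".
 */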
#define SPI_MMC_RESPONSE_CODE(x)	((x) & 0x1f)
#define SPI_RESPONSE_ACCEPTED		((2 << 1)|1)
#define SPI_RESPONSE_CRC_ERR		((5 << 1)|1)
#define SPI_RESPONSE_WRITE_ERR		((6 << 1)|1)

/* Tokens framing data blocks on the wire */
#define SPI_TOKEN_SINGLE	0xfe	/* single block r/w data start */
#define SPI_TOKEN_MULTI_WRITE	0xfc	/* multiblock write data start */
#define SPI_TOKEN_STOP_TRAN	0xfd	/* terminate multiblock write */

#define MMC_SPI_BLOCKSIZE	512

/* Fallback timeouts for R1B busy waiting and for card initialization */
#define MMC_SPI_R1B_TIMEOUT_MS	3000
#define MMC_SPI_INIT_TIMEOUT_MS	3000

/* Maximum number of blocks transferred in one request */
#define MMC_SPI_BLOCKSATONCE	128

/*
 * Scratch buffer shared by command, token, CRC and status transfers;
 * it may be DMA-mapped as a single unit.
 */
struct scratch {
	u8			status[29];
	u8			data_token;
	__be16			crc_val;
};

struct mmc_spi_host {
	struct mmc_host		*mmc;
	struct spi_device	*spi;

	unsigned char		power_mode;
	u16			powerup_msecs;

	struct mmc_spi_platform_data	*pdata;

	/* transfers and message used for data blocks */
	struct spi_transfer	token, t, crc, early_status;
	struct spi_message	m;

	/* transfer and message used for status/busy readback */
	struct spi_transfer	status;
	struct spi_message	readback;

	/* DMA-capable device behind the SPI master, if any */
	struct device		*dma_dev;

	/* buffer used for commands and for message "overhead" */
	struct scratch		*data;
	dma_addr_t		data_dma;

	/* block of all-ones bytes, transmitted whenever we only need to
	 * clock data out of the card
	 */
	void			*ones;
	dma_addr_t		ones_dma;
};

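/*
 * Deselect the card between commands:  with no transfer queued, calling
 * spi_setup() leaves the chipselect inactive.
 */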
static inline int mmc_cs_off(struct mmc_spi_host *host)
{
	return spi_setup(host->spi);
}

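/*
 * Read "len" bytes from the card into the scratch status buffer,
 * clocking out all-ones, using the preassembled "readback" message.
 */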
static int
mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
{
	int status;

	if (len > sizeof(*host->data)) {
		WARN_ON(1);
		return -EIO;
	}

	host->status.len = len;

	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_FROM_DEVICE);

	status = spi_sync_locked(host->spi, &host->readback);

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_FROM_DEVICE);

	return status;
}

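/*
 * Poll the card, "n" bytes at a time, until it returns something other
 * than "byte" or the timeout expires.  mmc_spi_wait_unbusy() uses this
 * to wait out a busy period (all-zeroes), and mmc_spi_readtoken() to
 * wait for a data token (anything other than all-ones).
 */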
static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
			unsigned n, u8 byte)
{
	u8 *cp = host->data->status;
	unsigned long start = jiffies;

	do {
		int status;
		unsigned i;

		status = mmc_spi_readbytes(host, n);
		if (status < 0)
			return status;

		for (i = 0; i < n; i++) {
			if (cp[i] != byte)
				return cp[i];
		}

		/* don't hog the CPU while polling over long timeouts */
		cond_resched();
	} while (time_is_after_jiffies(start + timeout));
	return -ETIMEDOUT;
}

static inline int
mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
{
	return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
}

static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
{
	return mmc_spi_skip(host, timeout, 1, 0xff);
}

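/*
 * Command transmission and response decoding.  Note that in SPI mode the
 * response registers don't hold the same data as native MMC/SD hosts:
 * cmd->resp[0] carries the R1/R2 status bits, and cmd->resp[1] carries
 * the 32-bit payload of R3/R4/R7 responses.
 */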
static char *maptype(struct mmc_command *cmd)
{
	switch (mmc_spi_resp_type(cmd)) {
	case MMC_RSP_SPI_R1:	return "R1";
	case MMC_RSP_SPI_R1B:	return "R1B";
	case MMC_RSP_SPI_R2:	return "R2/R5";
	case MMC_RSP_SPI_R3:	return "R3/R4/R7";
	default:		return "?";
	}
}

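/*
 * Pick the card's response to the previously sent command out of the
 * scratch buffer, reading more bytes from the card when needed.  Copes
 * with responses that arrive bit-shifted relative to byte boundaries,
 * and with the busy signalling that follows R1B responses.
 */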
static int mmc_spi_response_get(struct mmc_spi_host *host,
		struct mmc_command *cmd, int cs_on)
{
	unsigned long timeout_ms;
	u8	*cp = host->data->status;
	u8	*end = cp + host->t.len;
	int	value = 0;
	int	bitshift;
	u8	leftover = 0;
	unsigned short rotator;
	int	i;
	char	tag[32];

	snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s",
		cmd->opcode, maptype(cmd));

	/* The first bytes of the transfer hold the command we just sent;
	 * skip past those, then past any all-ones filler, to find the
	 * response byte.
	 */
	cp += 8;
	while (cp < end && *cp == 0xff)
		cp++;

	/* Nothing in the buffer yet?  Keep reading single bytes until the
	 * card answers or we give up.
	 */
	if (cp == end) {
		cp = host->data->status;
		end = cp + 1;

		for (i = 2; i < 16; i++) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			if (*cp != 0xff)
				goto checkstatus;
		}
		value = -ETIMEDOUT;
		goto done;
	}

checkstatus:
	bitshift = 0;
	if (*cp & 0x80) {
		/* the response is bit-shifted; reassemble it */
		rotator = *cp++ << 8;
		/* read the next byte if we ran out of buffered data */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp + 1;
		}
		rotator |= *cp++;
		while (rotator & 0x8000) {
			bitshift++;
			rotator <<= 1;
		}
		cmd->resp[0] = rotator >> 8;
		leftover = rotator;
	} else {
		cmd->resp[0] = *cp++;
	}
	cmd->error = 0;

	/* Map the R1 status bits onto errno values */
	if (cmd->resp[0] != 0) {
		if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
				& cmd->resp[0])
			value = -EFAULT;
		else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
			value = -ENOSYS;
		else if (R1_SPI_COM_CRC & cmd->resp[0])
			value = -EILSEQ;
		else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
				& cmd->resp[0])
			value = -EIO;
		/* else:  R1_SPI_IDLE, the card is still resetting */
	}

	switch (mmc_spi_resp_type(cmd)) {

	/* SPI R1B == R1 + busy */
	case MMC_RSP_SPI_R1B:
		/* maybe we read all the busy tokens already */
		while (cp < end && *cp == 0)
			cp++;
		if (cp == end) {
			timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
				MMC_SPI_R1B_TIMEOUT_MS;
			mmc_spi_wait_unbusy(host, msecs_to_jiffies(timeout_ms));
		}
		break;

	/* SPI R2 == R1 + second status byte (R5 == R1 + data byte) */
	case MMC_RSP_SPI_R2:
		/* read the next byte if we ran out of buffered data */
		if (cp == end) {
			value = mmc_spi_readbytes(host, 1);
			if (value < 0)
				goto done;
			cp = host->data->status;
			end = cp + 1;
		}
		if (bitshift) {
			rotator = leftover << 8;
			rotator |= *cp << bitshift;
			cmd->resp[0] |= (rotator & 0xFF00);
		} else {
			cmd->resp[0] |= *cp << 8;
		}
		break;

	/* SPI R3, R4, or R7 == R1 + 4 bytes */
	case MMC_RSP_SPI_R3:
		rotator = leftover << 8;
		cmd->resp[1] = 0;
		for (i = 0; i < 4; i++) {
			cmd->resp[1] <<= 8;
			/* read the next byte if we ran out of buffered data */
			if (cp == end) {
				value = mmc_spi_readbytes(host, 1);
				if (value < 0)
					goto done;
				cp = host->data->status;
				end = cp + 1;
			}
			if (bitshift) {
				rotator |= *cp++ << bitshift;
				cmd->resp[1] |= (rotator >> 8);
				rotator <<= 8;
			} else {
				cmd->resp[1] |= *cp++;
			}
		}
		break;

	/* SPI R1 == just one status byte */
	case MMC_RSP_SPI_R1:
		break;

	default:
		dev_dbg(&host->spi->dev, "bad response type %04x\n",
			mmc_spi_resp_type(cmd));
		if (value >= 0)
			value = -EINVAL;
		goto done;
	}

	if (value < 0)
		dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
			tag, cmd->resp[0], cmd->resp[1]);

	/* disable chipselect on errors and some success cases */
	if (value >= 0 && cs_on)
		return value;
done:
	if (value < 0)
		cmd->error = value;
	mmc_cs_off(host);
	return value;
}

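/*
 * Transmit one command and collect its response, using a single
 * full-duplex transfer that also covers the card's response delay and,
 * where the length is known in advance, its trailing turnaround bytes.
 * With cs_on set, chipselect is left asserted so a data transfer can
 * follow immediately.
 */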
static int
mmc_spi_command_send(struct mmc_spi_host *host,
		struct mmc_request *mrq,
		struct mmc_command *cmd, int cs_on)
{
	struct scratch		*data = host->data;
	u8			*cp = data->status;
	int			status;
	struct spi_transfer	*t;

	/* Build the 6-byte command frame after one leading all-ones byte:
	 * start/transmission bits plus the opcode, four bytes of
	 * big-endian argument, then CRC7 with the end bit.
	 */
	memset(cp, 0xff, sizeof(data->status));

	cp[1] = 0x40 | cmd->opcode;
	put_unaligned_be32(cmd->arg, cp + 2);
	cp[6] = crc7_be(0, cp + 1, 5) | 0x01;
	cp += 7;

	/* Size the transfer so it also covers the card's response delay
	 * and, except before data block reads, the response itself plus
	 * trailing busy/turnaround bytes.
	 */
	if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
		cp += 2;				/* R1 */
	} else {
		cp += 10;
		if (cmd->flags & MMC_RSP_SPI_S2)	/* R2/R5 */
			cp++;
		else if (cmd->flags & MMC_RSP_SPI_B4)	/* R3/R4/R7 */
			cp += 4;
		else if (cmd->flags & MMC_RSP_BUSY)	/* R1B */
			cp = data->status + sizeof(data->status);
		/* else:  R1 (most commands) */
	}

	dev_dbg(&host->spi->dev, " CMD%d, resp %s\n",
		cmd->opcode, maptype(cmd));

	/* send command, leaving chipselect active */
	spi_message_init(&host->m);

	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = t->rx_buf = data->status;
	t->tx_dma = t->rx_dma = host->data_dma;
	t->len = cp - data->status;
	t->cs_change = 1;
	spi_message_add_tail(t, &host->m);

	if (host->dma_dev) {
		host->m.is_dma_mapped = 1;
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	}
	status = spi_sync_locked(host->spi, &host->m);

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);
	if (status < 0) {
		dev_dbg(&host->spi->dev, " ... write returned %d\n", status);
		cmd->error = status;
		return status;
	}

	/* then pick the response out of what we read back */
	return mmc_spi_response_get(host, cmd, cs_on);
}

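/*
 * Build the reusable spi_message for data blocks:  an optional data
 * token (writes only), the block payload, its CRC16, and for writes or
 * multiblock reads an "early status" transfer that collects the data
 * response and/or busy bytes in the same I/O.
 */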
static void
mmc_spi_setup_data_message(
	struct mmc_spi_host	*host,
	bool			multiple,
	enum dma_data_direction	direction)
{
	struct spi_transfer	*t;
	struct scratch		*scratch = host->data;
	dma_addr_t		dma = host->data_dma;

	spi_message_init(&host->m);
	if (dma)
		host->m.is_dma_mapped = 1;

	/* for reads, readblock() skips 0xff bytes before finding the
	 * token; for writes, this transfer issues it.
	 */
	if (direction == DMA_TO_DEVICE) {
		t = &host->token;
		memset(t, 0, sizeof(*t));
		t->len = 1;
		if (multiple)
			scratch->data_token = SPI_TOKEN_MULTI_WRITE;
		else
			scratch->data_token = SPI_TOKEN_SINGLE;
		t->tx_buf = &scratch->data_token;
		if (dma)
			t->tx_dma = dma + offsetof(struct scratch, data_token);
		spi_message_add_tail(t, &host->m);
	}

	/* Body of the transfer is the buffer, then the CRC;
	 * the buffer pointer and length are filled in per block.
	 */
	t = &host->t;
	memset(t, 0, sizeof(*t));
	t->tx_buf = host->ones;
	t->tx_dma = host->ones_dma;
	spi_message_add_tail(t, &host->m);

	t = &host->crc;
	memset(t, 0, sizeof(*t));
	t->len = 2;
	if (direction == DMA_TO_DEVICE) {
		/* the actual CRC value is written per block */
		t->tx_buf = &scratch->crc_val;
		if (dma)
			t->tx_dma = dma + offsetof(struct scratch, crc_val);
	} else {
		t->tx_buf = host->ones;
		t->tx_dma = host->ones_dma;
		t->rx_buf = &scratch->crc_val;
		if (dma)
			t->rx_dma = dma + offsetof(struct scratch, crc_val);
	}
	spi_message_add_tail(t, &host->m);

	/*
	 * After a read block, collect one extra turnaround byte so
	 * readblock() often finds the next token already buffered.
	 * After a write block, collect a whole scratch buffer of status
	 * so the data-response token and (usually) the busy signalling
	 * are captured in the same transfer.
	 */
	if (multiple || direction == DMA_TO_DEVICE) {
		t = &host->early_status;
		memset(t, 0, sizeof(*t));
		t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
		t->tx_buf = host->ones;
		t->tx_dma = host->ones_dma;
		t->rx_buf = scratch->status;
		if (dma)
			t->rx_dma = dma + offsetof(struct scratch, status);
		t->cs_change = 1;
		spi_message_add_tail(t, &host->m);
	}
}

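/*
 * Write one data block:  send the token, payload and CRC prepared by
 * mmc_spi_setup_data_message(), decode the card's (possibly bit-shifted)
 * data-response token, then wait until the card stops signalling busy.
 */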
static int
mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
	unsigned long timeout)
{
	struct spi_device *spi = host->spi;
	int status, i;
	struct scratch *scratch = host->data;
	u32 pattern;

	if (host->mmc->use_spi_crc)
		scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
	if (host->dma_dev)
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);

	status = spi_sync_locked(spi, &host->m);

	if (status != 0) {
		dev_dbg(&spi->dev, "write error (%d)\n", status);
		return status;
	}

	if (host->dma_dev)
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);

	/*
	 * The data-response token follows the data block, but it may be
	 * bit-shifted; scan for it in the collected status bytes.
	 */
	pattern = get_unaligned_be32(scratch->status);

	/* the first three bits are undefined; force them to ones */
	pattern |= 0xE0000000;

	/* left-adjust to the leading 0 bit */
	while (pattern & 0x80000000)
		pattern <<= 1;
	/* right-adjust for matching; the code is now in bits 4..0 */
	pattern >>= 27;

	switch (pattern) {
	case SPI_RESPONSE_ACCEPTED:
		status = 0;
		break;
	case SPI_RESPONSE_CRC_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION */
		status = -EILSEQ;
		break;
	case SPI_RESPONSE_WRITE_ERR:
		/* host shall then issue MMC_STOP_TRANSMISSION,
		 * and should MMC_SEND_STATUS to sort it out
		 */
		status = -EIO;
		break;
	default:
		status = -EPROTO;
		break;
	}
	if (status != 0) {
		dev_dbg(&spi->dev, "write error %02x (%d)\n",
			scratch->status[0], status);
		return status;
	}

	t->tx_buf += t->len;
	if (host->dma_dev)
		t->tx_dma += t->len;

	/* Return when not busy.  If we didn't collect that status yet,
	 * we'll need some more I/O.
	 */
	for (i = 4; i < sizeof(scratch->status); i++) {
		/* card is non-busy if the most recent bit is 1 */
		if (scratch->status[i] & 0x01)
			return 0;
	}
	return mmc_spi_wait_unbusy(host, timeout);
}

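/*
 * Read one data block:  wait for the data token, clock in the payload
 * and CRC, realign the bytes if the token arrived bit-shifted, and
 * verify the CRC16 when use_spi_crc is enabled.
 */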
static int
mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
	unsigned long timeout)
{
	struct spi_device *spi = host->spi;
	int status;
	struct scratch *scratch = host->data;
	unsigned int bitshift;
	u8 leftover;

	/* Usually the data token is already in the scratch buffer (the
	 * early_status transfer collected one byte); otherwise poll for it.
	 */
	status = mmc_spi_readbytes(host, 1);
	if (status < 0)
		return status;
	status = scratch->status[0];
	if (status == 0xff || status == 0)
		status = mmc_spi_readtoken(host, timeout);

	if (status < 0) {
		dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
		return status;
	}

	/* The token may be bit-shifted; the number of leading one bits
	 * tells how far the data stream must be shifted back into byte
	 * alignment.
	 */
	bitshift = 7;
	while (status & 0x80) {
		status <<= 1;
		bitshift--;
	}
	leftover = status << 1;

	if (host->dma_dev) {
		dma_sync_single_for_device(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);
		dma_sync_single_for_device(host->dma_dev,
				t->rx_dma, t->len,
				DMA_FROM_DEVICE);
	}

	status = spi_sync_locked(spi, &host->m);
	if (status < 0) {
		dev_dbg(&spi->dev, "read error %d\n", status);
		return status;
	}

	if (host->dma_dev) {
		dma_sync_single_for_cpu(host->dma_dev,
				host->data_dma, sizeof(*scratch),
				DMA_BIDIRECTIONAL);
		dma_sync_single_for_cpu(host->dma_dev,
				t->rx_dma, t->len,
				DMA_FROM_DEVICE);
	}

	if (bitshift) {
		/* Walk through the data and the CRC and shift everything
		 * back into byte alignment.
		 */
		u8 *cp = t->rx_buf;
		unsigned int len;
		unsigned int bitright = 8 - bitshift;
		u8 temp;

		for (len = t->len; len; len--) {
			temp = *cp;
			*cp++ = leftover | (temp >> bitshift);
			leftover = temp << bitright;
		}
		cp = (u8 *) &scratch->crc_val;
		temp = *cp;
		*cp++ = leftover | (temp >> bitshift);
		leftover = temp << bitright;
		temp = *cp;
		*cp = leftover | (temp >> bitshift);
	}

	if (host->mmc->use_spi_crc) {
		u16 crc = crc_itu_t(0, t->rx_buf, t->len);

		be16_to_cpus(&scratch->crc_val);
		if (scratch->crc_val != crc) {
			dev_dbg(&spi->dev,
				"read - crc error: crc_val=0x%04x, computed=0x%04x len=%d\n",
				scratch->crc_val, crc, t->len);
			return -EILSEQ;
		}
	}

	t->rx_buf += t->len;
	if (host->dma_dev)
		t->rx_dma += t->len;

	return 0;
}

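/*
 * An MMC/SD data stage:  walk the request's scatterlist and transfer it
 * block by block, mapping each segment for DMA when the controller can
 * use it.  Multiblock writes finish with a STOP_TRAN token and a wait
 * for the card to stop signalling busy.
 */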
static void
mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
		struct mmc_data *data, u32 blk_size)
{
	struct spi_device	*spi = host->spi;
	struct device		*dma_dev = host->dma_dev;
	struct spi_transfer	*t;
	enum dma_data_direction	direction = mmc_get_dma_dir(data);
	struct scatterlist	*sg;
	unsigned		n_sg;
	bool			multiple = (data->blocks > 1);
	const char		*write_or_read = (direction == DMA_TO_DEVICE) ? "write" : "read";
	u32			clock_rate;
	unsigned long		timeout;

	mmc_spi_setup_data_message(host, multiple, direction);
	t = &host->t;

	if (t->speed_hz)
		clock_rate = t->speed_hz;
	else
		clock_rate = spi->max_speed_hz;

	timeout = data->timeout_ns / 1000 +
		  data->timeout_clks * 1000000 / clock_rate;
	timeout = usecs_to_jiffies((unsigned int)timeout) + 1;

	/* Handle scatterlist segments one at a time, with synchronous
	 * reads or writes of each data block.
	 */
	for_each_sg(data->sg, sg, data->sg_len, n_sg) {
		int			status = 0;
		dma_addr_t		dma_addr = 0;
		void			*kmap_addr;
		unsigned		length = sg->length;
		enum dma_data_direction	dir = direction;

		/* set up dma mapping for controller drivers that might
		 * use DMA ... though they may fall back to PIO
		 */
		if (dma_dev) {
			/* never invalidate whole *shared* pages */
			if ((sg->offset != 0 || length != PAGE_SIZE)
					&& dir == DMA_FROM_DEVICE)
				dir = DMA_BIDIRECTIONAL;

			dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
						PAGE_SIZE, dir);
			if (dma_mapping_error(dma_dev, dma_addr)) {
				data->error = -EFAULT;
				break;
			}
			if (direction == DMA_TO_DEVICE)
				t->tx_dma = dma_addr + sg->offset;
			else
				t->rx_dma = dma_addr + sg->offset;
		}

		/* allow pio too */
		kmap_addr = kmap(sg_page(sg));
		if (direction == DMA_TO_DEVICE)
			t->tx_buf = kmap_addr + sg->offset;
		else
			t->rx_buf = kmap_addr + sg->offset;

		/* transfer each block, and update request status */
		while (length) {
			t->len = min(length, blk_size);

			dev_dbg(&spi->dev, " %s block, %d bytes\n", write_or_read, t->len);

			if (direction == DMA_TO_DEVICE)
				status = mmc_spi_writeblock(host, t, timeout);
			else
				status = mmc_spi_readblock(host, t, timeout);
			if (status < 0)
				break;

			data->bytes_xfered += t->len;
			length -= t->len;

			if (!multiple)
				break;
		}

		/* discard mappings */
		if (direction == DMA_FROM_DEVICE)
			flush_dcache_page(sg_page(sg));
		kunmap(sg_page(sg));
		if (dma_dev)
			dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);

		if (status < 0) {
			data->error = status;
			dev_dbg(&spi->dev, "%s status %d\n", write_or_read, status);
			break;
		}
	}

	/* Multiblock writes end with a STOP_TRAN token; afterwards the
	 * card busies itself while programming.
	 */
	if (direction == DMA_TO_DEVICE && multiple) {
		struct scratch	*scratch = host->data;
		int		tmp;
		const unsigned	statlen = sizeof(scratch->status);

		dev_dbg(&spi->dev, " STOP_TRAN\n");

		/* Rebuild the message to hold just the early_status
		 * transfer, now transmitting the STOP_TRAN token followed
		 * by all-ones while reading back the busy signalling.
		 */
		INIT_LIST_HEAD(&host->m.transfers);
		list_add(&host->early_status.transfer_list,
				&host->m.transfers);

		memset(scratch->status, 0xff, statlen);
		scratch->status[0] = SPI_TOKEN_STOP_TRAN;

		host->early_status.tx_buf = host->early_status.rx_buf;
		host->early_status.tx_dma = host->early_status.rx_dma;
		host->early_status.len = statlen;

		if (host->dma_dev)
			dma_sync_single_for_device(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);

		tmp = spi_sync_locked(spi, &host->m);

		if (host->dma_dev)
			dma_sync_single_for_cpu(host->dma_dev,
					host->data_dma, sizeof(*scratch),
					DMA_BIDIRECTIONAL);

		if (tmp < 0) {
			if (!data->error)
				data->error = tmp;
			return;
		}

		/* Ideally we collected "not busy" status with that one
		 * I/O; otherwise poll byte at a time.
		 */
		for (tmp = 2; tmp < statlen; tmp++) {
			if (scratch->status[tmp] != 0)
				return;
		}
		tmp = mmc_spi_wait_unbusy(host, timeout);
		if (tmp < 0 && !data->error)
			data->error = tmp;
	}
}

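/*
 * The mmc_host request() method:  run the command (and any data and
 * stop phases) synchronously while holding the SPI bus lock, retrying
 * the whole request a few times if a data block fails its CRC check.
 */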
static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_spi_host	*host = mmc_priv(mmc);
	int			status = -EINVAL;
	int			crc_retry = 5;
	struct mmc_command	stop;

#ifdef DEBUG
	/* catch requests with missing SPI response types */
	{
		struct mmc_command	*cmd;
		int			invalid = 0;

		cmd = mrq->cmd;
		if (!mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		cmd = mrq->stop;
		if (cmd && !mmc_spi_resp_type(cmd)) {
			dev_dbg(&host->spi->dev, "bogus STOP command\n");
			cmd->error = -EINVAL;
			invalid = 1;
		}

		if (invalid) {
			dump_stack();
			mmc_request_done(host->mmc, mrq);
			return;
		}
	}
#endif

	/* request exclusive bus access */
	spi_bus_lock(host->spi->master);

crc_recover:
	/* issue command; then optionally data and stop */
	status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
	if (status == 0 && mrq->data) {
		mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);

		/*
		 * A CRC error on a data block need not be fatal:  stop the
		 * transmission, clear the error, and retry the whole
		 * request up to crc_retry times.
		 */
		if (mrq->data->error == -EILSEQ && crc_retry) {
			stop.opcode = MMC_STOP_TRANSMISSION;
			stop.arg = 0;
			stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
			status = mmc_spi_command_send(host, mrq, &stop, 0);
			crc_retry--;
			mrq->data->error = 0;
			goto crc_recover;
		}

		if (mrq->stop)
			status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
		else
			mmc_cs_off(host);
	}

	/* release the bus */
	spi_bus_unlock(host->spi->master);

	mmc_request_done(host->mmc, mrq);
}

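/*
 * Card powerup/reset prelude:  flush any leftover busy state, then clock
 * out enough dummy bytes with the card deselected to satisfy the minimum
 * 74 clock cycles the card needs before its first command.
 */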
static void mmc_spi_initsequence(struct mmc_spi_host *host)
{
	/* Try to be very sure any previous command has completed;
	 * wait till not-busy, skip debris from any old commands.
	 */
	mmc_spi_wait_unbusy(host, msecs_to_jiffies(MMC_SPI_INIT_TIMEOUT_MS));
	mmc_spi_readbytes(host, 10);

	/*
	 * The card needs a run of clock cycles with chipselect and MOSI
	 * held high before it will accept its first command.  Temporarily
	 * invert the chipselect polarity so those clocks go out with the
	 * card deselected, then restore the original polarity.
	 */
	host->spi->mode ^= SPI_CS_HIGH;
	if (spi_setup(host->spi) != 0) {
		/* just warn; most cards work without it */
		dev_warn(&host->spi->dev,
				"can't change chip-select polarity\n");
		host->spi->mode ^= SPI_CS_HIGH;
	} else {
		mmc_spi_readbytes(host, 18);

		host->spi->mode ^= SPI_CS_HIGH;
		if (spi_setup(host->spi) != 0) {
			/* should not happen; warn and carry on */
			dev_err(&host->spi->dev,
					"can't restore chip-select polarity\n");
		}
	}
}

static char *mmc_powerstring(u8 power_mode)
{
	switch (power_mode) {
	case MMC_POWER_OFF:	return "off";
	case MMC_POWER_UP:	return "up";
	case MMC_POWER_ON:	return "on";
	}
	return "?";
}

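/*
 * The mmc_host set_ios() method:  handles slot power switching (when
 * platform data provides a setpower hook), runs the init sequence on
 * power-on, and applies bus clock changes via spi_setup().
 */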
static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_spi_host *host = mmc_priv(mmc);

	if (host->power_mode != ios->power_mode) {
		int canpower;

		canpower = host->pdata && host->pdata->setpower;

		dev_dbg(&host->spi->dev, "power %s (%d)%s\n",
				mmc_powerstring(ios->power_mode),
				ios->vdd,
				canpower ? ", can switch" : "");

		/* switch power on/off if possible, allowing for the
		 * powerup delay (capped at 250 ms in probe)
		 */
		if (canpower) {
			switch (ios->power_mode) {
			case MMC_POWER_OFF:
			case MMC_POWER_UP:
				host->pdata->setpower(&host->spi->dev,
						ios->vdd);
				if (ios->power_mode == MMC_POWER_UP)
					msleep(host->powerup_msecs);
			}
		}

		/* the card needs its initialization sequence once power
		 * is stable, before the first command
		 */
		if (ios->power_mode == MMC_POWER_ON)
			mmc_spi_initsequence(host);

		/* If powering down, ground all card inputs so the card
		 * can't keep drawing power through the data lines:  SPI
		 * mode 0 idles the clock low, and writing a 0x00 byte
		 * drives MOSI low.
		 */
		if (canpower && ios->power_mode == MMC_POWER_OFF) {
			int mres;
			u8 nullbyte = 0;

			host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
			mres = spi_setup(host->spi);
			if (mres < 0)
				dev_dbg(&host->spi->dev,
					"switch to SPI mode 0 failed\n");

			if (spi_write(host->spi, &nullbyte, 1) < 0)
				dev_dbg(&host->spi->dev,
					"put spi signals to low failed\n");

			/* keep the lines low briefly while slot power drains */
			msleep(10);
			if (mres == 0) {
				host->spi->mode |= (SPI_CPOL|SPI_CPHA);
				mres = spi_setup(host->spi);
				if (mres < 0)
					dev_dbg(&host->spi->dev,
						"switch back to SPI mode 3 failed\n");
			}
		}

		host->power_mode = ios->power_mode;
	}

	if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
		int status;

		host->spi->max_speed_hz = ios->clock;
		status = spi_setup(host->spi);
		dev_dbg(&host->spi->dev, " clock to %d Hz, %d\n",
			host->spi->max_speed_hz, status);
	}
}

static const struct mmc_host_ops mmc_spi_ops = {
	.request	= mmc_spi_request,
	.set_ios	= mmc_spi_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
};

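/*
 * Card-detect interrupt handler, wired up through the platform data
 * init() hook; debounces for at least 100 ms before rescanning.
 */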
static irqreturn_t
mmc_spi_detect_irq(int irq, void *mmc)
{
	struct mmc_spi_host *host = mmc_priv(mmc);
	u16 delay_msec = max(host->pdata->detect_delay, (u16)100);

	mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
	return IRQ_HANDLED;
}

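/*
 * When the SPI master's parent device can do DMA, the "ones" buffer and
 * the scratch area are mapped once at probe time and handed to the
 * controller via the pre-mapped (is_dma_mapped) message path.
 */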
#ifdef CONFIG_HAS_DMA
static int mmc_spi_dma_alloc(struct mmc_spi_host *host)
{
	struct spi_device *spi = host->spi;
	struct device *dev;

	if (!spi->master->dev.parent->dma_mask)
		return 0;

	dev = spi->master->dev.parent;

	host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, host->ones_dma))
		return -ENOMEM;

	host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, host->data_dma)) {
		dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
				 DMA_TO_DEVICE);
		return -ENOMEM;
	}

	dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data),
				DMA_BIDIRECTIONAL);

	host->dma_dev = dev;
	return 0;
}

static void mmc_spi_dma_free(struct mmc_spi_host *host)
{
	if (!host->dma_dev)
		return;

	dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
			 DMA_TO_DEVICE);
	dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data),
			 DMA_BIDIRECTIONAL);
}
#else
static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; }
static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
#endif

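/*
 * Probe:  bind to an SPI device, allocate the mmc_host and buffers, set
 * up DMA mappings and the status-readback message, then register the
 * host plus optional card-detect and write-protect GPIOs.
 */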
static int mmc_spi_probe(struct spi_device *spi)
{
	void			*ones;
	struct mmc_host		*mmc;
	struct mmc_spi_host	*host;
	int			status;
	bool			has_ro = false;

	/* this driver relies on full-duplex transfers */
	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	/* MMC and SD cards in SPI mode sample on the rising clock edge,
	 * so SPI modes 0 and 3 both work.  Default to mode 0 unless the
	 * device was already configured for mode 3.
	 */
	if (spi->mode != SPI_MODE_3)
		spi->mode = SPI_MODE_0;
	spi->bits_per_word = 8;

	status = spi_setup(spi);
	if (status < 0) {
		dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
				spi->mode, spi->max_speed_hz / 1000,
				status);
		return status;
	}

	/* The card expects all-ones on MOSI whenever we only clock data
	 * in; allocate a block-sized buffer of 0xff for that.
	 */
	status = -ENOMEM;
	ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
	if (!ones)
		goto nomem;
	memset(ones, 0xff, MMC_SPI_BLOCKSIZE);

	mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
	if (!mmc)
		goto nomem;

	mmc->ops = &mmc_spi_ops;
	mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
	mmc->max_segs = MMC_SPI_BLOCKSATONCE;
	mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
	mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;

	mmc->caps = MMC_CAP_SPI;

	/* Initialization (and low-speed cards) use 400 kHz; f_max is
	 * whatever the SPI master supports.
	 */
	mmc->f_min = 400000;
	mmc->f_max = spi->max_speed_hz;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->spi = spi;

	host->ones = ones;

	dev_set_drvdata(&spi->dev, mmc);

	/* Platform data is used to hook up things like card sensing
	 * and power switching gpios.
	 */
	host->pdata = mmc_spi_get_pdata(spi);
	if (host->pdata)
		mmc->ocr_avail = host->pdata->ocr_mask;
	if (!mmc->ocr_avail) {
		dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
		mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
	}
	if (host->pdata && host->pdata->setpower) {
		host->powerup_msecs = host->pdata->powerup_msecs;
		if (!host->powerup_msecs || host->powerup_msecs > 250)
			host->powerup_msecs = 250;
	}

	/* preallocate the scratch buffer and its DMA mappings */
	host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
	if (!host->data)
		goto fail_nobuf1;

	status = mmc_spi_dma_alloc(host);
	if (status)
		goto fail_dma;

	/* preassemble the message used for status/busy readback */
	spi_message_init(&host->readback);
	host->readback.is_dma_mapped = (host->dma_dev != NULL);

	spi_message_add_tail(&host->status, &host->readback);
	host->status.tx_buf = host->ones;
	host->status.tx_dma = host->ones_dma;
	host->status.rx_buf = &host->data->status;
	host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
	host->status.cs_change = 1;

	/* register card detect irq through the platform glue */
	if (host->pdata && host->pdata->init) {
		status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
		if (status != 0)
			goto fail_glue_init;
	}

	/* pass platform capabilities, if any */
	if (host->pdata) {
		mmc->caps |= host->pdata->caps;
		mmc->caps2 |= host->pdata->caps2;
	}

	status = mmc_add_host(mmc);
	if (status != 0)
		goto fail_add_host;

	/* Index 0 is card detect; when a CD GPIO is present, use its
	 * interrupt instead of polling.
	 */
	status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
	if (status == -EPROBE_DEFER)
		goto fail_add_host;
	if (!status) {
		mmc->caps &= ~MMC_CAP_NEEDS_POLL;
		mmc_gpiod_request_cd_irq(mmc);
	}
	mmc_detect_change(mmc, 0);

	/* Index 1 is write protect/read only */
	status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
	if (status == -EPROBE_DEFER)
		goto fail_add_host;
	if (!status)
		has_ro = true;

	dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
			dev_name(&mmc->class_dev),
			host->dma_dev ? "" : ", no DMA",
			has_ro ? "" : ", no WP",
			(host->pdata && host->pdata->setpower)
				? "" : ", no poweroff",
			(mmc->caps & MMC_CAP_NEEDS_POLL)
				? ", cd polling" : "");
	return 0;

fail_add_host:
	mmc_remove_host(mmc);
fail_glue_init:
	mmc_spi_dma_free(host);
fail_dma:
	kfree(host->data);
fail_nobuf1:
	mmc_spi_put_pdata(spi);
	mmc_free_host(mmc);
nomem:
	kfree(ones);
	return status;
}
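/*
 * Remove:  tear down the platform glue, unregister the host, and release
 * the buffers and DMA mappings allocated at probe time.
 */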
static void mmc_spi_remove(struct spi_device *spi)
{
	struct mmc_host *mmc = dev_get_drvdata(&spi->dev);
	struct mmc_spi_host *host = mmc_priv(mmc);

	/* prevent new mmc_detect_change() calls */
	if (host->pdata && host->pdata->exit)
		host->pdata->exit(&spi->dev, mmc);

	mmc_remove_host(mmc);

	mmc_spi_dma_free(host);
	kfree(host->data);
	kfree(host->ones);

	spi->max_speed_hz = mmc->f_max;
	mmc_spi_put_pdata(spi);
	mmc_free_host(mmc);
}

static const struct spi_device_id mmc_spi_dev_ids[] = {
	{ "mmc-spi-slot" },
	{ },
};
MODULE_DEVICE_TABLE(spi, mmc_spi_dev_ids);

static const struct of_device_id mmc_spi_of_match_table[] = {
	{ .compatible = "mmc-spi-slot", },
	{},
};
MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table);

static struct spi_driver mmc_spi_driver = {
	.driver = {
		.name		= "mmc_spi",
		.of_match_table	= mmc_spi_of_match_table,
	},
	.id_table	= mmc_spi_dev_ids,
	.probe		= mmc_spi_probe,
	.remove		= mmc_spi_remove,
};

module_spi_driver(mmc_spi_driver);

MODULE_AUTHOR("Mike Lavender, David Brownell, Hans-Peter Nilsson, Jan Nikitenko");
MODULE_DESCRIPTION("SPI SD/MMC host driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:mmc_spi");