Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Access SD/MMC cards through SPI master controllers
0004  *
0005  * (C) Copyright 2005, Intec Automation,
0006  *      Mike Lavender (mike@steroidmicros)
0007  * (C) Copyright 2006-2007, David Brownell
0008  * (C) Copyright 2007, Axis Communications,
0009  *      Hans-Peter Nilsson (hp@axis.com)
0010  * (C) Copyright 2007, ATRON electronic GmbH,
0011  *      Jan Nikitenko <jan.nikitenko@gmail.com>
0012  */
0013 #include <linux/sched.h>
0014 #include <linux/delay.h>
0015 #include <linux/slab.h>
0016 #include <linux/module.h>
0017 #include <linux/bio.h>
0018 #include <linux/dma-mapping.h>
0019 #include <linux/crc7.h>
0020 #include <linux/crc-itu-t.h>
0021 #include <linux/scatterlist.h>
0022 
0023 #include <linux/mmc/host.h>
0024 #include <linux/mmc/mmc.h>      /* for R1_SPI_* bit values */
0025 #include <linux/mmc/slot-gpio.h>
0026 
0027 #include <linux/spi/spi.h>
0028 #include <linux/spi/mmc_spi.h>
0029 
0030 #include <asm/unaligned.h>
0031 
0032 
0033 /* NOTES:
0034  *
0035  * - For now, we won't try to interoperate with a real mmc/sd/sdio
0036  *   controller, although some of them do have hardware support for
0037  *   SPI protocol.  The main reason for such configs would be mmc-ish
0038  *   cards like DataFlash, which don't support that "native" protocol.
0039  *
0040  *   We don't have a "DataFlash/MMC/SD/SDIO card slot" abstraction to
0041  *   switch between driver stacks, and in any case if "native" mode
0042  *   is available, it will be faster and hence preferable.
0043  *
0044  * - MMC depends on a different chipselect management policy than the
0045  *   SPI interface currently supports for shared bus segments:  it needs
0046  *   to issue multiple spi_message requests with the chipselect active,
0047  *   using the results of one message to decide the next one to issue.
0048  *
0049  *   Pending updates to the programming interface, this driver expects
0050  *   that it not share the bus with other drivers (precluding conflicts).
0051  *
0052  * - We tell the controller to keep the chipselect active from the
0053  *   beginning of an mmc_host_ops.request until the end.  So beware
0054  *   of SPI controller drivers that mis-handle the cs_change flag!
0055  *
0056  *   However, many cards seem OK with chipselect flapping up/down
0057  *   during that time ... at least on unshared bus segments.
0058  */
0059 
0060 
0061 /*
0062  * Local protocol constants, internal to data block protocols.
0063  */
0064 
/* Response tokens used to ack each block written: */
#define SPI_MMC_RESPONSE_CODE(x)    ((x) & 0x1f)
#define SPI_RESPONSE_ACCEPTED       ((2 << 1)|1)
#define SPI_RESPONSE_CRC_ERR        ((5 << 1)|1)
#define SPI_RESPONSE_WRITE_ERR      ((6 << 1)|1)

/* Read and write blocks start with these tokens and end with crc;
 * on error, read tokens act like a subset of R2_SPI_* values.
 */
#define SPI_TOKEN_SINGLE    0xfe    /* single block r/w, multiblock read */
#define SPI_TOKEN_MULTI_WRITE   0xfc    /* multiblock write */
#define SPI_TOKEN_STOP_TRAN 0xfd    /* terminate multiblock write */

/* SPI-mode data blocks are always one 512-byte sector */
#define MMC_SPI_BLOCKSIZE   512

/* Worst-case waits, in milliseconds: R1B busy and card init */
#define MMC_SPI_R1B_TIMEOUT_MS  3000
#define MMC_SPI_INIT_TIMEOUT_MS 3000

/* One of the critical speed parameters is the amount of data which may
 * be transferred in one command. If this value is too low, the SD card
 * controller has to do multiple partial block writes (argggh!). With
 * today (2008) SD cards there is little speed gain if we transfer more
 * than 64 KBytes at a time. So use this value until there is any indication
 * that we should do more here.
 */
#define MMC_SPI_BLOCKSATONCE    128
0091 
0092 /****************************************************************************/
0093 
0094 /*
0095  * Local Data Structures
0096  */
0097 
/* "scratch" is per-{command,block} data exchanged with the card;
 * it is DMA-mapped as one unit, so field offsets matter (see the
 * offsetof() uses in mmc_spi_setup_data_message()).
 */
struct scratch {
    u8          status[29];     /* command response / busy-poll bytes */
    u8          data_token;     /* start-of-block token for writes */
    __be16          crc_val;        /* CRC16 of a data block, wire order */
};
0104 
/* Per-slot driver state, tying one mmc_host to one spi_device. */
struct mmc_spi_host {
    struct mmc_host     *mmc;
    struct spi_device   *spi;

    unsigned char       power_mode;     /* MMC_POWER_* bookkeeping */
    u16         powerup_msecs;

    struct mmc_spi_platform_data    *pdata;

    /* for bulk data transfers; the transfers are chained into "m"
     * by mmc_spi_setup_data_message()
     */
    struct spi_transfer token, t, crc, early_status;
    struct spi_message  m;

    /* for status readback (mmc_spi_readbytes() uses this message) */
    struct spi_transfer status;
    struct spi_message  readback;

    /* underlying DMA-aware controller, or null */
    struct device       *dma_dev;

    /* buffer used for commands and for message "overhead" */
    struct scratch      *data;
    dma_addr_t      data_dma;

    /* Specs say to write ones most of the time, even when the card
     * has no need to read its input data; and many cards won't care.
     * This is our source of those ones.
     */
    void            *ones;
    dma_addr_t      ones_dma;
};
0136 
0137 
0138 /****************************************************************************/
0139 
0140 /*
0141  * MMC-over-SPI protocol glue, used by the MMC stack interface
0142  */
0143 
/* Deassert the card's chipselect by re-running spi_setup(). */
static inline int mmc_cs_off(struct mmc_spi_host *host)
{
    /* chipselect will always be inactive after setup() */
    return spi_setup(host->spi);
}
0149 
0150 static int
0151 mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len)
0152 {
0153     int status;
0154 
0155     if (len > sizeof(*host->data)) {
0156         WARN_ON(1);
0157         return -EIO;
0158     }
0159 
0160     host->status.len = len;
0161 
0162     if (host->dma_dev)
0163         dma_sync_single_for_device(host->dma_dev,
0164                 host->data_dma, sizeof(*host->data),
0165                 DMA_FROM_DEVICE);
0166 
0167     status = spi_sync_locked(host->spi, &host->readback);
0168 
0169     if (host->dma_dev)
0170         dma_sync_single_for_cpu(host->dma_dev,
0171                 host->data_dma, sizeof(*host->data),
0172                 DMA_FROM_DEVICE);
0173 
0174     return status;
0175 }
0176 
0177 static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
0178             unsigned n, u8 byte)
0179 {
0180     u8 *cp = host->data->status;
0181     unsigned long start = jiffies;
0182 
0183     do {
0184         int     status;
0185         unsigned    i;
0186 
0187         status = mmc_spi_readbytes(host, n);
0188         if (status < 0)
0189             return status;
0190 
0191         for (i = 0; i < n; i++) {
0192             if (cp[i] != byte)
0193                 return cp[i];
0194         }
0195 
0196         /* If we need long timeouts, we may release the CPU */
0197         cond_resched();
0198     } while (time_is_after_jiffies(start + timeout));
0199     return -ETIMEDOUT;
0200 }
0201 
/* Wait for end-of-busy: the card holds the line all-zeroes while busy,
 * so skip zero bytes (a full scratch buffer at a time) until it stops.
 */
static inline int
mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout)
{
    return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0);
}
0207 
/* Skip the all-ones padding before a read data block; returns the
 * first non-0xff byte (the data/error token) or a negative errno.
 */
static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout)
{
    return mmc_spi_skip(host, timeout, 1, 0xff);
}
0212 
0213 
0214 /*
0215  * Note that for SPI, cmd->resp[0] is not the same data as "native" protocol
0216  * hosts return!  The low byte holds R1_SPI bits.  The next byte may hold
0217  * R2_SPI bits ... for SEND_STATUS, or after data read errors.
0218  *
0219  * cmd->resp[1] holds any four-byte response, for R3 (READ_OCR) and on
0220  * newer cards R7 (IF_COND).
0221  */
0222 
0223 static char *maptype(struct mmc_command *cmd)
0224 {
0225     switch (mmc_spi_resp_type(cmd)) {
0226     case MMC_RSP_SPI_R1:    return "R1";
0227     case MMC_RSP_SPI_R1B:   return "R1B";
0228     case MMC_RSP_SPI_R2:    return "R2/R5";
0229     case MMC_RSP_SPI_R3:    return "R3/R4/R7";
0230     default:        return "?";
0231     }
0232 }
0233 
/* Collect and decode the card's response to a command that
 * mmc_spi_command_send() already clocked out.  Returns zero, else
 * negative errno after setting cmd->error.  With @cs_on set and no
 * error, the chipselect is left active for a following data phase;
 * otherwise mmc_cs_off() is called before returning.
 */
static int mmc_spi_response_get(struct mmc_spi_host *host,
        struct mmc_command *cmd, int cs_on)
{
    unsigned long timeout_ms;
    u8  *cp = host->data->status;
    u8  *end = cp + host->t.len;
    int value = 0;
    int bitshift;       /* nonzero for cards that misalign the response */
    u8  leftover = 0;   /* carry bits when realigning a shifted response */
    unsigned short rotator;
    int     i;
    char    tag[32];

    snprintf(tag, sizeof(tag), "  ... CMD%d response SPI_%s",
        cmd->opcode, maptype(cmd));

    /* Except for data block reads, the whole response will already
     * be stored in the scratch buffer.  It's somewhere after the
     * command and the first byte we read after it.  We ignore that
     * first byte.  After STOP_TRANSMISSION command it may include
     * two data bits, but otherwise it's all ones.
     */
    cp += 8;
    while (cp < end && *cp == 0xff)
        cp++;

    /* Data block reads (R1 response types) may need more data... */
    if (cp == end) {
        cp = host->data->status;
        end = cp+1;

        /* Card sends N(CR) (== 1..8) bytes of all-ones then one
         * status byte ... and we already scanned 2 bytes.
         *
         * REVISIT block read paths use nasty byte-at-a-time I/O
         * so it can always DMA directly into the target buffer.
         * It'd probably be better to memcpy() the first chunk and
         * avoid extra i/o calls...
         *
         * Note we check for more than 8 bytes, because in practice,
         * some SD cards are slow...
         */
        for (i = 2; i < 16; i++) {
            value = mmc_spi_readbytes(host, 1);
            if (value < 0)
                goto done;
            if (*cp != 0xff)
                goto checkstatus;
        }
        value = -ETIMEDOUT;
        goto done;
    }

checkstatus:
    bitshift = 0;
    if (*cp & 0x80) {
        /* Houston, we have an ugly card with a bit-shifted response */
        rotator = *cp++ << 8;
        /* read the next byte */
        if (cp == end) {
            value = mmc_spi_readbytes(host, 1);
            if (value < 0)
                goto done;
            cp = host->data->status;
            end = cp+1;
        }
        rotator |= *cp++;
        /* shift left until the R1 start bit (0) becomes the MSB */
        while (rotator & 0x8000) {
            bitshift++;
            rotator <<= 1;
        }
        cmd->resp[0] = rotator >> 8;
        leftover = rotator;
    } else {
        cmd->resp[0] = *cp++;
    }
    cmd->error = 0;

    /* Status byte: the entire seven-bit R1 response.  */
    if (cmd->resp[0] != 0) {
        if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS)
                & cmd->resp[0])
            value = -EFAULT; /* Bad address */
        else if (R1_SPI_ILLEGAL_COMMAND & cmd->resp[0])
            value = -ENOSYS; /* Function not implemented */
        else if (R1_SPI_COM_CRC & cmd->resp[0])
            value = -EILSEQ; /* Illegal byte sequence */
        else if ((R1_SPI_ERASE_SEQ | R1_SPI_ERASE_RESET)
                & cmd->resp[0])
            value = -EIO;    /* I/O error */
        /* else R1_SPI_IDLE, "it's resetting" */
    }

    switch (mmc_spi_resp_type(cmd)) {

    /* SPI R1B == R1 + busy; STOP_TRANSMISSION (for multiblock reads)
     * and less-common stuff like various erase operations.
     */
    case MMC_RSP_SPI_R1B:
        /* maybe we read all the busy tokens already */
        while (cp < end && *cp == 0)
            cp++;
        if (cp == end) {
            timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
                MMC_SPI_R1B_TIMEOUT_MS;
            mmc_spi_wait_unbusy(host, msecs_to_jiffies(timeout_ms));
        }
        break;

    /* SPI R2 == R1 + second status byte; SEND_STATUS
     * SPI R5 == R1 + data byte; IO_RW_DIRECT
     */
    case MMC_RSP_SPI_R2:
        /* read the next byte */
        if (cp == end) {
            value = mmc_spi_readbytes(host, 1);
            if (value < 0)
                goto done;
            cp = host->data->status;
            end = cp+1;
        }
        if (bitshift) {
            /* realign the bit-shifted second byte */
            rotator = leftover << 8;
            rotator |= *cp << bitshift;
            cmd->resp[0] |= (rotator & 0xFF00);
        } else {
            cmd->resp[0] |= *cp << 8;
        }
        break;

    /* SPI R3, R4, or R7 == R1 + 4 bytes */
    case MMC_RSP_SPI_R3:
        rotator = leftover << 8;
        cmd->resp[1] = 0;
        for (i = 0; i < 4; i++) {
            cmd->resp[1] <<= 8;
            /* read the next byte */
            if (cp == end) {
                value = mmc_spi_readbytes(host, 1);
                if (value < 0)
                    goto done;
                cp = host->data->status;
                end = cp+1;
            }
            if (bitshift) {
                /* realign each bit-shifted payload byte */
                rotator |= *cp++ << bitshift;
                cmd->resp[1] |= (rotator >> 8);
                rotator <<= 8;
            } else {
                cmd->resp[1] |= *cp++;
            }
        }
        break;

    /* SPI R1 == just one status byte */
    case MMC_RSP_SPI_R1:
        break;

    default:
        dev_dbg(&host->spi->dev, "bad response type %04x\n",
            mmc_spi_resp_type(cmd));
        if (value >= 0)
            value = -EINVAL;
        goto done;
    }

    if (value < 0)
        dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n",
            tag, cmd->resp[0], cmd->resp[1]);

    /* disable chipselect on errors and some success cases */
    if (value >= 0 && cs_on)
        return value;
done:
    if (value < 0)
        cmd->error = value;
    mmc_cs_off(host);
    return value;
}
0414 
/* Issue command and read its response.
 * Returns zero on success, negative for error.
 *
 * On error, caller must cope with mmc core retry mechanism.  That
 * means immediate low-level resubmit, which affects the bus lock...
 *
 * Builds the 7-byte command frame plus enough all-ones readback in a
 * single full-duplex transfer, then hands off to
 * mmc_spi_response_get() for decoding.  With @cs_on, the chipselect
 * is left active afterward so a data phase can follow.
 */
static int
mmc_spi_command_send(struct mmc_spi_host *host,
        struct mmc_request *mrq,
        struct mmc_command *cmd, int cs_on)
{
    struct scratch      *data = host->data;
    u8          *cp = data->status;
    int         status;
    struct spi_transfer *t;

    /* We can handle most commands (except block reads) in one full
     * duplex I/O operation before either starting the next transfer
     * (data block or command) or else deselecting the card.
     *
     * First, write 7 bytes:
     *  - an all-ones byte to ensure the card is ready
     *  - opcode byte (plus start and transmission bits)
     *  - four bytes of big-endian argument
     *  - crc7 (plus end bit) ... always computed, it's cheap
     *
     * We init the whole buffer to all-ones, which is what we need
     * to write while we're reading (later) response data.
     */
    memset(cp, 0xff, sizeof(data->status));

    cp[1] = 0x40 | cmd->opcode;     /* start bit (0) + transmission bit (1) */
    put_unaligned_be32(cmd->arg, cp + 2);
    cp[6] = crc7_be(0, cp + 1, 5) | 0x01;   /* CRC7 + end bit */
    cp += 7;

    /* Then, read up to 13 bytes (while writing all-ones):
     *  - N(CR) (== 1..8) bytes of all-ones
     *  - status byte (for all response types)
     *  - the rest of the response, either:
     *      + nothing, for R1 or R1B responses
     *  + second status byte, for R2 responses
     *  + four data bytes, for R3 and R7 responses
     *
     * Finally, read some more bytes ... in the nice cases we know in
     * advance how many, and reading 1 more is always OK:
     *  - N(EC) (== 0..N) bytes of all-ones, before deselect/finish
     *  - N(RC) (== 1..N) bytes of all-ones, before next command
     *  - N(WR) (== 1..N) bytes of all-ones, before data write
     *
     * So in those cases one full duplex I/O of at most 21 bytes will
     * handle the whole command, leaving the card ready to receive a
     * data block or new command.  We do that whenever we can, shaving
     * CPU and IRQ costs (especially when using DMA or FIFOs).
     *
     * There are two other cases, where it's not generally practical
     * to rely on a single I/O:
     *
     *  - R1B responses need at least N(EC) bytes of all-zeroes.
     *
     *    In this case we can *try* to fit it into one I/O, then
     *    maybe read more data later.
     *
     *  - Data block reads are more troublesome, since a variable
     *    number of padding bytes precede the token and data.
     *      + N(CX) (== 0..8) bytes of all-ones, before CSD or CID
     *      + N(AC) (== 1..many) bytes of all-ones
     *
     *    In this case we currently only have minimal speedups here:
     *    when N(CR) == 1 we can avoid I/O in response_get().
     */
    if (cs_on && (mrq->data->flags & MMC_DATA_READ)) {
        cp += 2;    /* min(N(CR)) + status */
        /* R1 */
    } else {
        cp += 10;   /* max(N(CR)) + status + min(N(RC),N(WR)) */
        if (cmd->flags & MMC_RSP_SPI_S2)    /* R2/R5 */
            cp++;
        else if (cmd->flags & MMC_RSP_SPI_B4)   /* R3/R4/R7 */
            cp += 4;
        else if (cmd->flags & MMC_RSP_BUSY) /* R1B */
            cp = data->status + sizeof(data->status);
        /* else:  R1 (most commands) */
    }

    dev_dbg(&host->spi->dev, "  CMD%d, resp %s\n",
        cmd->opcode, maptype(cmd));

    /* send command, leaving chipselect active */
    spi_message_init(&host->m);

    /* one bidirectional transfer over the whole scratch prefix */
    t = &host->t;
    memset(t, 0, sizeof(*t));
    t->tx_buf = t->rx_buf = data->status;
    t->tx_dma = t->rx_dma = host->data_dma;
    t->len = cp - data->status;
    t->cs_change = 1;
    spi_message_add_tail(t, &host->m);

    if (host->dma_dev) {
        host->m.is_dma_mapped = 1;
        dma_sync_single_for_device(host->dma_dev,
                host->data_dma, sizeof(*host->data),
                DMA_BIDIRECTIONAL);
    }
    status = spi_sync_locked(host->spi, &host->m);

    if (host->dma_dev)
        dma_sync_single_for_cpu(host->dma_dev,
                host->data_dma, sizeof(*host->data),
                DMA_BIDIRECTIONAL);
    if (status < 0) {
        dev_dbg(&host->spi->dev, "  ... write returned %d\n", status);
        cmd->error = status;
        return status;
    }

    /* after no-data commands and STOP_TRANSMISSION, chipselect off */
    return mmc_spi_response_get(host, cmd, cs_on);
}
0535 
/* Build data message with up to four separate transfers.  For TX, we
 * start by writing the data token.  And in most cases, we finish with
 * a status transfer.
 *
 * We always provide TX data for data and CRC.  The MMC/SD protocol
 * requires us to write ones; but Linux defaults to writing zeroes;
 * so we explicitly initialize it to all ones on RX paths.
 *
 * We also handle DMA mapping, so the underlying SPI controller does
 * not need to (re)do it for each message.
 *
 * The transfer chain is assembled into host->m as:
 *   [token (TX only)] -> data body (host->t) -> crc -> [early status]
 * and the per-block loop in the data path later patches host->t's
 * buffer pointers and length for each block.
 */
static void
mmc_spi_setup_data_message(
    struct mmc_spi_host *host,
    bool            multiple,
    enum dma_data_direction direction)
{
    struct spi_transfer *t;
    struct scratch      *scratch = host->data;
    dma_addr_t      dma = host->data_dma;

    spi_message_init(&host->m);
    if (dma)
        host->m.is_dma_mapped = 1;

    /* for reads, readblock() skips 0xff bytes before finding
     * the token; for writes, this transfer issues that token.
     */
    if (direction == DMA_TO_DEVICE) {
        t = &host->token;
        memset(t, 0, sizeof(*t));
        t->len = 1;
        if (multiple)
            scratch->data_token = SPI_TOKEN_MULTI_WRITE;
        else
            scratch->data_token = SPI_TOKEN_SINGLE;
        t->tx_buf = &scratch->data_token;
        if (dma)
            t->tx_dma = dma + offsetof(struct scratch, data_token);
        spi_message_add_tail(t, &host->m);
    }

    /* Body of transfer is buffer, then CRC ...
     * either TX-only, or RX with TX-ones.
     */
    t = &host->t;
    memset(t, 0, sizeof(*t));
    t->tx_buf = host->ones;
    t->tx_dma = host->ones_dma;
    /* length and actual buffer info are written later */
    spi_message_add_tail(t, &host->m);

    t = &host->crc;
    memset(t, 0, sizeof(*t));
    t->len = 2;
    if (direction == DMA_TO_DEVICE) {
        /* the actual CRC may get written later */
        t->tx_buf = &scratch->crc_val;
        if (dma)
            t->tx_dma = dma + offsetof(struct scratch, crc_val);
    } else {
        t->tx_buf = host->ones;
        t->tx_dma = host->ones_dma;
        t->rx_buf = &scratch->crc_val;
        if (dma)
            t->rx_dma = dma + offsetof(struct scratch, crc_val);
    }
    spi_message_add_tail(t, &host->m);

    /*
     * A single block read is followed by N(EC) [0+] all-ones bytes
     * before deselect ... don't bother.
     *
     * Multiblock reads are followed by N(AC) [1+] all-ones bytes before
     * the next block is read, or a STOP_TRANSMISSION is issued.  We'll
     * collect that single byte, so readblock() doesn't need to.
     *
     * For a write, the one-byte data response follows immediately, then
     * come zero or more busy bytes, then N(WR) [1+] all-ones bytes.
     * Then single block reads may deselect, and multiblock ones issue
     * the next token (next data block, or STOP_TRAN).  We can try to
     * minimize I/O ops by using a single read to collect end-of-busy.
     */
    if (multiple || direction == DMA_TO_DEVICE) {
        t = &host->early_status;
        memset(t, 0, sizeof(*t));
        t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
        t->tx_buf = host->ones;
        t->tx_dma = host->ones_dma;
        t->rx_buf = scratch->status;
        if (dma)
            t->rx_dma = dma + offsetof(struct scratch, status);
        t->cs_change = 1;
        spi_message_add_tail(t, &host->m);
    }
}
0632 
/*
 * Write one block:
 *  - caller handled preceding N(WR) [1+] all-ones bytes
 *  - data block
 *  + token
 *  + data bytes
 *  + crc16
 *  - an all-ones byte ... card writes a data-response byte
 *  - followed by N(EC) [0+] all-ones bytes, card writes zero/'busy'
 *
 * Return negative errno, else success.
 *
 * On success, advances t->tx_buf (and tx_dma) past the block so the
 * caller's per-segment loop can send the next one.
 */
static int
mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
    unsigned long timeout)
{
    struct spi_device   *spi = host->spi;
    int         status, i;
    struct scratch      *scratch = host->data;
    u32         pattern;

    if (host->mmc->use_spi_crc)
        scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
    if (host->dma_dev)
        dma_sync_single_for_device(host->dma_dev,
                host->data_dma, sizeof(*scratch),
                DMA_BIDIRECTIONAL);

    status = spi_sync_locked(spi, &host->m);

    if (status != 0) {
        dev_dbg(&spi->dev, "write error (%d)\n", status);
        return status;
    }

    if (host->dma_dev)
        dma_sync_single_for_cpu(host->dma_dev,
                host->data_dma, sizeof(*scratch),
                DMA_BIDIRECTIONAL);

    /*
     * Get the transmission data-response reply.  It must follow
     * immediately after the data block we transferred.  This reply
     * doesn't necessarily tell whether the write operation succeeded;
     * it just says if the transmission was ok and whether *earlier*
     * writes succeeded; see the standard.
     *
     * In practice, there are (even modern SDHC-)cards which are late
     * in sending the response, and miss the time frame by a few bits,
     * so we have to cope with this situation and check the response
     * bit-by-bit. Arggh!!!
     */
    pattern = get_unaligned_be32(scratch->status);

    /* First 3 bit of pattern are undefined */
    pattern |= 0xE0000000;

    /* left-adjust to leading 0 bit */
    while (pattern & 0x80000000)
        pattern <<= 1;
    /* right-adjust for pattern matching. Code is in bit 4..0 now. */
    pattern >>= 27;

    switch (pattern) {
    case SPI_RESPONSE_ACCEPTED:
        status = 0;
        break;
    case SPI_RESPONSE_CRC_ERR:
        /* host shall then issue MMC_STOP_TRANSMISSION */
        status = -EILSEQ;
        break;
    case SPI_RESPONSE_WRITE_ERR:
        /* host shall then issue MMC_STOP_TRANSMISSION,
         * and should MMC_SEND_STATUS to sort it out
         */
        status = -EIO;
        break;
    default:
        status = -EPROTO;
        break;
    }
    if (status != 0) {
        dev_dbg(&spi->dev, "write error %02x (%d)\n",
            scratch->status[0], status);
        return status;
    }

    /* advance past this block for the caller's segment loop */
    t->tx_buf += t->len;
    if (host->dma_dev)
        t->tx_dma += t->len;

    /* Return when not busy.  If we didn't collect that status yet,
     * we'll need some more I/O.
     */
    for (i = 4; i < sizeof(scratch->status); i++) {
        /* card is non-busy if the most recent bit is 1 */
        if (scratch->status[i] & 0x01)
            return 0;
    }
    return mmc_spi_wait_unbusy(host, timeout);
}
0734 
/*
 * Read one block:
 *  - skip leading all-ones bytes ... either
 *      + N(AC) [1..f(clock,CSD)] usually, else
 *      + N(CX) [0..8] when reading CSD or CID
 *  - data block
 *  + token ... if error token, no data or crc
 *  + data bytes
 *  + crc16
 *
 * After single block reads, we're done; N(EC) [0+] all-ones bytes follow
 * before dropping chipselect.
 *
 * For multiblock reads, caller either reads the next block or issues a
 * STOP_TRANSMISSION command.
 *
 * On success, advances t->rx_buf (and rx_dma) past the block.
 */
static int
mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
    unsigned long timeout)
{
    struct spi_device   *spi = host->spi;
    int         status;
    struct scratch      *scratch = host->data;
    unsigned int        bitshift;   /* how far the token was misaligned */
    u8          leftover;   /* residual bits carried between bytes */

    /* At least one SD card sends an all-zeroes byte when N(CX)
     * applies, before the all-ones bytes ... just cope with that.
     */
    status = mmc_spi_readbytes(host, 1);
    if (status < 0)
        return status;
    status = scratch->status[0];
    if (status == 0xff || status == 0)
        status = mmc_spi_readtoken(host, timeout);

    if (status < 0) {
        dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status);
        return status;
    }

    /* The token may be bit-shifted...
     * the first 0-bit precedes the data stream.
     */
    bitshift = 7;
    while (status & 0x80) {
        status <<= 1;
        bitshift--;
    }
    leftover = status << 1;

    if (host->dma_dev) {
        dma_sync_single_for_device(host->dma_dev,
                host->data_dma, sizeof(*scratch),
                DMA_BIDIRECTIONAL);
        dma_sync_single_for_device(host->dma_dev,
                t->rx_dma, t->len,
                DMA_FROM_DEVICE);
    }

    status = spi_sync_locked(spi, &host->m);
    if (status < 0) {
        dev_dbg(&spi->dev, "read error %d\n", status);
        return status;
    }

    if (host->dma_dev) {
        dma_sync_single_for_cpu(host->dma_dev,
                host->data_dma, sizeof(*scratch),
                DMA_BIDIRECTIONAL);
        dma_sync_single_for_cpu(host->dma_dev,
                t->rx_dma, t->len,
                DMA_FROM_DEVICE);
    }

    if (bitshift) {
        /* Walk through the data and the crc and do
         * all the magic to get byte-aligned data.
         */
        u8 *cp = t->rx_buf;
        unsigned int len;
        unsigned int bitright = 8 - bitshift;
        u8 temp;
        for (len = t->len; len; len--) {
            temp = *cp;
            *cp++ = leftover | (temp >> bitshift);
            leftover = temp << bitright;
        }
        /* the two CRC bytes need the same realignment */
        cp = (u8 *) &scratch->crc_val;
        temp = *cp;
        *cp++ = leftover | (temp >> bitshift);
        leftover = temp << bitright;
        temp = *cp;
        *cp = leftover | (temp >> bitshift);
    }

    if (host->mmc->use_spi_crc) {
        u16 crc = crc_itu_t(0, t->rx_buf, t->len);

        be16_to_cpus(&scratch->crc_val);
        if (scratch->crc_val != crc) {
            dev_dbg(&spi->dev,
                "read - crc error: crc_val=0x%04x, computed=0x%04x len=%d\n",
                scratch->crc_val, crc, t->len);
            return -EILSEQ;
        }
    }

    /* advance past this block for the caller's segment loop */
    t->rx_buf += t->len;
    if (host->dma_dev)
        t->rx_dma += t->len;

    return 0;
}
0849 
0850 /*
0851  * An MMC/SD data stage includes one or more blocks, optional CRCs,
0852  * and inline handshaking.  That handhaking makes it unlike most
0853  * other SPI protocol stacks.
0854  */
static void
mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
        struct mmc_data *data, u32 blk_size)
{
    struct spi_device   *spi = host->spi;
    struct device       *dma_dev = host->dma_dev;
    struct spi_transfer *t;
    enum dma_data_direction direction = mmc_get_dma_dir(data);
    struct scatterlist  *sg;
    unsigned        n_sg;
    bool            multiple = (data->blocks > 1);
    const char      *write_or_read = (direction == DMA_TO_DEVICE) ? "write" : "read";
    u32         clock_rate;
    unsigned long       timeout;

    mmc_spi_setup_data_message(host, multiple, direction);
    t = &host->t;

    /* Prefer a per-transfer speed override; fall back to the bus max. */
    if (t->speed_hz)
        clock_rate = t->speed_hz;
    else
        clock_rate = spi->max_speed_hz;

    /* Per-block timeout = ns component + clock-count component converted
     * at the effective SPI clock; +1 jiffy rounds up after conversion.
     */
    timeout = data->timeout_ns / 1000 +
          data->timeout_clks * 1000000 / clock_rate;
    timeout = usecs_to_jiffies((unsigned int)timeout) + 1;

    /* Handle scatterlist segments one at a time, with synch for
     * each 512-byte block
     */
    for_each_sg(data->sg, sg, data->sg_len, n_sg) {
        int         status = 0;
        dma_addr_t      dma_addr = 0;
        void            *kmap_addr;
        unsigned        length = sg->length;
        enum dma_data_direction dir = direction;

        /* set up dma mapping for controller drivers that might
         * use DMA ... though they may fall back to PIO
         */
        if (dma_dev) {
            /* never invalidate whole *shared* pages ... */
            if ((sg->offset != 0 || length != PAGE_SIZE)
                    && dir == DMA_FROM_DEVICE)
                dir = DMA_BIDIRECTIONAL;

            /* whole-page mapping; the sg offset is applied below */
            dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
                        PAGE_SIZE, dir);
            if (dma_mapping_error(dma_dev, dma_addr)) {
                data->error = -EFAULT;
                break;
            }
            if (direction == DMA_TO_DEVICE)
                t->tx_dma = dma_addr + sg->offset;
            else
                t->rx_dma = dma_addr + sg->offset;
        }

        /* allow pio too; we don't allow highmem */
        kmap_addr = kmap(sg_page(sg));
        if (direction == DMA_TO_DEVICE)
            t->tx_buf = kmap_addr + sg->offset;
        else
            t->rx_buf = kmap_addr + sg->offset;

        /* transfer each block, and update request status */
        while (length) {
            t->len = min(length, blk_size);

            dev_dbg(&spi->dev, "    %s block, %d bytes\n", write_or_read, t->len);

            if (direction == DMA_TO_DEVICE)
                status = mmc_spi_writeblock(host, t, timeout);
            else
                status = mmc_spi_readblock(host, t, timeout);
            if (status < 0)
                break;

            data->bytes_xfered += t->len;
            length -= t->len;

            /* single-block requests stop after exactly one block */
            if (!multiple)
                break;
        }

        /* discard mappings */
        if (direction == DMA_FROM_DEVICE)
            flush_dcache_page(sg_page(sg));
        kunmap(sg_page(sg));
        if (dma_dev)
            dma_unmap_page(dma_dev, dma_addr, PAGE_SIZE, dir);

        if (status < 0) {
            data->error = status;
            dev_dbg(&spi->dev, "%s status %d\n", write_or_read, status);
            break;
        }
    }

    /* NOTE some docs describe an MMC-only SET_BLOCK_COUNT (CMD23) that
     * can be issued before multiblock writes.  Unlike its more widely
     * documented analogue for SD cards (SET_WR_BLK_ERASE_COUNT, ACMD23),
     * that can affect the STOP_TRAN logic.   Complete (and current)
     * MMC specs should sort that out before Linux starts using CMD23.
     */
    if (direction == DMA_TO_DEVICE && multiple) {
        struct scratch  *scratch = host->data;
        int     tmp;
        const unsigned  statlen = sizeof(scratch->status);

        dev_dbg(&spi->dev, "    STOP_TRAN\n");

        /* Tweak the per-block message we set up earlier by morphing
         * it to hold single buffer with the token followed by some
         * all-ones bytes ... skip N(BR) (0..1), scan the rest for
         * "not busy any longer" status, and leave chip selected.
         */
        INIT_LIST_HEAD(&host->m.transfers);
        list_add(&host->early_status.transfer_list,
                &host->m.transfers);

        memset(scratch->status, 0xff, statlen);
        scratch->status[0] = SPI_TOKEN_STOP_TRAN;

        host->early_status.tx_buf = host->early_status.rx_buf;
        host->early_status.tx_dma = host->early_status.rx_dma;
        host->early_status.len = statlen;

        if (host->dma_dev)
            dma_sync_single_for_device(host->dma_dev,
                    host->data_dma, sizeof(*scratch),
                    DMA_BIDIRECTIONAL);

        tmp = spi_sync_locked(spi, &host->m);

        if (host->dma_dev)
            dma_sync_single_for_cpu(host->dma_dev,
                    host->data_dma, sizeof(*scratch),
                    DMA_BIDIRECTIONAL);

        if (tmp < 0) {
            /* keep any earlier data error; report the SPI one otherwise */
            if (!data->error)
                data->error = tmp;
            return;
        }

        /* Ideally we collected "not busy" status with one I/O,
         * avoiding wasteful byte-at-a-time scanning... but more
         * I/O is often needed.
         */
        for (tmp = 2; tmp < statlen; tmp++) {
            /* any nonzero byte means the card released busy: done */
            if (scratch->status[tmp] != 0)
                return;
        }
        tmp = mmc_spi_wait_unbusy(host, timeout);
        if (tmp < 0 && !data->error)
            data->error = tmp;
    }
}
1014 
1015 /****************************************************************************/
1016 
1017 /*
1018  * MMC driver implementation -- the interface to the MMC stack
1019  */
1020 
1021 static void mmc_spi_request(struct mmc_host *mmc, struct mmc_request *mrq)
1022 {
1023     struct mmc_spi_host *host = mmc_priv(mmc);
1024     int         status = -EINVAL;
1025     int         crc_retry = 5;
1026     struct mmc_command  stop;
1027 
1028 #ifdef DEBUG
1029     /* MMC core and layered drivers *MUST* issue SPI-aware commands */
1030     {
1031         struct mmc_command  *cmd;
1032         int         invalid = 0;
1033 
1034         cmd = mrq->cmd;
1035         if (!mmc_spi_resp_type(cmd)) {
1036             dev_dbg(&host->spi->dev, "bogus command\n");
1037             cmd->error = -EINVAL;
1038             invalid = 1;
1039         }
1040 
1041         cmd = mrq->stop;
1042         if (cmd && !mmc_spi_resp_type(cmd)) {
1043             dev_dbg(&host->spi->dev, "bogus STOP command\n");
1044             cmd->error = -EINVAL;
1045             invalid = 1;
1046         }
1047 
1048         if (invalid) {
1049             dump_stack();
1050             mmc_request_done(host->mmc, mrq);
1051             return;
1052         }
1053     }
1054 #endif
1055 
1056     /* request exclusive bus access */
1057     spi_bus_lock(host->spi->master);
1058 
1059 crc_recover:
1060     /* issue command; then optionally data and stop */
1061     status = mmc_spi_command_send(host, mrq, mrq->cmd, mrq->data != NULL);
1062     if (status == 0 && mrq->data) {
1063         mmc_spi_data_do(host, mrq->cmd, mrq->data, mrq->data->blksz);
1064 
1065         /*
1066          * The SPI bus is not always reliable for large data transfers.
1067          * If an occasional crc error is reported by the SD device with
1068          * data read/write over SPI, it may be recovered by repeating
1069          * the last SD command again. The retry count is set to 5 to
1070          * ensure the driver passes stress tests.
1071          */
1072         if (mrq->data->error == -EILSEQ && crc_retry) {
1073             stop.opcode = MMC_STOP_TRANSMISSION;
1074             stop.arg = 0;
1075             stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1076             status = mmc_spi_command_send(host, mrq, &stop, 0);
1077             crc_retry--;
1078             mrq->data->error = 0;
1079             goto crc_recover;
1080         }
1081 
1082         if (mrq->stop)
1083             status = mmc_spi_command_send(host, mrq, mrq->stop, 0);
1084         else
1085             mmc_cs_off(host);
1086     }
1087 
1088     /* release the bus */
1089     spi_bus_unlock(host->spi->master);
1090 
1091     mmc_request_done(host->mmc, mrq);
1092 }
1093 
1094 /* See Section 6.4.1, in SD "Simplified Physical Layer Specification 2.0"
1095  *
1096  * NOTE that here we can't know that the card has just been powered up;
1097  * not all MMC/SD sockets support power switching.
1098  *
1099  * FIXME when the card is still in SPI mode, e.g. from a previous kernel,
1100  * this doesn't seem to do the right thing at all...
1101  */
1102 static void mmc_spi_initsequence(struct mmc_spi_host *host)
1103 {
1104     /* Try to be very sure any previous command has completed;
1105      * wait till not-busy, skip debris from any old commands.
1106      */
1107     mmc_spi_wait_unbusy(host, msecs_to_jiffies(MMC_SPI_INIT_TIMEOUT_MS));
1108     mmc_spi_readbytes(host, 10);
1109 
1110     /*
1111      * Do a burst with chipselect active-high.  We need to do this to
1112      * meet the requirement of 74 clock cycles with both chipselect
1113      * and CMD (MOSI) high before CMD0 ... after the card has been
1114      * powered up to Vdd(min), and so is ready to take commands.
1115      *
1116      * Some cards are particularly needy of this (e.g. Viking "SD256")
1117      * while most others don't seem to care.
1118      *
1119      * Note that this is one of the places MMC/SD plays games with the
1120      * SPI protocol.  Another is that when chipselect is released while
1121      * the card returns BUSY status, the clock must issue several cycles
1122      * with chipselect high before the card will stop driving its output.
1123      *
1124      * SPI_CS_HIGH means "asserted" here. In some cases like when using
1125      * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically
1126      * inverted by gpiolib, so if we want to ascertain to drive it high
1127      * we should toggle the default with an XOR as we do here.
1128      */
1129     host->spi->mode ^= SPI_CS_HIGH;
1130     if (spi_setup(host->spi) != 0) {
1131         /* Just warn; most cards work without it. */
1132         dev_warn(&host->spi->dev,
1133                 "can't change chip-select polarity\n");
1134         host->spi->mode ^= SPI_CS_HIGH;
1135     } else {
1136         mmc_spi_readbytes(host, 18);
1137 
1138         host->spi->mode ^= SPI_CS_HIGH;
1139         if (spi_setup(host->spi) != 0) {
1140             /* Wot, we can't get the same setup we had before? */
1141             dev_err(&host->spi->dev,
1142                     "can't restore chip-select polarity\n");
1143         }
1144     }
1145 }
1146 
1147 static char *mmc_powerstring(u8 power_mode)
1148 {
1149     switch (power_mode) {
1150     case MMC_POWER_OFF: return "off";
1151     case MMC_POWER_UP:  return "up";
1152     case MMC_POWER_ON:  return "on";
1153     }
1154     return "?";
1155 }
1156 
/*
 * Apply ios settings from the MMC core: handle power-mode transitions
 * (optional slot power switching, the SD init sequence on power-on,
 * grounding the card inputs on power-off) and retune the SPI clock
 * when the core changes the bus frequency.
 */
static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
    struct mmc_spi_host *host = mmc_priv(mmc);

    if (host->power_mode != ios->power_mode) {
        int     canpower;

        /* platform data may provide a slot power switch hook */
        canpower = host->pdata && host->pdata->setpower;

        dev_dbg(&host->spi->dev, "power %s (%d)%s\n",
                mmc_powerstring(ios->power_mode),
                ios->vdd,
                canpower ? ", can switch" : "");

        /* switch power on/off if possible, accounting for
         * max 250msec powerup time if needed.
         */
        if (canpower) {
            switch (ios->power_mode) {
            case MMC_POWER_OFF:
            case MMC_POWER_UP:
                host->pdata->setpower(&host->spi->dev,
                        ios->vdd);
                if (ios->power_mode == MMC_POWER_UP)
                    msleep(host->powerup_msecs);
            }
        }

        /* See 6.4.1 in the simplified SD card physical spec 2.0 */
        if (ios->power_mode == MMC_POWER_ON)
            mmc_spi_initsequence(host);

        /* If powering down, ground all card inputs to avoid power
         * delivery from data lines!  On a shared SPI bus, this
         * will probably be temporary; 6.4.2 of the simplified SD
         * spec says this must last at least 1msec.
         *
         *   - Clock low means CPOL 0, e.g. mode 0
         *   - MOSI low comes from writing zero
         *   - Chipselect is usually active low...
         */
        if (canpower && ios->power_mode == MMC_POWER_OFF) {
            int mres;
            u8 nullbyte = 0;

            /* mode 0 parks SCK low while idle */
            host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
            mres = spi_setup(host->spi);
            if (mres < 0)
                dev_dbg(&host->spi->dev,
                    "switch to SPI mode 0 failed\n");

            if (spi_write(host->spi, &nullbyte, 1) < 0)
                dev_dbg(&host->spi->dev,
                    "put spi signals to low failed\n");

            /*
             * Now clock should be low due to spi mode 0;
             * MOSI should be low because of written 0x00;
             * chipselect should be low (it is active low)
             * power supply is off, so now MMC is off too!
             *
             * FIXME no, chipselect can be high since the
             * device is inactive and SPI_CS_HIGH is clear...
             */
            msleep(10);
            if (mres == 0) {
                /* restore mode 3 only if mode 0 took effect */
                host->spi->mode |= (SPI_CPOL|SPI_CPHA);
                mres = spi_setup(host->spi);
                if (mres < 0)
                    dev_dbg(&host->spi->dev,
                        "switch back to SPI mode 3 failed\n");
            }
        }

        host->power_mode = ios->power_mode;
    }

    if (host->spi->max_speed_hz != ios->clock && ios->clock != 0) {
        int     status;

        /* retarget the SPI clock to the frequency the core requested */
        host->spi->max_speed_hz = ios->clock;
        status = spi_setup(host->spi);
        dev_dbg(&host->spi->dev, "  clock to %d Hz, %d\n",
            host->spi->max_speed_hz, status);
    }
}
1243 
/* Host operations exported to the MMC core; card-detect and
 * write-protect sensing use the generic slot-gpio helpers.
 */
static const struct mmc_host_ops mmc_spi_ops = {
    .request    = mmc_spi_request,
    .set_ios    = mmc_spi_set_ios,
    .get_ro     = mmc_gpio_get_ro,
    .get_cd     = mmc_gpio_get_cd,
};
1250 
1251 
1252 /****************************************************************************/
1253 
1254 /*
1255  * SPI driver implementation
1256  */
1257 
1258 static irqreturn_t
1259 mmc_spi_detect_irq(int irq, void *mmc)
1260 {
1261     struct mmc_spi_host *host = mmc_priv(mmc);
1262     u16 delay_msec = max(host->pdata->detect_delay, (u16)100);
1263 
1264     mmc_detect_change(mmc, msecs_to_jiffies(delay_msec));
1265     return IRQ_HANDLED;
1266 }
1267 
1268 #ifdef CONFIG_HAS_DMA
1269 static int mmc_spi_dma_alloc(struct mmc_spi_host *host)
1270 {
1271     struct spi_device *spi = host->spi;
1272     struct device *dev;
1273 
1274     if (!spi->master->dev.parent->dma_mask)
1275         return 0;
1276 
1277     dev = spi->master->dev.parent;
1278 
1279     host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE,
1280                     DMA_TO_DEVICE);
1281     if (dma_mapping_error(dev, host->ones_dma))
1282         return -ENOMEM;
1283 
1284     host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
1285                     DMA_BIDIRECTIONAL);
1286     if (dma_mapping_error(dev, host->data_dma)) {
1287         dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
1288                  DMA_TO_DEVICE);
1289         return -ENOMEM;
1290     }
1291 
1292     dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data),
1293                 DMA_BIDIRECTIONAL);
1294 
1295     host->dma_dev = dev;
1296     return 0;
1297 }
1298 
1299 static void mmc_spi_dma_free(struct mmc_spi_host *host)
1300 {
1301     if (!host->dma_dev)
1302         return;
1303 
1304     dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
1305              DMA_TO_DEVICE);
1306     dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data),
1307              DMA_BIDIRECTIONAL);
1308 }
1309 #else
/* !CONFIG_HAS_DMA: no-op stubs so callers need no #ifdefs */
static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; }
static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
1312 #endif
1313 
1314 static int mmc_spi_probe(struct spi_device *spi)
1315 {
1316     void            *ones;
1317     struct mmc_host     *mmc;
1318     struct mmc_spi_host *host;
1319     int         status;
1320     bool            has_ro = false;
1321 
1322     /* We rely on full duplex transfers, mostly to reduce
1323      * per-transfer overheads (by making fewer transfers).
1324      */
1325     if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
1326         return -EINVAL;
1327 
1328     /* MMC and SD specs only seem to care that sampling is on the
1329      * rising edge ... meaning SPI modes 0 or 3.  So either SPI mode
1330      * should be legit.  We'll use mode 0 since the steady state is 0,
1331      * which is appropriate for hotplugging, unless the platform data
1332      * specify mode 3 (if hardware is not compatible to mode 0).
1333      */
1334     if (spi->mode != SPI_MODE_3)
1335         spi->mode = SPI_MODE_0;
1336     spi->bits_per_word = 8;
1337 
1338     status = spi_setup(spi);
1339     if (status < 0) {
1340         dev_dbg(&spi->dev, "needs SPI mode %02x, %d KHz; %d\n",
1341                 spi->mode, spi->max_speed_hz / 1000,
1342                 status);
1343         return status;
1344     }
1345 
1346     /* We need a supply of ones to transmit.  This is the only time
1347      * the CPU touches these, so cache coherency isn't a concern.
1348      *
1349      * NOTE if many systems use more than one MMC-over-SPI connector
1350      * it'd save some memory to share this.  That's evidently rare.
1351      */
1352     status = -ENOMEM;
1353     ones = kmalloc(MMC_SPI_BLOCKSIZE, GFP_KERNEL);
1354     if (!ones)
1355         goto nomem;
1356     memset(ones, 0xff, MMC_SPI_BLOCKSIZE);
1357 
1358     mmc = mmc_alloc_host(sizeof(*host), &spi->dev);
1359     if (!mmc)
1360         goto nomem;
1361 
1362     mmc->ops = &mmc_spi_ops;
1363     mmc->max_blk_size = MMC_SPI_BLOCKSIZE;
1364     mmc->max_segs = MMC_SPI_BLOCKSATONCE;
1365     mmc->max_req_size = MMC_SPI_BLOCKSATONCE * MMC_SPI_BLOCKSIZE;
1366     mmc->max_blk_count = MMC_SPI_BLOCKSATONCE;
1367 
1368     mmc->caps = MMC_CAP_SPI;
1369 
1370     /* SPI doesn't need the lowspeed device identification thing for
1371      * MMC or SD cards, since it never comes up in open drain mode.
1372      * That's good; some SPI masters can't handle very low speeds!
1373      *
1374      * However, low speed SDIO cards need not handle over 400 KHz;
1375      * that's the only reason not to use a few MHz for f_min (until
1376      * the upper layer reads the target frequency from the CSD).
1377      */
1378     mmc->f_min = 400000;
1379     mmc->f_max = spi->max_speed_hz;
1380 
1381     host = mmc_priv(mmc);
1382     host->mmc = mmc;
1383     host->spi = spi;
1384 
1385     host->ones = ones;
1386 
1387     dev_set_drvdata(&spi->dev, mmc);
1388 
1389     /* Platform data is used to hook up things like card sensing
1390      * and power switching gpios.
1391      */
1392     host->pdata = mmc_spi_get_pdata(spi);
1393     if (host->pdata)
1394         mmc->ocr_avail = host->pdata->ocr_mask;
1395     if (!mmc->ocr_avail) {
1396         dev_warn(&spi->dev, "ASSUMING 3.2-3.4 V slot power\n");
1397         mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1398     }
1399     if (host->pdata && host->pdata->setpower) {
1400         host->powerup_msecs = host->pdata->powerup_msecs;
1401         if (!host->powerup_msecs || host->powerup_msecs > 250)
1402             host->powerup_msecs = 250;
1403     }
1404 
1405     /* preallocate dma buffers */
1406     host->data = kmalloc(sizeof(*host->data), GFP_KERNEL);
1407     if (!host->data)
1408         goto fail_nobuf1;
1409 
1410     status = mmc_spi_dma_alloc(host);
1411     if (status)
1412         goto fail_dma;
1413 
1414     /* setup message for status/busy readback */
1415     spi_message_init(&host->readback);
1416     host->readback.is_dma_mapped = (host->dma_dev != NULL);
1417 
1418     spi_message_add_tail(&host->status, &host->readback);
1419     host->status.tx_buf = host->ones;
1420     host->status.tx_dma = host->ones_dma;
1421     host->status.rx_buf = &host->data->status;
1422     host->status.rx_dma = host->data_dma + offsetof(struct scratch, status);
1423     host->status.cs_change = 1;
1424 
1425     /* register card detect irq */
1426     if (host->pdata && host->pdata->init) {
1427         status = host->pdata->init(&spi->dev, mmc_spi_detect_irq, mmc);
1428         if (status != 0)
1429             goto fail_glue_init;
1430     }
1431 
1432     /* pass platform capabilities, if any */
1433     if (host->pdata) {
1434         mmc->caps |= host->pdata->caps;
1435         mmc->caps2 |= host->pdata->caps2;
1436     }
1437 
1438     status = mmc_add_host(mmc);
1439     if (status != 0)
1440         goto fail_add_host;
1441 
1442     /*
1443      * Index 0 is card detect
1444      * Old boardfiles were specifying 1 ms as debounce
1445      */
1446     status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
1447     if (status == -EPROBE_DEFER)
1448         goto fail_add_host;
1449     if (!status) {
1450         /*
1451          * The platform has a CD GPIO signal that may support
1452          * interrupts, so let mmc_gpiod_request_cd_irq() decide
1453          * if polling is needed or not.
1454          */
1455         mmc->caps &= ~MMC_CAP_NEEDS_POLL;
1456         mmc_gpiod_request_cd_irq(mmc);
1457     }
1458     mmc_detect_change(mmc, 0);
1459 
1460     /* Index 1 is write protect/read only */
1461     status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
1462     if (status == -EPROBE_DEFER)
1463         goto fail_add_host;
1464     if (!status)
1465         has_ro = true;
1466 
1467     dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
1468             dev_name(&mmc->class_dev),
1469             host->dma_dev ? "" : ", no DMA",
1470             has_ro ? "" : ", no WP",
1471             (host->pdata && host->pdata->setpower)
1472                 ? "" : ", no poweroff",
1473             (mmc->caps & MMC_CAP_NEEDS_POLL)
1474                 ? ", cd polling" : "");
1475     return 0;
1476 
1477 fail_add_host:
1478     mmc_remove_host(mmc);
1479 fail_glue_init:
1480     mmc_spi_dma_free(host);
1481 fail_dma:
1482     kfree(host->data);
1483 fail_nobuf1:
1484     mmc_spi_put_pdata(spi);
1485     mmc_free_host(mmc);
1486 nomem:
1487     kfree(ones);
1488     return status;
1489 }
1490 
1491 
/*
 * Unbind the SPI device: quiesce platform card-detect hooks, unregister
 * from the MMC core, then release DMA mappings and driver buffers.
 * Teardown order mirrors probe in reverse.
 */
static void mmc_spi_remove(struct spi_device *spi)
{
    struct mmc_host     *mmc = dev_get_drvdata(&spi->dev);
    struct mmc_spi_host *host = mmc_priv(mmc);

    /* prevent new mmc_detect_change() calls */
    if (host->pdata && host->pdata->exit)
        host->pdata->exit(&spi->dev, mmc);

    mmc_remove_host(mmc);

    mmc_spi_dma_free(host);
    kfree(host->data);
    kfree(host->ones);

    /* restore the device's original max speed (set_ios may have changed it) */
    spi->max_speed_hz = mmc->f_max;
    mmc_spi_put_pdata(spi);
    mmc_free_host(mmc);
}
1511 
/* Legacy (board-info) SPI device match table */
static const struct spi_device_id mmc_spi_dev_ids[] = {
    { "mmc-spi-slot"},
    { },
};
MODULE_DEVICE_TABLE(spi, mmc_spi_dev_ids);

/* Device-tree match table */
static const struct of_device_id mmc_spi_of_match_table[] = {
    { .compatible = "mmc-spi-slot", },
    {},
};
MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table);
1523 
/* SPI driver glue: binds "mmc-spi-slot" devices to this host driver. */
static struct spi_driver mmc_spi_driver = {
    .driver = {
        .name =     "mmc_spi",
        .of_match_table = mmc_spi_of_match_table,
    },
    .id_table = mmc_spi_dev_ids,
    .probe =    mmc_spi_probe,
    .remove =   mmc_spi_remove,
};

module_spi_driver(mmc_spi_driver);

MODULE_AUTHOR("Mike Lavender, David Brownell, Hans-Peter Nilsson, Jan Nikitenko");
MODULE_DESCRIPTION("SPI SD/MMC host driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:mmc_spi");