// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
 * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/virtio.h>
#include <linux/workqueue.h>

#define USDHI6_SD_CMD       0x0000
#define USDHI6_SD_PORT_SEL  0x0004
#define USDHI6_SD_ARG       0x0008
#define USDHI6_SD_STOP      0x0010
#define USDHI6_SD_SECCNT    0x0014
#define USDHI6_SD_RSP10     0x0018
#define USDHI6_SD_RSP32     0x0020
#define USDHI6_SD_RSP54     0x0028
#define USDHI6_SD_RSP76     0x0030
#define USDHI6_SD_INFO1     0x0038
#define USDHI6_SD_INFO2     0x003c
#define USDHI6_SD_INFO1_MASK    0x0040
#define USDHI6_SD_INFO2_MASK    0x0044
#define USDHI6_SD_CLK_CTRL  0x0048
#define USDHI6_SD_SIZE      0x004c
#define USDHI6_SD_OPTION    0x0050
#define USDHI6_SD_ERR_STS1  0x0058
#define USDHI6_SD_ERR_STS2  0x005c
#define USDHI6_SD_BUF0      0x0060
#define USDHI6_SDIO_MODE    0x0068
#define USDHI6_SDIO_INFO1   0x006c
#define USDHI6_SDIO_INFO1_MASK  0x0070
#define USDHI6_CC_EXT_MODE  0x01b0
#define USDHI6_SOFT_RST     0x01c0
#define USDHI6_VERSION      0x01c4
#define USDHI6_HOST_MODE    0x01c8
#define USDHI6_SDIF_MODE    0x01cc

#define USDHI6_SD_CMD_APP       0x0040
#define USDHI6_SD_CMD_MODE_RSP_AUTO 0x0000
#define USDHI6_SD_CMD_MODE_RSP_NONE 0x0300
#define USDHI6_SD_CMD_MODE_RSP_R1   0x0400  /* Also R5, R6, R7 */
#define USDHI6_SD_CMD_MODE_RSP_R1B  0x0500  /* R1b */
#define USDHI6_SD_CMD_MODE_RSP_R2   0x0600
#define USDHI6_SD_CMD_MODE_RSP_R3   0x0700  /* Also R4 */
#define USDHI6_SD_CMD_DATA      0x0800
#define USDHI6_SD_CMD_READ      0x1000
#define USDHI6_SD_CMD_MULTI     0x2000
#define USDHI6_SD_CMD_CMD12_AUTO_OFF    0x4000

#define USDHI6_CC_EXT_MODE_SDRW     BIT(1)

#define USDHI6_SD_INFO1_RSP_END     BIT(0)
#define USDHI6_SD_INFO1_ACCESS_END  BIT(2)
#define USDHI6_SD_INFO1_CARD_OUT    BIT(3)
#define USDHI6_SD_INFO1_CARD_IN     BIT(4)
#define USDHI6_SD_INFO1_CD      BIT(5)
#define USDHI6_SD_INFO1_WP      BIT(7)
#define USDHI6_SD_INFO1_D3_CARD_OUT BIT(8)
#define USDHI6_SD_INFO1_D3_CARD_IN  BIT(9)

#define USDHI6_SD_INFO2_CMD_ERR     BIT(0)
#define USDHI6_SD_INFO2_CRC_ERR     BIT(1)
#define USDHI6_SD_INFO2_END_ERR     BIT(2)
#define USDHI6_SD_INFO2_TOUT        BIT(3)
#define USDHI6_SD_INFO2_IWA_ERR     BIT(4)
#define USDHI6_SD_INFO2_IRA_ERR     BIT(5)
#define USDHI6_SD_INFO2_RSP_TOUT    BIT(6)
#define USDHI6_SD_INFO2_SDDAT0      BIT(7)
#define USDHI6_SD_INFO2_BRE     BIT(8)
#define USDHI6_SD_INFO2_BWE     BIT(9)
#define USDHI6_SD_INFO2_SCLKDIVEN   BIT(13)
#define USDHI6_SD_INFO2_CBSY        BIT(14)
#define USDHI6_SD_INFO2_ILA     BIT(15)

#define USDHI6_SD_INFO1_CARD_INSERT (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_D3_CARD_IN)
#define USDHI6_SD_INFO1_CARD_EJECT (USDHI6_SD_INFO1_CARD_OUT | USDHI6_SD_INFO1_D3_CARD_OUT)
#define USDHI6_SD_INFO1_CARD (USDHI6_SD_INFO1_CARD_INSERT | USDHI6_SD_INFO1_CARD_EJECT)
#define USDHI6_SD_INFO1_CARD_CD (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_CARD_OUT)

#define USDHI6_SD_INFO2_ERR (USDHI6_SD_INFO2_CMD_ERR |  \
    USDHI6_SD_INFO2_CRC_ERR | USDHI6_SD_INFO2_END_ERR | \
    USDHI6_SD_INFO2_TOUT | USDHI6_SD_INFO2_IWA_ERR |    \
    USDHI6_SD_INFO2_IRA_ERR | USDHI6_SD_INFO2_RSP_TOUT |    \
    USDHI6_SD_INFO2_ILA)

#define USDHI6_SD_INFO1_IRQ (USDHI6_SD_INFO1_RSP_END | USDHI6_SD_INFO1_ACCESS_END | \
                 USDHI6_SD_INFO1_CARD)

#define USDHI6_SD_INFO2_IRQ (USDHI6_SD_INFO2_ERR | USDHI6_SD_INFO2_BRE | \
                 USDHI6_SD_INFO2_BWE | 0x0800 | USDHI6_SD_INFO2_ILA)

#define USDHI6_SD_CLK_CTRL_SCLKEN   BIT(8)

#define USDHI6_SD_STOP_STP      BIT(0)
#define USDHI6_SD_STOP_SEC      BIT(8)

#define USDHI6_SDIO_INFO1_IOIRQ     BIT(0)
#define USDHI6_SDIO_INFO1_EXPUB52   BIT(14)
#define USDHI6_SDIO_INFO1_EXWT      BIT(15)

#define USDHI6_SD_ERR_STS1_CRC_NO_ERROR BIT(13)

#define USDHI6_SOFT_RST_RESERVED    (BIT(1) | BIT(2))
#define USDHI6_SOFT_RST_RESET       BIT(0)

#define USDHI6_SD_OPTION_TIMEOUT_SHIFT  4
#define USDHI6_SD_OPTION_TIMEOUT_MASK   (0xf << USDHI6_SD_OPTION_TIMEOUT_SHIFT)
#define USDHI6_SD_OPTION_WIDTH_1    BIT(15)

#define USDHI6_SD_PORT_SEL_PORTS_SHIFT  8

#define USDHI6_SD_CLK_CTRL_DIV_MASK 0xff

#define USDHI6_SDIO_INFO1_IRQ   (USDHI6_SDIO_INFO1_IOIRQ | 3 | \
                 USDHI6_SDIO_INFO1_EXPUB52 | USDHI6_SDIO_INFO1_EXWT)

#define USDHI6_MIN_DMA 64

#define USDHI6_REQ_TIMEOUT_MS 4000
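/*
 * Transfers shorter than USDHI6_MIN_DMA bytes are done in PIO; see the
 * use_dma decision in usdhi6_rq_start(). USDHI6_REQ_TIMEOUT_MS is the
 * software watchdog period, presumably loaded into host->timeout and
 * armed via timeout_work around every request.
 */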

enum usdhi6_wait_for {
    USDHI6_WAIT_FOR_REQUEST,
    USDHI6_WAIT_FOR_CMD,
    USDHI6_WAIT_FOR_MREAD,
    USDHI6_WAIT_FOR_MWRITE,
    USDHI6_WAIT_FOR_READ,
    USDHI6_WAIT_FOR_WRITE,
    USDHI6_WAIT_FOR_DATA_END,
    USDHI6_WAIT_FOR_STOP,
    USDHI6_WAIT_FOR_DMA,
};
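/*
 * One state per expected completion event: the hard IRQ handler records the
 * raw status, and the threaded handler (usdhi6_sd_bh()) dispatches on
 * host->wait to decide what the interrupt means and what to wait for next.
 */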

struct usdhi6_page {
    struct page *page;
    void *mapped;       /* mapped page */
};

struct usdhi6_host {
    struct mmc_host *mmc;
    struct mmc_request *mrq;
    void __iomem *base;
    struct clk *clk;

    /* SG memory handling */

    /* Common for multiple and single block requests */
    struct usdhi6_page pg;  /* current page from an SG */
    void *blk_page;     /* either a mapped page, or the bounce buffer */
    size_t offset;      /* offset within a page, including sg->offset */

    /* Blocks crossing a page boundary */
    size_t head_len;
    struct usdhi6_page head_pg;

    /* A bounce buffer for unaligned blocks or blocks crossing a page boundary */
    struct scatterlist bounce_sg;
    u8 bounce_buf[512];

    /* Multiple block requests only */
    struct scatterlist *sg; /* current SG segment */
    int page_idx;       /* page index within an SG segment */

    enum usdhi6_wait_for wait;
    u32 status_mask;
    u32 status2_mask;
    u32 sdio_mask;
    u32 io_error;
    u32 irq_status;
    unsigned long imclk;
    unsigned long rate;
    bool app_cmd;

    /* Timeout handling */
    struct delayed_work timeout_work;
    unsigned long timeout;

    /* DMA support */
    struct dma_chan *chan_rx;
    struct dma_chan *chan_tx;
    bool dma_active;

    /* Pin control */
    struct pinctrl *pinctrl;
    struct pinctrl_state *pins_uhs;
};

/*          I/O primitives                  */

static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
{
    iowrite32(data, host->base + reg);
    dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
        host->base, reg, data);
}

static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data)
{
    iowrite16(data, host->base + reg);
    dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
        host->base, reg, data);
}

static u32 usdhi6_read(struct usdhi6_host *host, u32 reg)
{
    u32 data = ioread32(host->base + reg);
    dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
        host->base, reg, data);
    return data;
}

static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg)
{
    u16 data = ioread16(host->base + reg);
    dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
        host->base, reg, data);
    return data;
}

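/*
 * The INFO1/INFO2 mask registers disable the interrupt sources whose bits are
 * set, so the enable helper stores the complement of the requested sources,
 * restricted to the bits this driver handles (USDHI6_SD_INFO*_IRQ).
 */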
static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2)
{
    host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1;
    host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2;
    usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask);
    usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask);
}

static void usdhi6_wait_for_resp(struct usdhi6_host *host)
{
    usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END |
              USDHI6_SD_INFO1_ACCESS_END | USDHI6_SD_INFO1_CARD_CD,
              USDHI6_SD_INFO2_ERR);
}

static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read)
{
    usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END |
              USDHI6_SD_INFO1_CARD_CD, USDHI6_SD_INFO2_ERR |
              (read ? USDHI6_SD_INFO2_BRE : USDHI6_SD_INFO2_BWE));
}

static void usdhi6_only_cd(struct usdhi6_host *host)
{
    /* Mask all except card hotplug */
    usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0);
}

static void usdhi6_mask_all(struct usdhi6_host *host)
{
    usdhi6_irq_enable(host, 0, 0);
}

static int usdhi6_error_code(struct usdhi6_host *host)
{
    u32 err;

    usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP);

    if (host->io_error &
        (USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) {
        u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54);
        int opc = host->mrq ? host->mrq->cmd->opcode : -1;

        err = usdhi6_read(host, USDHI6_SD_ERR_STS2);
        /* Response timeout is often normal, don't spam the log */
        if (host->wait == USDHI6_WAIT_FOR_CMD)
            dev_dbg(mmc_dev(host->mmc),
                "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
                err, rsp54, host->wait, opc);
        else
            dev_warn(mmc_dev(host->mmc),
                 "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
                 err, rsp54, host->wait, opc);
        return -ETIMEDOUT;
    }

    err = usdhi6_read(host, USDHI6_SD_ERR_STS1);
    if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR)
        dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n",
             err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1);
    if (host->io_error & USDHI6_SD_INFO2_ILA)
        return -EILSEQ;

    return -EIO;
}

/*          Scatter-Gather management           */

/*
 * In PIO mode we have to map each page separately, using kmap(). That way
 * adjacent pages are mapped to non-adjacent virtual addresses. That's why we
 * have to use a bounce buffer for blocks crossing page boundaries. Such blocks
 * have been observed with an SDIO WiFi card (b43 driver).
 */
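/*
 * A sketch with assumed numbers: a 512-byte block with head_len = 256 splits
 * across two pages like this:
 *
 *   head page:  [ ............ | first 256 bytes of the block ]
 *   next page:  [ last 256 bytes of the block | ............. ]
 *
 * For writes the two halves are joined in bounce_buf below; for reads they
 * are copied back out of it in usdhi6_sg_unmap().
 */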
static void usdhi6_blk_bounce(struct usdhi6_host *host,
                  struct scatterlist *sg)
{
    struct mmc_data *data = host->mrq->data;
    size_t blk_head = host->head_len;

    dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n",
        __func__, host->mrq->cmd->opcode, data->sg_len,
        data->blksz, data->blocks, sg->offset);

    host->head_pg.page  = host->pg.page;
    host->head_pg.mapped    = host->pg.mapped;
    host->pg.page       = nth_page(host->pg.page, 1);
    host->pg.mapped     = kmap(host->pg.page);

    host->blk_page = host->bounce_buf;
    host->offset = 0;

    if (data->flags & MMC_DATA_READ)
        return;

    memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head,
           blk_head);
    memcpy(host->bounce_buf + blk_head, host->pg.mapped,
           data->blksz - blk_head);
}

/* Only called for multiple block IO */
static void usdhi6_sg_prep(struct usdhi6_host *host)
{
    struct mmc_request *mrq = host->mrq;
    struct mmc_data *data = mrq->data;

    usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks);

    host->sg = data->sg;
    /* TODO: if we always map, this is redundant */
    host->offset = host->sg->offset;
}

/* Map the first page in an SG segment: common for multiple and single block IO */
static void *usdhi6_sg_map(struct usdhi6_host *host)
{
    struct mmc_data *data = host->mrq->data;
    struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
    size_t head = PAGE_SIZE - sg->offset;
    size_t blk_head = head % data->blksz;

    WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
    if (WARN(sg_dma_len(sg) % data->blksz,
         "SG size %u isn't a multiple of block size %u\n",
         sg_dma_len(sg), data->blksz))
        return NULL;

    host->pg.page = sg_page(sg);
    host->pg.mapped = kmap(host->pg.page);
    host->offset = sg->offset;

    /*
     * Block size must be a power of 2 for multi-block transfers,
     * therefore blk_head is equal for all pages in this SG
     */
    host->head_len = blk_head;

    if (head < data->blksz)
        /*
         * The first block in the SG crosses a page boundary.
         * Max blksz = 512, so blocks can only span 2 pages
         */
        usdhi6_blk_bounce(host, sg);
    else
        host->blk_page = host->pg.mapped;

    dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n",
        host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
        sg->offset, host->mrq->cmd->opcode, host->mrq);

    return host->blk_page + host->offset;
}

/* Unmap the current page: common for multiple and single block IO */
static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force)
{
    struct mmc_data *data = host->mrq->data;
    struct page *page = host->head_pg.page;

    if (page) {
        /* The previous block crossed a page boundary */
        struct scatterlist *sg = data->sg_len > 1 ?
            host->sg : data->sg;
        size_t blk_head = host->head_len;

        if (!data->error && data->flags & MMC_DATA_READ) {
            memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head,
                   host->bounce_buf, blk_head);
            memcpy(host->pg.mapped, host->bounce_buf + blk_head,
                   data->blksz - blk_head);
        }

        flush_dcache_page(page);
        kunmap(page);

        host->head_pg.page = NULL;

        if (!force && sg_dma_len(sg) + sg->offset >
            (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
            /* More blocks in this SG, don't unmap the next page */
            return;
    }

    page = host->pg.page;
    if (!page)
        return;

    flush_dcache_page(page);
    kunmap(page);

    host->pg.page = NULL;
}

/* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */
static void usdhi6_sg_advance(struct usdhi6_host *host)
{
    struct mmc_data *data = host->mrq->data;
    size_t done, total;

    /* New offset: set at the end of the previous block */
    if (host->head_pg.page) {
        /* Finished a cross-page block, jump to the new page */
        host->page_idx++;
        host->offset = data->blksz - host->head_len;
        host->blk_page = host->pg.mapped;
        usdhi6_sg_unmap(host, false);
    } else {
        host->offset += data->blksz;
        /* The completed block didn't cross a page boundary */
        if (host->offset == PAGE_SIZE) {
            /* If required, we'll map the page below */
            host->offset = 0;
            host->page_idx++;
        }
    }

    /*
     * Now host->blk_page + host->offset points at the end of our last
     * block, and host->page_idx is the index of the page in which our
     * new block is located, if any
     */

    done = (host->page_idx << PAGE_SHIFT) + host->offset;
    total = host->sg->offset + sg_dma_len(host->sg);

    dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__,
        done, total, host->offset);

    if (done < total && host->offset) {
        /* More blocks in this page */
        if (host->offset + data->blksz > PAGE_SIZE)
            /* We've reached a block that spans 2 pages */
            usdhi6_blk_bounce(host, host->sg);

        return;
    }

    /* Finished the current page or SG segment */
    usdhi6_sg_unmap(host, false);

    if (done == total) {
        /*
         * End of an SG segment or the complete SG: jump to the next
         * segment, we'll map it later in usdhi6_blk_read() or
         * usdhi6_blk_write()
         */
        struct scatterlist *next = sg_next(host->sg);

        host->page_idx = 0;

        if (!next)
            host->wait = USDHI6_WAIT_FOR_DATA_END;
        host->sg = next;

        if (WARN(next && sg_dma_len(next) % data->blksz,
             "SG size %u isn't a multiple of block size %u\n",
             sg_dma_len(next), data->blksz))
            data->error = -EINVAL;

        return;
    }

    /* We cannot get here after crossing a page boundary */

    /* Next page in the same SG */
    host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
    host->pg.mapped = kmap(host->pg.page);
    host->blk_page = host->pg.mapped;

    dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n",
        host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
        host->mrq->cmd->opcode, host->mrq);
}


/*          DMA handling                    */

static void usdhi6_dma_release(struct usdhi6_host *host)
{
    host->dma_active = false;
    if (host->chan_tx) {
        struct dma_chan *chan = host->chan_tx;
        host->chan_tx = NULL;
        dma_release_channel(chan);
    }
    if (host->chan_rx) {
        struct dma_chan *chan = host->chan_rx;
        host->chan_rx = NULL;
        dma_release_channel(chan);
    }
}

static void usdhi6_dma_stop_unmap(struct usdhi6_host *host)
{
    struct mmc_data *data = host->mrq->data;

    if (!host->dma_active)
        return;

    usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
    host->dma_active = false;

    if (data->flags & MMC_DATA_READ)
        dma_unmap_sg(host->chan_rx->device->dev, data->sg,
                 data->sg_len, DMA_FROM_DEVICE);
    else
        dma_unmap_sg(host->chan_tx->device->dev, data->sg,
                 data->sg_len, DMA_TO_DEVICE);
}

static void usdhi6_dma_complete(void *arg)
{
    struct usdhi6_host *host = arg;
    struct mmc_request *mrq = host->mrq;

    if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n",
         dev_name(mmc_dev(host->mmc)), mrq))
        return;

    dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__,
        mrq->cmd->opcode);

    usdhi6_dma_stop_unmap(host);
    usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
}

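/*
 * Map the SG list and submit a slave transfer. A valid cookie
 * (>= DMA_MIN_COOKIE) means DMA is in flight; any failure releases the
 * channels here, and the request falls back to PIO in usdhi6_rq_start().
 */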
static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan,
                enum dma_transfer_direction dir)
{
    struct mmc_data *data = host->mrq->data;
    struct scatterlist *sg = data->sg;
    struct dma_async_tx_descriptor *desc = NULL;
    dma_cookie_t cookie = -EINVAL;
    enum dma_data_direction data_dir;
    int ret;

    switch (dir) {
    case DMA_MEM_TO_DEV:
        data_dir = DMA_TO_DEVICE;
        break;
    case DMA_DEV_TO_MEM:
        data_dir = DMA_FROM_DEVICE;
        break;
    default:
        return -EINVAL;
    }

    ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir);
    if (ret > 0) {
        host->dma_active = true;
        desc = dmaengine_prep_slave_sg(chan, sg, ret, dir,
                    DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    }

    if (desc) {
        desc->callback = usdhi6_dma_complete;
        desc->callback_param = host;
        cookie = dmaengine_submit(desc);
    }

    dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n",
        __func__, data->sg_len, ret, cookie, desc);

    if (cookie < 0) {
        /* DMA failed, fall back to PIO */
        if (ret >= 0)
            ret = cookie;
        usdhi6_dma_release(host);
        dev_warn(mmc_dev(host->mmc),
             "DMA failed: %d, falling back to PIO\n", ret);
    }

    return cookie;
}

static int usdhi6_dma_start(struct usdhi6_host *host)
{
    if (!host->chan_rx || !host->chan_tx)
        return -ENODEV;

    if (host->mrq->data->flags & MMC_DATA_READ)
        return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM);

    return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV);
}

static void usdhi6_dma_kill(struct usdhi6_host *host)
{
    struct mmc_data *data = host->mrq->data;

    dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n",
        __func__, data->sg_len, data->blocks, data->blksz);
    /* Abort DMA */
    if (data->flags & MMC_DATA_READ)
        dmaengine_terminate_sync(host->chan_rx);
    else
        dmaengine_terminate_sync(host->chan_tx);
}

static void usdhi6_dma_check_error(struct usdhi6_host *host)
{
    struct mmc_data *data = host->mrq->data;

    dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n",
        __func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1));

    if (host->io_error) {
        data->error = usdhi6_error_code(host);
        data->bytes_xfered = 0;
        usdhi6_dma_kill(host);
        usdhi6_dma_release(host);
        dev_warn(mmc_dev(host->mmc),
             "DMA failed: %d, falling back to PIO\n", data->error);
        return;
    }

    /*
     * The datasheet tells us to check a response from the card, whereas
     * responses only come after the command phase, not after the data
     * phase. Let's check anyway.
     */
    if (host->irq_status & USDHI6_SD_INFO1_RSP_END)
        dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n");
}

static void usdhi6_dma_kick(struct usdhi6_host *host)
{
    if (host->mrq->data->flags & MMC_DATA_READ)
        dma_async_issue_pending(host->chan_rx);
    else
        dma_async_issue_pending(host->chan_tx);
}

static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
{
    struct dma_slave_config cfg = {
        .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
    };
    int ret;

    host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
    dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
        host->chan_tx);

    if (IS_ERR(host->chan_tx)) {
        host->chan_tx = NULL;
        return;
    }

    cfg.direction = DMA_MEM_TO_DEV;
    cfg.dst_addr = start + USDHI6_SD_BUF0;
    cfg.dst_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */
    cfg.src_addr = 0;
    ret = dmaengine_slave_config(host->chan_tx, &cfg);
    if (ret < 0)
        goto e_release_tx;

    host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
    dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
        host->chan_rx);

    if (IS_ERR(host->chan_rx)) {
        host->chan_rx = NULL;
        goto e_release_tx;
    }

    cfg.direction = DMA_DEV_TO_MEM;
    cfg.src_addr = cfg.dst_addr;
    cfg.src_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */
    cfg.dst_addr = 0;
    ret = dmaengine_slave_config(host->chan_rx, &cfg);
    if (ret < 0)
        goto e_release_rx;

    return;

e_release_rx:
    dma_release_channel(host->chan_rx);
    host->chan_rx = NULL;
e_release_tx:
    dma_release_channel(host->chan_tx);
    host->chan_tx = NULL;
}

/*          API helpers                 */

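/*
 * The SD clock is derived from the input clock (imclk) by a power-of-two
 * divider. Reading the code below (not a datasheet): 0xff in the DIV field
 * appears to select the undivided 1-to-1 clock, while a divider of 2^n is
 * encoded as (2^n) >> 2, hence "val |= div >> 2".
 */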
static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios)
{
    unsigned long rate = ios->clock;
    u32 val;
    unsigned int i;

    for (i = 1000; i; i--) {
        if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN)
            break;
        usleep_range(10, 100);
    }

    if (!i) {
        dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n");
        return;
    }

    val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK;

    if (rate) {
        unsigned long new_rate;

        if (host->imclk <= rate) {
            if (ios->timing != MMC_TIMING_UHS_DDR50) {
                /* Cannot have 1-to-1 clock in DDR mode */
                new_rate = host->imclk;
                val |= 0xff;
            } else {
                new_rate = host->imclk / 2;
            }
        } else {
            unsigned long div =
                roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate));
            val |= div >> 2;
            new_rate = host->imclk / div;
        }

        if (host->rate == new_rate)
            return;

        host->rate = new_rate;

        dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n",
            rate, (val & 0xff) << 2, new_rate);
    }

    /*
     * If the old or new rate is equal to the input rate, the clock has to
     * be switched off before the change and back on afterwards
     */
    if (host->imclk == rate || host->imclk == host->rate || !rate)
        usdhi6_write(host, USDHI6_SD_CLK_CTRL,
                 val & ~USDHI6_SD_CLK_CTRL_SCLKEN);

    if (!rate) {
        host->rate = 0;
        return;
    }

    usdhi6_write(host, USDHI6_SD_CLK_CTRL, val);

    if (host->imclk == rate || host->imclk == host->rate ||
        !(val & USDHI6_SD_CLK_CTRL_SCLKEN))
        usdhi6_write(host, USDHI6_SD_CLK_CTRL,
                 val | USDHI6_SD_CLK_CTRL_SCLKEN);
}

static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios)
{
    struct mmc_host *mmc = host->mmc;

    if (!IS_ERR(mmc->supply.vmmc))
        /* Errors ignored... */
        mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
                      ios->power_mode ? ios->vdd : 0);
}

static int usdhi6_reset(struct usdhi6_host *host)
{
    int i;

    usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED);
    cpu_relax();
    usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET);
    for (i = 1000; i; i--)
        if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET)
            break;

    return i ? 0 : -ETIMEDOUT;
}

static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
    struct usdhi6_host *host = mmc_priv(mmc);
    u32 option, mode;
    int ret;

    dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n",
        ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing);

    switch (ios->power_mode) {
    case MMC_POWER_OFF:
        usdhi6_set_power(host, ios);
        usdhi6_only_cd(host);
        break;
    case MMC_POWER_UP:
        /*
         * USDHI6_SD_OPTION is otherwise only touched from .request(),
         * which cannot race with MMC_POWER_UP
         */
        ret = usdhi6_reset(host);
        if (ret < 0) {
            dev_err(mmc_dev(mmc), "Cannot reset the interface!\n");
        } else {
            usdhi6_set_power(host, ios);
            usdhi6_only_cd(host);
        }
        break;
    case MMC_POWER_ON:
        option = usdhi6_read(host, USDHI6_SD_OPTION);
        /*
         * The eMMC standard only allows 4 or 8 bits in the DDR mode,
         * the same probably holds for SD cards. We check here anyway,
         * since the datasheet explicitly requires 4 bits for DDR.
         */
        if (ios->bus_width == MMC_BUS_WIDTH_1) {
            if (ios->timing == MMC_TIMING_UHS_DDR50)
                dev_err(mmc_dev(mmc),
                    "4 bits are required for DDR\n");
            option |= USDHI6_SD_OPTION_WIDTH_1;
            mode = 0;
        } else {
            option &= ~USDHI6_SD_OPTION_WIDTH_1;
            mode = ios->timing == MMC_TIMING_UHS_DDR50;
        }
        usdhi6_write(host, USDHI6_SD_OPTION, option);
        usdhi6_write(host, USDHI6_SDIF_MODE, mode);
        break;
    }

    if (host->rate != ios->clock)
        usdhi6_clk_set(host, ios);
}

/* This sets the data timeout; the response timeout is fixed at 640 clock cycles */
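/*
 * The 4-bit TOP field selects a timeout of 2^(13 + val) SDCLK ticks, with
 * val = 0..14. A worked example (assumed numbers): 100 ms at a 25 MHz SDCLK
 * is 2.5e6 ticks, order_base_2() gives 22, so val = 9 and the effective
 * timeout is 2^22 ticks, i.e. roughly 168 ms.
 */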
static void usdhi6_timeout_set(struct usdhi6_host *host)
{
    struct mmc_request *mrq = host->mrq;
    u32 val;
    unsigned long ticks;

    if (!mrq->data)
        ticks = host->rate / 1000 * mrq->cmd->busy_timeout;
    else
        ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) +
            mrq->data->timeout_clks;

    if (!ticks || ticks > 1 << 27)
        /* Max timeout */
        val = 14;
    else if (ticks < 1 << 13)
        /* Min timeout */
        val = 0;
    else
        val = order_base_2(ticks) - 13;

    dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n",
        mrq->data ? "data" : "cmd", ticks, host->rate);

    /* Timeout Counter mask: 0xf0 */
    usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) |
             (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK));
}

static void usdhi6_request_done(struct usdhi6_host *host)
{
    struct mmc_request *mrq = host->mrq;
    struct mmc_data *data = mrq->data;

    if (WARN(host->pg.page || host->head_pg.page,
         "Page %p or %p not unmapped: wait %u, CMD%d(%c) @ +0x%zx %ux%u in SG%u!\n",
         host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode,
         data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-',
         data ? host->offset : 0, data ? data->blocks : 0,
         data ? data->blksz : 0, data ? data->sg_len : 0))
        usdhi6_sg_unmap(host, true);

    if (mrq->cmd->error ||
        (data && data->error) ||
        (mrq->stop && mrq->stop->error))
        dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n",
            __func__, mrq->cmd->opcode, data ? data->blocks : 0,
            data ? data->blksz : 0,
            mrq->cmd->error,
            data ? data->error : 1,
            mrq->stop ? mrq->stop->error : 1);

    /* Disable DMA */
    usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
    host->wait = USDHI6_WAIT_FOR_REQUEST;
    host->mrq = NULL;

    mmc_request_done(host->mmc, mrq);
}

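/*
 * Build the SD_CMD register value: the opcode occupies the low 6 bits,
 * USDHI6_SD_CMD_APP marks an ACMD, and the MODE_RSP / DATA / READ / MULTI
 * flags in bits 8 and up describe the transfer and the expected response.
 */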
static int usdhi6_cmd_flags(struct usdhi6_host *host)
{
    struct mmc_request *mrq = host->mrq;
    struct mmc_command *cmd = mrq->cmd;
    u16 opc = cmd->opcode;

    if (host->app_cmd) {
        host->app_cmd = false;
        opc |= USDHI6_SD_CMD_APP;
    }

    if (mrq->data) {
        opc |= USDHI6_SD_CMD_DATA;

        if (mrq->data->flags & MMC_DATA_READ)
            opc |= USDHI6_SD_CMD_READ;

        if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
            cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
            (cmd->opcode == SD_IO_RW_EXTENDED &&
             mrq->data->blocks > 1)) {
            opc |= USDHI6_SD_CMD_MULTI;
            if (!mrq->stop)
                opc |= USDHI6_SD_CMD_CMD12_AUTO_OFF;
        }

        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
            opc |= USDHI6_SD_CMD_MODE_RSP_NONE;
            break;
        case MMC_RSP_R1:
            opc |= USDHI6_SD_CMD_MODE_RSP_R1;
            break;
        case MMC_RSP_R1B:
            opc |= USDHI6_SD_CMD_MODE_RSP_R1B;
            break;
        case MMC_RSP_R2:
            opc |= USDHI6_SD_CMD_MODE_RSP_R2;
            break;
        case MMC_RSP_R3:
            opc |= USDHI6_SD_CMD_MODE_RSP_R3;
            break;
        default:
            dev_warn(mmc_dev(host->mmc),
                 "Unknown response type %d\n",
                 mmc_resp_type(cmd));
            return -EINVAL;
        }
    }

    return opc;
}

static int usdhi6_rq_start(struct usdhi6_host *host)
{
    struct mmc_request *mrq = host->mrq;
    struct mmc_command *cmd = mrq->cmd;
    struct mmc_data *data = mrq->data;
    int opc = usdhi6_cmd_flags(host);
    int i;

    if (opc < 0)
        return opc;

    for (i = 1000; i; i--) {
        if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY))
            break;
        usleep_range(10, 100);
    }

    if (!i) {
        dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n");
        return -EAGAIN;
    }

    if (data) {
        bool use_dma;
        int ret = 0;

        host->page_idx = 0;

        if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) {
            switch (data->blksz) {
            case 512:
                break;
            case 32:
            case 64:
            case 128:
            case 256:
                if (mrq->stop)
                    ret = -EINVAL;
                break;
            default:
                ret = -EINVAL;
            }
        } else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
                cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) &&
               data->blksz != 512) {
            ret = -EINVAL;
        }

        if (ret < 0) {
            dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n",
                 __func__, data->blocks, data->blksz);
            return -EINVAL;
        }

        if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
            cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
            (cmd->opcode == SD_IO_RW_EXTENDED &&
             data->blocks > 1))
            usdhi6_sg_prep(host);

        usdhi6_write(host, USDHI6_SD_SIZE, data->blksz);

        if ((data->blksz >= USDHI6_MIN_DMA ||
             data->blocks > 1) &&
            (data->blksz % 4 ||
             data->sg->offset % 4))
            dev_dbg(mmc_dev(host->mmc),
                "Bad SG of %u: %ux%u @ %u\n", data->sg_len,
                data->blksz, data->blocks, data->sg->offset);

        /* Enable DMA for USDHI6_MIN_DMA bytes or more */
        use_dma = data->blksz >= USDHI6_MIN_DMA &&
            !(data->blksz % 4) &&
            usdhi6_dma_start(host) >= DMA_MIN_COOKIE;

        if (use_dma)
            usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW);

        dev_dbg(mmc_dev(host->mmc),
            "%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n",
            __func__, cmd->opcode, data->blocks, data->blksz,
            data->sg_len, use_dma ? "DMA" : "PIO",
            data->flags & MMC_DATA_READ ? "read" : "write",
            data->sg->offset, mrq->stop ? " + stop" : "");
    } else {
        dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n",
            __func__, cmd->opcode);
    }

    /* We have to get a command completion interrupt with DMA too */
    usdhi6_wait_for_resp(host);

    host->wait = USDHI6_WAIT_FOR_CMD;
    schedule_delayed_work(&host->timeout_work, host->timeout);

    /* SEC bit is required to enable block counting by the core */
    usdhi6_write(host, USDHI6_SD_STOP,
             data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0);
    usdhi6_write(host, USDHI6_SD_ARG, cmd->arg);

    /* Kick command execution */
    usdhi6_write(host, USDHI6_SD_CMD, opc);

    return 0;
}

static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
    struct usdhi6_host *host = mmc_priv(mmc);
    int ret;

    cancel_delayed_work_sync(&host->timeout_work);

    host->mrq = mrq;
    host->sg = NULL;

    usdhi6_timeout_set(host);
    ret = usdhi6_rq_start(host);
    if (ret < 0) {
        mrq->cmd->error = ret;
        usdhi6_request_done(host);
    }
}

static int usdhi6_get_cd(struct mmc_host *mmc)
{
    struct usdhi6_host *host = mmc_priv(mmc);
    /* Read is atomic, no need to lock */
    u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD;

/*
 *  level   status.CD   CD_ACTIVE_HIGH  card present
 *  1   0       0       0
 *  1   0       1       1
 *  0   1       0       1
 *  0   1       1       0
 */
    return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
}

static int usdhi6_get_ro(struct mmc_host *mmc)
{
    struct usdhi6_host *host = mmc_priv(mmc);
    /* No locking as above */
    u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP;

/*
 *  level   status.WP   RO_ACTIVE_HIGH  card read-only
 *  1   0       0       0
 *  1   0       1       1
 *  0   1       0       1
 *  0   1       1       0
 */
    return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
}

static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
    struct usdhi6_host *host = mmc_priv(mmc);

    dev_dbg(mmc_dev(mmc), "%s(): %sable\n", __func__, enable ? "en" : "dis");

    if (enable) {
        host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ;
        usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask);
        usdhi6_write(host, USDHI6_SDIO_MODE, 1);
    } else {
        usdhi6_write(host, USDHI6_SDIO_MODE, 0);
        usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ);
        host->sdio_mask = USDHI6_SDIO_INFO1_IRQ;
    }
}

static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
{
    if (IS_ERR(host->pins_uhs))
        return 0;

    switch (voltage) {
    case MMC_SIGNAL_VOLTAGE_180:
    case MMC_SIGNAL_VOLTAGE_120:
        return pinctrl_select_state(host->pinctrl,
                        host->pins_uhs);

    default:
        return pinctrl_select_default_state(mmc_dev(host->mmc));
    }
}

static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
    int ret;

    ret = mmc_regulator_set_vqmmc(mmc, ios);
    if (ret < 0)
        return ret;

    ret = usdhi6_set_pinstates(mmc_priv(mmc), ios->signal_voltage);
    if (ret)
        dev_warn_once(mmc_dev(mmc),
                  "Failed to set pinstate err=%d\n", ret);
    return ret;
}

static int usdhi6_card_busy(struct mmc_host *mmc)
{
    struct usdhi6_host *host = mmc_priv(mmc);
    u32 tmp = usdhi6_read(host, USDHI6_SD_INFO2);

    /* Card is busy if it is pulling dat[0] low */
    return !(tmp & USDHI6_SD_INFO2_SDDAT0);
}

static const struct mmc_host_ops usdhi6_ops = {
    .request    = usdhi6_request,
    .set_ios    = usdhi6_set_ios,
    .get_cd     = usdhi6_get_cd,
    .get_ro     = usdhi6_get_ro,
    .enable_sdio_irq = usdhi6_enable_sdio_irq,
    .start_signal_voltage_switch = usdhi6_sig_volt_switch,
    .card_busy = usdhi6_card_busy,
};

/*          State machine handlers              */

static void usdhi6_resp_cmd12(struct usdhi6_host *host)
{
    struct mmc_command *cmd = host->mrq->stop;
    cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
}

static void usdhi6_resp_read(struct usdhi6_host *host)
{
    struct mmc_command *cmd = host->mrq->cmd;
    u32 *rsp = cmd->resp, tmp = 0;
    int i;

/*
 * RSP10    39-8
 * RSP32    71-40
 * RSP54    103-72
 * RSP76    127-104
 * R2-type response:
 * resp[0]  = r[127..96]
 * resp[1]  = r[95..64]
 * resp[2]  = r[63..32]
 * resp[3]  = r[31..0]
 * Other responses:
 * resp[0]  = r[39..8]
 */
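/*
 * Per the table above, the registers hold r[127..8], i.e. the response with
 * the trailing byte already stripped, so for a 136-bit (R2) response each
 * resp[] word has to be stitched together from two adjacent register reads;
 * the low byte of resp[3] is not available and stays zero.
 */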

    if (mmc_resp_type(cmd) == MMC_RSP_NONE)
        return;

    if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) {
        dev_err(mmc_dev(host->mmc),
            "CMD%d: response expected but is missing!\n", cmd->opcode);
        return;
    }

    if (mmc_resp_type(cmd) & MMC_RSP_136)
        for (i = 0; i < 4; i++) {
            if (i)
                rsp[3 - i] = tmp >> 24;
            tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8);
            rsp[3 - i] |= tmp << 8;
        }
    else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
         cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
        /* Read RSP54 to avoid conflict with auto CMD12 */
        rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54);
    else
        rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10);

    dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]);
}

static int usdhi6_blk_read(struct usdhi6_host *host)
{
    struct mmc_data *data = host->mrq->data;
    u32 *p;
    int i, rest;

    if (host->io_error) {
        data->error = usdhi6_error_code(host);
        goto error;
    }

    if (host->pg.page) {
        p = host->blk_page + host->offset;
    } else {
        p = usdhi6_sg_map(host);
        if (!p) {
            data->error = -ENOMEM;
            goto error;
        }
    }

    for (i = 0; i < data->blksz / 4; i++, p++)
        *p = usdhi6_read(host, USDHI6_SD_BUF0);

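    /*
     * blksz need not be a multiple of 4 (e.g. some SDIO transfers): drain
     * the remaining 1-3 bytes with 16-bit reads from the data port.
     */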
    rest = data->blksz % 4;
    for (i = 0; i < (rest + 1) / 2; i++) {
        u16 d = usdhi6_read16(host, USDHI6_SD_BUF0);
        ((u8 *)p)[2 * i] = ((u8 *)&d)[0];
        if (rest > 1 && !i)
            ((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1];
    }

    return 0;

error:
    dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
    host->wait = USDHI6_WAIT_FOR_REQUEST;
    return data->error;
}

static int usdhi6_blk_write(struct usdhi6_host *host)
{
    struct mmc_data *data = host->mrq->data;
    u32 *p;
    int i, rest;

    if (host->io_error) {
        data->error = usdhi6_error_code(host);
        goto error;
    }

    if (host->pg.page) {
        p = host->blk_page + host->offset;
    } else {
        p = usdhi6_sg_map(host);
        if (!p) {
            data->error = -ENOMEM;
            goto error;
        }
    }

    for (i = 0; i < data->blksz / 4; i++, p++)
        usdhi6_write(host, USDHI6_SD_BUF0, *p);

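    /*
     * As in usdhi6_blk_read(): push any 1-3 trailing bytes with 16-bit
     * writes, padding the odd byte with zero.
     */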
    rest = data->blksz % 4;
    for (i = 0; i < (rest + 1) / 2; i++) {
        u16 d;
        ((u8 *)&d)[0] = ((u8 *)p)[2 * i];
        if (rest > 1 && !i)
            ((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1];
        else
            ((u8 *)&d)[1] = 0;
        usdhi6_write16(host, USDHI6_SD_BUF0, d);
    }

    return 0;

error:
    dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
    host->wait = USDHI6_WAIT_FOR_REQUEST;
    return data->error;
}

static int usdhi6_stop_cmd(struct usdhi6_host *host)
{
    struct mmc_request *mrq = host->mrq;

    switch (mrq->cmd->opcode) {
    case MMC_READ_MULTIPLE_BLOCK:
    case MMC_WRITE_MULTIPLE_BLOCK:
        if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) {
            host->wait = USDHI6_WAIT_FOR_STOP;
            return 0;
        }
        fallthrough;    /* Unsupported STOP command */
    default:
        dev_err(mmc_dev(host->mmc),
            "unsupported stop CMD%d for CMD%d\n",
            mrq->stop->opcode, mrq->cmd->opcode);
        mrq->stop->error = -EOPNOTSUPP;
    }

    return -EOPNOTSUPP;
}

static bool usdhi6_end_cmd(struct usdhi6_host *host)
{
    struct mmc_request *mrq = host->mrq;
    struct mmc_command *cmd = mrq->cmd;

    if (host->io_error) {
        cmd->error = usdhi6_error_code(host);
        return false;
    }

    usdhi6_resp_read(host);

    if (!mrq->data)
        return false;

    if (host->dma_active) {
        usdhi6_dma_kick(host);
        if (!mrq->stop)
            host->wait = USDHI6_WAIT_FOR_DMA;
        else if (usdhi6_stop_cmd(host) < 0)
            return false;
    } else if (mrq->data->flags & MMC_DATA_READ) {
        if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
            (cmd->opcode == SD_IO_RW_EXTENDED &&
             mrq->data->blocks > 1))
            host->wait = USDHI6_WAIT_FOR_MREAD;
        else
            host->wait = USDHI6_WAIT_FOR_READ;
    } else {
        if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
            (cmd->opcode == SD_IO_RW_EXTENDED &&
             mrq->data->blocks > 1))
            host->wait = USDHI6_WAIT_FOR_MWRITE;
        else
            host->wait = USDHI6_WAIT_FOR_WRITE;
    }

    return true;
}

static bool usdhi6_read_block(struct usdhi6_host *host)
{
    /* ACCESS_END IRQ is already unmasked */
    int ret = usdhi6_blk_read(host);

    /*
     * Have to force unmapping both pages: the single block could have been
     * cross-page, in which case for single-block IO host->page_idx == 0.
     * So, if we don't force, the second page won't be unmapped.
     */
    usdhi6_sg_unmap(host, true);

    if (ret < 0)
        return false;

    host->wait = USDHI6_WAIT_FOR_DATA_END;
    return true;
}

static bool usdhi6_mread_block(struct usdhi6_host *host)
{
    int ret = usdhi6_blk_read(host);

    if (ret < 0)
        return false;

    usdhi6_sg_advance(host);

    return !host->mrq->data->error &&
        (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
}

static bool usdhi6_write_block(struct usdhi6_host *host)
{
    int ret = usdhi6_blk_write(host);

    /* See comment in usdhi6_read_block() */
    usdhi6_sg_unmap(host, true);

    if (ret < 0)
        return false;

    host->wait = USDHI6_WAIT_FOR_DATA_END;
    return true;
}

static bool usdhi6_mwrite_block(struct usdhi6_host *host)
{
    int ret = usdhi6_blk_write(host);

    if (ret < 0)
        return false;

    usdhi6_sg_advance(host);

    return !host->mrq->data->error &&
        (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
}

/*          Interrupt & timeout handlers            */

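/*
 * The SD interrupt is split: usdhi6_sd() runs in hard IRQ context, acks the
 * status bits and records them in host->irq_status / host->io_error, then
 * returns IRQ_WAKE_THREAD; this threaded bottom half advances the state
 * machine and may sleep (e.g. in cancel_delayed_work_sync()).
 */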
static irqreturn_t usdhi6_sd_bh(int irq, void *dev_id)
{
    struct usdhi6_host *host = dev_id;
    struct mmc_request *mrq;
    struct mmc_command *cmd;
    struct mmc_data *data;
    bool io_wait = false;

    cancel_delayed_work_sync(&host->timeout_work);

    mrq = host->mrq;
    if (!mrq)
        return IRQ_HANDLED;

    cmd = mrq->cmd;
    data = mrq->data;

    switch (host->wait) {
    case USDHI6_WAIT_FOR_REQUEST:
        /* We're too late, the timeout has already kicked in */
        return IRQ_HANDLED;
    case USDHI6_WAIT_FOR_CMD:
        /* Wait for data? */
        io_wait = usdhi6_end_cmd(host);
        break;
    case USDHI6_WAIT_FOR_MREAD:
        /* Wait for more data? */
        io_wait = usdhi6_mread_block(host);
        break;
    case USDHI6_WAIT_FOR_READ:
        /* Wait for data end? */
        io_wait = usdhi6_read_block(host);
        break;
    case USDHI6_WAIT_FOR_MWRITE:
        /* Wait for more data to write? */
        io_wait = usdhi6_mwrite_block(host);
        break;
    case USDHI6_WAIT_FOR_WRITE:
        /* Wait for data end? */
        io_wait = usdhi6_write_block(host);
        break;
    case USDHI6_WAIT_FOR_DMA:
        usdhi6_dma_check_error(host);
        break;
    case USDHI6_WAIT_FOR_STOP:
        usdhi6_write(host, USDHI6_SD_STOP, 0);
        if (host->io_error) {
            int ret = usdhi6_error_code(host);
            if (mrq->stop)
                mrq->stop->error = ret;
            else
                mrq->data->error = ret;
            dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret);
            break;
        }
        usdhi6_resp_cmd12(host);
        mrq->stop->error = 0;
        break;
    case USDHI6_WAIT_FOR_DATA_END:
        if (host->io_error) {
            mrq->data->error = usdhi6_error_code(host);
            dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__,
                 mrq->data->error);
        }
        break;
    default:
        cmd->error = -EFAULT;
        dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
        usdhi6_request_done(host);
        return IRQ_HANDLED;
    }

    if (io_wait) {
        schedule_delayed_work(&host->timeout_work, host->timeout);
        /* Wait for more data or ACCESS_END */
        if (!host->dma_active)
            usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
        return IRQ_HANDLED;
    }

    if (!cmd->error) {
        if (data) {
            if (!data->error) {
                if (host->wait != USDHI6_WAIT_FOR_STOP &&
                    host->mrq->stop &&
                    !host->mrq->stop->error &&
                    !usdhi6_stop_cmd(host)) {
                    /* Sending STOP */
                    usdhi6_wait_for_resp(host);

                    schedule_delayed_work(&host->timeout_work,
                                  host->timeout);

                    return IRQ_HANDLED;
                }

                data->bytes_xfered = data->blocks * data->blksz;
            } else {
                /* Data error: might need to unmap the last page */
                dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n",
                     __func__, data->error);
                usdhi6_sg_unmap(host, true);
            }
        } else if (cmd->opcode == MMC_APP_CMD) {
            host->app_cmd = true;
        }
    }

    usdhi6_request_done(host);

    return IRQ_HANDLED;
}

static irqreturn_t usdhi6_sd(int irq, void *dev_id)
{
    struct usdhi6_host *host = dev_id;
    u16 status, status2, error;

    status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
        ~USDHI6_SD_INFO1_CARD;
    status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask;

    usdhi6_only_cd(host);

    dev_dbg(mmc_dev(host->mmc),
        "IRQ status = 0x%08x, status2 = 0x%08x\n", status, status2);

    if (!status && !status2)
        return IRQ_NONE;

    error = status2 & USDHI6_SD_INFO2_ERR;

    /* Ack / clear interrupts */
    if (USDHI6_SD_INFO1_IRQ & status)
        usdhi6_write(host, USDHI6_SD_INFO1,
                 0xffff & ~(USDHI6_SD_INFO1_IRQ & status));

    if (USDHI6_SD_INFO2_IRQ & status2) {
        if (error)
            /* In error cases BWE and BRE aren't cleared automatically */
            status2 |= USDHI6_SD_INFO2_BWE | USDHI6_SD_INFO2_BRE;

        usdhi6_write(host, USDHI6_SD_INFO2,
                 0xffff & ~(USDHI6_SD_INFO2_IRQ & status2));
    }

    host->io_error = error;
    host->irq_status = status;

    if (error) {
        /* Don't pollute the log with unsupported command timeouts */
        if (host->wait != USDHI6_WAIT_FOR_CMD ||
            error != USDHI6_SD_INFO2_RSP_TOUT)
            dev_warn(mmc_dev(host->mmc),
                 "%s(): INFO2 error bits 0x%08x\n",
                 __func__, error);
        else
            dev_dbg(mmc_dev(host->mmc),
                "%s(): INFO2 error bits 0x%08x\n",
                __func__, error);
    }

    return IRQ_WAKE_THREAD;
}

static irqreturn_t usdhi6_sdio(int irq, void *dev_id)
{
    struct usdhi6_host *host = dev_id;
    u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask;

    dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status);

    if (!status)
        return IRQ_NONE;

    usdhi6_write(host, USDHI6_SDIO_INFO1, ~status);

    mmc_signal_sdio_irq(host->mmc);

    return IRQ_HANDLED;
}

static irqreturn_t usdhi6_cd(int irq, void *dev_id)
{
    struct usdhi6_host *host = dev_id;
    struct mmc_host *mmc = host->mmc;
    u16 status;

    /* We're only interested in hotplug events here */
    status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
        USDHI6_SD_INFO1_CARD;

    if (!status)
        return IRQ_NONE;

    /* Ack */
    usdhi6_write(host, USDHI6_SD_INFO1, ~status);

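    /*
     * Schedule a rescan only when the event contradicts the current card
     * state: an insertion while no card is bound, or an ejection while one
     * still is, and only if no detect work is already queued.
     */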
    if (!work_pending(&mmc->detect.work) &&
        (((status & USDHI6_SD_INFO1_CARD_INSERT) &&
          !mmc->card) ||
         ((status & USDHI6_SD_INFO1_CARD_EJECT) &&
          mmc->card)))
        mmc_detect_change(mmc, msecs_to_jiffies(100));

    return IRQ_HANDLED;
}

/*
 * Strictly speaking, this should not be needed: the built-in timeout should
 * work reliably in both PIO cases, and DMA should never fail. But if either
 * assumption is violated, this timeout handler may be the only way to catch
 * the error.
 */
static void usdhi6_timeout_work(struct work_struct *work)
{
    struct delayed_work *d = to_delayed_work(work);
    struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
    struct mmc_request *mrq = host->mrq;
    struct mmc_data *data = mrq ? mrq->data : NULL;
    struct scatterlist *sg;

    dev_warn(mmc_dev(host->mmc),
         "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
         host->dma_active ? "DMA" : "PIO",
         host->wait, mrq ? mrq->cmd->opcode : -1,
         usdhi6_read(host, USDHI6_SD_INFO1),
         usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status);

    if (host->dma_active) {
        usdhi6_dma_kill(host);
        usdhi6_dma_stop_unmap(host);
    }

    switch (host->wait) {
    default:
        dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
        fallthrough;    /* mrq can be NULL here, though in practice that cannot happen */
    case USDHI6_WAIT_FOR_CMD:
        usdhi6_error_code(host);
        if (mrq)
            mrq->cmd->error = -ETIMEDOUT;
        break;
    case USDHI6_WAIT_FOR_STOP:
        usdhi6_error_code(host);
        mrq->stop->error = -ETIMEDOUT;
        break;
    case USDHI6_WAIT_FOR_DMA:
    case USDHI6_WAIT_FOR_MREAD:
    case USDHI6_WAIT_FOR_MWRITE:
    case USDHI6_WAIT_FOR_READ:
    case USDHI6_WAIT_FOR_WRITE:
        sg = host->sg ?: data->sg;
        dev_dbg(mmc_dev(host->mmc),
            "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
            data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
            host->offset, data->blocks, data->blksz, data->sg_len,
            sg_dma_len(sg), sg->offset);
        usdhi6_sg_unmap(host, true);
        fallthrough;    /* page unmapped in USDHI6_WAIT_FOR_DATA_END */
    case USDHI6_WAIT_FOR_DATA_END:
        usdhi6_error_code(host);
        data->error = -ETIMEDOUT;
    }

    if (mrq)
        usdhi6_request_done(host);
}
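
/*
 * For orientation: a minimal sketch of how this watchdog pairs with request
 * processing. It is (re)armed with schedule_delayed_work() whenever a wait
 * state is entered, and must be cancelled synchronously before the request
 * completes so the handler cannot race with completion. These helpers are
 * hypothetical, not part of this driver, and are compiled out via #if 0.
 */
#if 0
static void usdhi6_arm_timeout(struct usdhi6_host *host)
{
    schedule_delayed_work(&host->timeout_work, host->timeout);
}

static void usdhi6_disarm_timeout(struct usdhi6_host *host)
{
    /* Synchronous: guarantees usdhi6_timeout_work() is not running */
    cancel_delayed_work_sync(&host->timeout_work);
}
#endif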

/*           Probe / release                */

static const struct of_device_id usdhi6_of_match[] = {
    {.compatible = "renesas,usdhi6rol0"},
    {}
};
MODULE_DEVICE_TABLE(of, usdhi6_of_match);

static int usdhi6_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct mmc_host *mmc;
    struct usdhi6_host *host;
    struct resource *res;
    int irq_cd, irq_sd, irq_sdio;
    u32 version;
    int ret;

    if (!dev->of_node)
        return -ENODEV;

    irq_cd = platform_get_irq_byname(pdev, "card detect");
    irq_sd = platform_get_irq_byname(pdev, "data");
    irq_sdio = platform_get_irq_byname(pdev, "SDIO");
    if (irq_sd < 0 || irq_sdio < 0)
        return -ENODEV;

    mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
    if (!mmc)
        return -ENOMEM;

    ret = mmc_regulator_get_supply(mmc);
    if (ret)
        goto e_free_mmc;

    ret = mmc_of_parse(mmc);
    if (ret < 0)
        goto e_free_mmc;

    host        = mmc_priv(mmc);
    host->mmc   = mmc;
    host->wait  = USDHI6_WAIT_FOR_REQUEST;
    host->timeout   = msecs_to_jiffies(USDHI6_REQ_TIMEOUT_MS);
    /*
     * We use a fixed 4 s timeout, so inform the core about it. A future
     * improvement should honour cmd->busy_timeout instead.
     */
    mmc->max_busy_timeout = USDHI6_REQ_TIMEOUT_MS;
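
    /*
     * A hedged sketch of that improvement: derive the watchdog period from
     * the per-command budget when the core supplies one, else fall back to
     * the fixed default. Hypothetical code, compiled out via #if 0 (cmd is
     * not in scope here; this would live in the request path).
     */
#if 0
    unsigned long period = cmd->busy_timeout ?
        msecs_to_jiffies(cmd->busy_timeout) : host->timeout;
    schedule_delayed_work(&host->timeout_work, period);
#endif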

    host->pinctrl = devm_pinctrl_get(&pdev->dev);
    if (IS_ERR(host->pinctrl)) {
        ret = PTR_ERR(host->pinctrl);
        goto e_free_mmc;
    }

    host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    host->base = devm_ioremap_resource(dev, res);
    if (IS_ERR(host->base)) {
        ret = PTR_ERR(host->base);
        goto e_free_mmc;
    }

    host->clk = devm_clk_get(dev, NULL);
    if (IS_ERR(host->clk)) {
        ret = PTR_ERR(host->clk);
        goto e_free_mmc;
    }

    host->imclk = clk_get_rate(host->clk);

    ret = clk_prepare_enable(host->clk);
    if (ret < 0)
        goto e_free_mmc;

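    /* The low 12 bits of VERSION identify the IP; only 0xa0d is supported */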
    version = usdhi6_read(host, USDHI6_VERSION);
    if ((version & 0xfff) != 0xa0d) {
        ret = -EPERM;
        dev_err(dev, "Unrecognized version 0x%x\n", version);
        goto e_clk_off;
    }

    dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n",
         usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT);

    usdhi6_mask_all(host);

    if (irq_cd >= 0) {
        ret = devm_request_irq(dev, irq_cd, usdhi6_cd, 0,
                       dev_name(dev), host);
        if (ret < 0)
            goto e_clk_off;
    } else {
        mmc->caps |= MMC_CAP_NEEDS_POLL;
    }

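    /*
     * The data interrupt is split between hard-IRQ and thread context:
     * usdhi6_sd() acks and latches status, then returns IRQ_WAKE_THREAD so
     * usdhi6_sd_bh() can do the heavier request handling.
     */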
    ret = devm_request_threaded_irq(dev, irq_sd, usdhi6_sd, usdhi6_sd_bh, 0,
                   dev_name(dev), host);
    if (ret < 0)
        goto e_clk_off;

    ret = devm_request_irq(dev, irq_sdio, usdhi6_sdio, 0,
                   dev_name(dev), host);
    if (ret < 0)
        goto e_clk_off;

    INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work);

    usdhi6_dma_request(host, res->start);

    mmc->ops = &usdhi6_ops;
    mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
             MMC_CAP_SDIO_IRQ;
    /* An arbitrary segment limit; adjust if a different trade-off is needed */
    mmc->max_segs = 32;
    mmc->max_blk_size = 512;
    mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
    mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
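    /*
     * Worked example, assuming 4 KiB pages: 32 segments * 4096 bytes gives a
     * 128 KiB maximum request, i.e. up to 256 blocks of 512 bytes each.
     */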
    /*
     * Setting .max_seg_size to one page would simplify our page-mapping
     * code, but on the other hand large segments make DMA more efficient.
     * We could check whether a DMA channel was actually obtained and fall
     * back to one-page segments otherwise, but if DMA is obtained and then
     * fails at run time, we fall back to PIO while still receiving large
     * segments. So the page-mapping code could not be removed anyway.
     */
    mmc->max_seg_size = mmc->max_req_size;
    if (!mmc->f_max)
        mmc->f_max = host->imclk;
    mmc->f_min = host->imclk / 512;
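    /*
     * The divider spans 1..512, so a 100 MHz interface clock, for example,
     * yields an f_min of roughly 195 kHz (100000000 / 512).
     */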

    platform_set_drvdata(pdev, host);

    ret = mmc_add_host(mmc);
    if (ret < 0)
        goto e_release_dma;

    return 0;

e_release_dma:
    usdhi6_dma_release(host);
e_clk_off:
    clk_disable_unprepare(host->clk);
e_free_mmc:
    mmc_free_host(mmc);

    return ret;
}

static int usdhi6_remove(struct platform_device *pdev)
{
    struct usdhi6_host *host = platform_get_drvdata(pdev);

    mmc_remove_host(host->mmc);

    usdhi6_mask_all(host);
    cancel_delayed_work_sync(&host->timeout_work);
    usdhi6_dma_release(host);
    clk_disable_unprepare(host->clk);
    mmc_free_host(host->mmc);

    return 0;
}

static struct platform_driver usdhi6_driver = {
    .probe      = usdhi6_probe,
    .remove     = usdhi6_remove,
    .driver     = {
        .name   = "usdhi6rol0",
        .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        .of_match_table = usdhi6_of_match,
    },
};

module_platform_driver(usdhi6_driver);

MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:usdhi6rol0");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");