Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Actions Semi Owl SoCs SD/MMC driver
0004  *
0005  * Copyright (c) 2014 Actions Semi Inc.
0006  * Copyright (c) 2019 Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
0007  *
0008  * TODO: SDIO support
0009  */
0010 
0011 #include <linux/clk.h>
0012 #include <linux/delay.h>
0013 #include <linux/dmaengine.h>
0014 #include <linux/dma-direction.h>
0015 #include <linux/dma-mapping.h>
0016 #include <linux/interrupt.h>
0017 #include <linux/mmc/host.h>
0018 #include <linux/mmc/slot-gpio.h>
0019 #include <linux/module.h>
0020 #include <linux/of_platform.h>
0021 #include <linux/reset.h>
0022 #include <linux/spinlock.h>
0023 
/*
 * SDC registers (byte offsets from the memory-mapped controller base,
 * accessed via owl_host->base throughout this driver)
 */
#define OWL_REG_SD_EN           0x0000
#define OWL_REG_SD_CTL          0x0004
#define OWL_REG_SD_STATE        0x0008
#define OWL_REG_SD_CMD          0x000c
#define OWL_REG_SD_ARG          0x0010
#define OWL_REG_SD_RSPBUF0      0x0014
#define OWL_REG_SD_RSPBUF1      0x0018
#define OWL_REG_SD_RSPBUF2      0x001c
#define OWL_REG_SD_RSPBUF3      0x0020
#define OWL_REG_SD_RSPBUF4      0x0024
#define OWL_REG_SD_DAT          0x0028
#define OWL_REG_SD_BLK_SIZE     0x002c
#define OWL_REG_SD_BLK_NUM      0x0030
#define OWL_REG_SD_BUF_SIZE     0x0034

/* SD_EN Bits */
#define OWL_SD_EN_RANE          BIT(31)
#define OWL_SD_EN_RAN_SEED(x)       (((x) & 0x3f) << 24)
#define OWL_SD_EN_S18EN         BIT(12)	/* 1.8V signalling pad control */
#define OWL_SD_EN_RESE          BIT(10)
#define OWL_SD_EN_DAT1_S        BIT(9)
#define OWL_SD_EN_CLK_S         BIT(8)
#define OWL_SD_ENABLE           BIT(7)
#define OWL_SD_EN_BSEL          BIT(6)	/* bus (DMA) mode select */
#define OWL_SD_EN_SDIOEN        BIT(3)
#define OWL_SD_EN_DDREN         BIT(2)	/* DDR50 mode enable */
#define OWL_SD_EN_DATAWID(x)        (((x) & 0x3) << 0)	/* bus width: 0=1bit, 1=4bit, 2=8bit */

/* SD_CTL Bits */
#define OWL_SD_CTL_TOUTEN       BIT(31)
#define OWL_SD_CTL_TOUTCNT(x)       (((x) & 0x7f) << 24)
#define OWL_SD_CTL_DELAY_MSK        GENMASK(23, 16)	/* covers both RDELAY and WDELAY */
#define OWL_SD_CTL_RDELAY(x)        (((x) & 0xf) << 20)
#define OWL_SD_CTL_WDELAY(x)        (((x) & 0xf) << 16)
#define OWL_SD_CTL_CMDLEN       BIT(13)
#define OWL_SD_CTL_SCC          BIT(12)
#define OWL_SD_CTL_TCN(x)       (((x) & 0xf) << 8)
#define OWL_SD_CTL_TS           BIT(7)	/* transfer start */
#define OWL_SD_CTL_LBE          BIT(6)	/* send clock after last read block */
#define OWL_SD_CTL_TM(x)        (((x) & 0xf) << 0)	/* transfer mode selector */

/* Delay-chain values selected per clock band in owl_mmc_set_clk_rate() */
#define OWL_SD_DELAY_LOW_CLK        0x0f
#define OWL_SD_DELAY_MID_CLK        0x0a
#define OWL_SD_DELAY_HIGH_CLK       0x09
#define OWL_SD_RDELAY_DDR50     0x0a
#define OWL_SD_WDELAY_DDR50     0x08

/* SD_STATE Bits */
#define OWL_SD_STATE_DAT1BS     BIT(18)
#define OWL_SD_STATE_SDIOB_P        BIT(17)
#define OWL_SD_STATE_SDIOB_EN       BIT(16)
#define OWL_SD_STATE_TOUTE      BIT(15)
#define OWL_SD_STATE_BAEP       BIT(14)
#define OWL_SD_STATE_MEMRDY     BIT(12)
#define OWL_SD_STATE_CMDS       BIT(11)
#define OWL_SD_STATE_DAT1AS     BIT(10)
#define OWL_SD_STATE_SDIOA_P        BIT(9)
#define OWL_SD_STATE_SDIOA_EN       BIT(8)
#define OWL_SD_STATE_DAT0S      BIT(7)
#define OWL_SD_STATE_TEIE       BIT(6)	/* transfer-end IRQ enable */
#define OWL_SD_STATE_TEI        BIT(5)	/* transfer-end IRQ status; written back to ack in owl_irq_handler() */
#define OWL_SD_STATE_CLNR       BIT(4)	/* command-line no response error */
#define OWL_SD_STATE_CLC        BIT(3)
#define OWL_SD_STATE_WC16ER     BIT(2)
#define OWL_SD_STATE_RC16ER     BIT(1)
#define OWL_SD_STATE_CRC7ER     BIT(0)	/* response CRC7 error */

/* Default wait for the command-done interrupt, in milliseconds */
#define OWL_CMD_TIMEOUT_MS      30000
0096 
/* Per-controller driver state, allocated as mmc_host private data. */
struct owl_mmc_host {
	struct device *dev;
	struct reset_control *reset;	/* controller reset line */
	void __iomem *base;		/* mapped SDC register block */
	struct clk *clk;		/* SDC interface clock */
	struct completion sdc_complete;	/* completed by the TEI interrupt */
	spinlock_t lock;		/* serializes SD_STATE handling in the IRQ */
	int irq;
	u32 clock;			/* last clock rate requested via set_ios */
	bool ddr_50;			/* true when DDR50 timing is selected */

	/* External DMA channel used for all data transfers */
	enum dma_data_direction dma_dir;
	struct dma_chan *dma;
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config dma_cfg;
	struct completion dma_complete;	/* completed by the DMA callback */

	struct mmc_host *mmc;
	struct mmc_request *mrq;	/* in-flight request, NULL when idle */
	struct mmc_command *cmd;
	struct mmc_data *data;
};
0119 
0120 static void owl_mmc_update_reg(void __iomem *reg, unsigned int val, bool state)
0121 {
0122     unsigned int regval;
0123 
0124     regval = readl(reg);
0125 
0126     if (state)
0127         regval |= val;
0128     else
0129         regval &= ~val;
0130 
0131     writel(regval, reg);
0132 }
0133 
/*
 * owl_irq_handler - hard IRQ handler for the SDC interrupt.
 *
 * On a transfer-end interrupt (TEI), acknowledges it by writing the TEI
 * bit back to SD_STATE and wakes up the waiter on sdc_complete.
 */
static irqreturn_t owl_irq_handler(int irq, void *devid)
{
	struct owl_mmc_host *owl_host = devid;
	u32 state;

	spin_lock(&owl_host->lock);

	state = readl(owl_host->base + OWL_REG_SD_STATE);
	if (state & OWL_SD_STATE_TEI) {
		/*
		 * Re-read so the value written back carries the current
		 * state bits, then ack TEI by writing it back.
		 */
		state = readl(owl_host->base + OWL_REG_SD_STATE);
		state |= OWL_SD_STATE_TEI;
		writel(state, owl_host->base + OWL_REG_SD_STATE);
		complete(&owl_host->sdc_complete);
	}

	spin_unlock(&owl_host->lock);

	return IRQ_HANDLED;
}
0153 
0154 static void owl_mmc_finish_request(struct owl_mmc_host *owl_host)
0155 {
0156     struct mmc_request *mrq = owl_host->mrq;
0157     struct mmc_data *data = mrq->data;
0158 
0159     /* Should never be NULL */
0160     WARN_ON(!mrq);
0161 
0162     owl_host->mrq = NULL;
0163 
0164     if (data)
0165         dma_unmap_sg(owl_host->dma->device->dev, data->sg, data->sg_len,
0166                  owl_host->dma_dir);
0167 
0168     /* Finally finish request */
0169     mmc_request_done(owl_host->mmc, mrq);
0170 }
0171 
/*
 * owl_mmc_send_cmd - issue a single MMC command to the controller.
 * @owl_host: host instance
 * @cmd: command to send; cmd->error is set on failure
 * @data: associated data segment, or NULL for a bare command
 *
 * Selects a transfer mode from the expected response type, programs the
 * argument/opcode registers and starts the transfer.  For commands
 * without data it also waits for the transfer-end interrupt, checks the
 * error bits and reads back the response; for data commands the wait is
 * done by the caller (owl_mmc_request) after the DMA is kicked off.
 */
static void owl_mmc_send_cmd(struct owl_mmc_host *owl_host,
			     struct mmc_command *cmd,
			     struct mmc_data *data)
{
	unsigned long timeout;
	u32 mode, state, resp[2];
	u32 cmd_rsp_mask = 0;

	init_completion(&owl_host->sdc_complete);

	/* Pick transfer mode and the error bits to check afterwards */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		mode = OWL_SD_CTL_TM(0);
		break;

	case MMC_RSP_R1:
		if (data) {
			/* R1 with data: mode 4 = read, mode 5 = write */
			if (data->flags & MMC_DATA_READ)
				mode = OWL_SD_CTL_TM(4);
			else
				mode = OWL_SD_CTL_TM(5);
		} else {
			mode = OWL_SD_CTL_TM(1);
		}
		cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;

		break;

	case MMC_RSP_R1B:
		mode = OWL_SD_CTL_TM(3);
		cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
		break;

	case MMC_RSP_R2:
		mode = OWL_SD_CTL_TM(2);
		cmd_rsp_mask = OWL_SD_STATE_CLNR | OWL_SD_STATE_CRC7ER;
		break;

	case MMC_RSP_R3:
		/* R3 carries no CRC, so only check for a missing response */
		mode = OWL_SD_CTL_TM(1);
		cmd_rsp_mask = OWL_SD_STATE_CLNR;
		break;

	default:
		dev_warn(owl_host->dev, "Unknown MMC command\n");
		cmd->error = -EINVAL;
		return;
	}

	/* Keep current WDELAY and RDELAY */
	mode |= (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));

	/* Start to send corresponding command type */
	writel(cmd->arg, owl_host->base + OWL_REG_SD_ARG);
	writel(cmd->opcode, owl_host->base + OWL_REG_SD_CMD);

	/* Set LBE to send clk at the end of last read block */
	if (data) {
		/*
		 * NOTE(review): 0x64000000 lands in the TOUTCNT field
		 * (0x64); magic value — confirm intended data timeout
		 * against the Actions Semi datasheet.
		 */
		mode |= (OWL_SD_CTL_TS | OWL_SD_CTL_LBE | 0x64000000);
	} else {
		mode &= ~(OWL_SD_CTL_TOUTEN | OWL_SD_CTL_LBE);
		mode |= OWL_SD_CTL_TS;
	}

	owl_host->cmd = cmd;

	/* Start transfer */
	writel(mode, owl_host->base + OWL_REG_SD_CTL);

	/* Data commands: completion is awaited in owl_mmc_request() */
	if (data)
		return;

	/* Honor the core-supplied busy timeout, else the driver default */
	timeout = msecs_to_jiffies(cmd->busy_timeout ? cmd->busy_timeout :
		OWL_CMD_TIMEOUT_MS);

	if (!wait_for_completion_timeout(&owl_host->sdc_complete, timeout)) {
		dev_err(owl_host->dev, "CMD interrupt timeout\n");
		cmd->error = -ETIMEDOUT;
		return;
	}

	state = readl(owl_host->base + OWL_REG_SD_STATE);
	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
		if (cmd_rsp_mask & state) {
			if (state & OWL_SD_STATE_CLNR) {
				dev_err(owl_host->dev, "Error CMD_NO_RSP\n");
				cmd->error = -EILSEQ;
				return;
			}

			if (state & OWL_SD_STATE_CRC7ER) {
				dev_err(owl_host->dev, "Error CMD_RSP_CRC\n");
				cmd->error = -EILSEQ;
				return;
			}
		}

		if (mmc_resp_type(cmd) & MMC_RSP_136) {
			/* 136-bit response: four words, most significant last */
			cmd->resp[3] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
			cmd->resp[2] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
			cmd->resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF2);
			cmd->resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF3);
		} else {
			/* 48-bit response: payload split across two words */
			resp[0] = readl(owl_host->base + OWL_REG_SD_RSPBUF0);
			resp[1] = readl(owl_host->base + OWL_REG_SD_RSPBUF1);
			cmd->resp[0] = resp[1] << 24 | resp[0] >> 8;
			cmd->resp[1] = resp[1] >> 8;
		}
	}
}
0282 
0283 static void owl_mmc_dma_complete(void *param)
0284 {
0285     struct owl_mmc_host *owl_host = param;
0286     struct mmc_data *data = owl_host->data;
0287 
0288     if (data)
0289         complete(&owl_host->dma_complete);
0290 }
0291 
0292 static int owl_mmc_prepare_data(struct owl_mmc_host *owl_host,
0293                 struct mmc_data *data)
0294 {
0295     u32 total;
0296 
0297     owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN, OWL_SD_EN_BSEL,
0298                true);
0299     writel(data->blocks, owl_host->base + OWL_REG_SD_BLK_NUM);
0300     writel(data->blksz, owl_host->base + OWL_REG_SD_BLK_SIZE);
0301     total = data->blksz * data->blocks;
0302 
0303     if (total < 512)
0304         writel(total, owl_host->base + OWL_REG_SD_BUF_SIZE);
0305     else
0306         writel(512, owl_host->base + OWL_REG_SD_BUF_SIZE);
0307 
0308     if (data->flags & MMC_DATA_WRITE) {
0309         owl_host->dma_dir = DMA_TO_DEVICE;
0310         owl_host->dma_cfg.direction = DMA_MEM_TO_DEV;
0311     } else {
0312         owl_host->dma_dir = DMA_FROM_DEVICE;
0313         owl_host->dma_cfg.direction = DMA_DEV_TO_MEM;
0314     }
0315 
0316     dma_map_sg(owl_host->dma->device->dev, data->sg,
0317            data->sg_len, owl_host->dma_dir);
0318 
0319     dmaengine_slave_config(owl_host->dma, &owl_host->dma_cfg);
0320     owl_host->desc = dmaengine_prep_slave_sg(owl_host->dma, data->sg,
0321                          data->sg_len,
0322                          owl_host->dma_cfg.direction,
0323                          DMA_PREP_INTERRUPT |
0324                          DMA_CTRL_ACK);
0325     if (!owl_host->desc) {
0326         dev_err(owl_host->dev, "Can't prepare slave sg\n");
0327         return -EBUSY;
0328     }
0329 
0330     owl_host->data = data;
0331 
0332     owl_host->desc->callback = owl_mmc_dma_complete;
0333     owl_host->desc->callback_param = (void *)owl_host;
0334     data->error = 0;
0335 
0336     return 0;
0337 }
0338 
/*
 * owl_mmc_request - mmc_host_ops .request handler.
 *
 * For data requests, prepares and submits the DMA descriptor before
 * issuing the command, then waits first for the transfer-end interrupt
 * and then for DMA completion.  Always finishes the request (the error
 * paths fall through to owl_mmc_finish_request via err_out).
 */
static void owl_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct owl_mmc_host *owl_host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	int ret;

	owl_host->mrq = mrq;
	if (mrq->data) {
		ret = owl_mmc_prepare_data(owl_host, data);
		if (ret < 0) {
			data->error = ret;
			goto err_out;
		}

		init_completion(&owl_host->dma_complete);
		dmaengine_submit(owl_host->desc);
		dma_async_issue_pending(owl_host->dma);
	}

	owl_mmc_send_cmd(owl_host, mrq->cmd, data);

	if (data) {
		/* Wait for the command phase (TEI interrupt) first ... */
		if (!wait_for_completion_timeout(&owl_host->sdc_complete,
						 10 * HZ)) {
			dev_err(owl_host->dev, "CMD interrupt timeout\n");
			mrq->cmd->error = -ETIMEDOUT;
			dmaengine_terminate_all(owl_host->dma);
			goto err_out;
		}

		/* ... then for the DMA transfer itself */
		if (!wait_for_completion_timeout(&owl_host->dma_complete,
						 5 * HZ)) {
			dev_err(owl_host->dev, "DMA interrupt timeout\n");
			mrq->cmd->error = -ETIMEDOUT;
			dmaengine_terminate_all(owl_host->dma);
			goto err_out;
		}

		/* Send the stop command (e.g. CMD12) if the request has one */
		if (data->stop)
			owl_mmc_send_cmd(owl_host, data->stop, NULL);

		data->bytes_xfered = data->blocks * data->blksz;
	}

err_out:
	owl_mmc_finish_request(owl_host);
}
0386 
0387 static int owl_mmc_set_clk_rate(struct owl_mmc_host *owl_host,
0388                 unsigned int rate)
0389 {
0390     unsigned long clk_rate;
0391     int ret;
0392     u32 reg;
0393 
0394     reg = readl(owl_host->base + OWL_REG_SD_CTL);
0395     reg &= ~OWL_SD_CTL_DELAY_MSK;
0396 
0397     /* Set RDELAY and WDELAY based on the clock */
0398     if (rate <= 1000000) {
0399         writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_LOW_CLK) |
0400                OWL_SD_CTL_WDELAY(OWL_SD_DELAY_LOW_CLK),
0401                owl_host->base + OWL_REG_SD_CTL);
0402     } else if ((rate > 1000000) && (rate <= 26000000)) {
0403         writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_MID_CLK) |
0404                OWL_SD_CTL_WDELAY(OWL_SD_DELAY_MID_CLK),
0405                owl_host->base + OWL_REG_SD_CTL);
0406     } else if ((rate > 26000000) && (rate <= 52000000) && !owl_host->ddr_50) {
0407         writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_DELAY_HIGH_CLK) |
0408                OWL_SD_CTL_WDELAY(OWL_SD_DELAY_HIGH_CLK),
0409                owl_host->base + OWL_REG_SD_CTL);
0410     /* DDR50 mode has special delay chain */
0411     } else if ((rate > 26000000) && (rate <= 52000000) && owl_host->ddr_50) {
0412         writel(reg | OWL_SD_CTL_RDELAY(OWL_SD_RDELAY_DDR50) |
0413                OWL_SD_CTL_WDELAY(OWL_SD_WDELAY_DDR50),
0414                owl_host->base + OWL_REG_SD_CTL);
0415     } else {
0416         dev_err(owl_host->dev, "SD clock rate not supported\n");
0417         return -EINVAL;
0418     }
0419 
0420     clk_rate = clk_round_rate(owl_host->clk, rate << 1);
0421     ret = clk_set_rate(owl_host->clk, clk_rate);
0422 
0423     return ret;
0424 }
0425 
0426 static void owl_mmc_set_clk(struct owl_mmc_host *owl_host, struct mmc_ios *ios)
0427 {
0428     if (!ios->clock)
0429         return;
0430 
0431     owl_host->clock = ios->clock;
0432     owl_mmc_set_clk_rate(owl_host, ios->clock);
0433 }
0434 
0435 static void owl_mmc_set_bus_width(struct owl_mmc_host *owl_host,
0436                   struct mmc_ios *ios)
0437 {
0438     u32 reg;
0439 
0440     reg = readl(owl_host->base + OWL_REG_SD_EN);
0441     reg &= ~0x03;
0442     switch (ios->bus_width) {
0443     case MMC_BUS_WIDTH_1:
0444         break;
0445     case MMC_BUS_WIDTH_4:
0446         reg |= OWL_SD_EN_DATAWID(1);
0447         break;
0448     case MMC_BUS_WIDTH_8:
0449         reg |= OWL_SD_EN_DATAWID(2);
0450         break;
0451     }
0452 
0453     writel(reg, owl_host->base + OWL_REG_SD_EN);
0454 }
0455 
/*
 * owl_mmc_ctr_reset - pulse the controller reset line.
 *
 * Holds the reset asserted for 20us before releasing it, returning the
 * SDC to its power-on register state.
 */
static void owl_mmc_ctr_reset(struct owl_mmc_host *owl_host)
{
	reset_control_assert(owl_host->reset);
	udelay(20);
	reset_control_deassert(owl_host->reset);
}
0462 
/*
 * owl_mmc_power_on - send the card-initialization clock sequence.
 *
 * Enables the transfer-end interrupt, triggers the controller's
 * "send init clk" transfer mode (TM(8)) and waits up to one second for
 * the resulting TEI interrupt.  Failure is only logged; there is no
 * error to propagate from set_ios.
 */
static void owl_mmc_power_on(struct owl_mmc_host *owl_host)
{
	u32 mode;

	init_completion(&owl_host->sdc_complete);

	/* Enable transfer end IRQ */
	owl_mmc_update_reg(owl_host->base + OWL_REG_SD_STATE,
			   OWL_SD_STATE_TEIE, true);

	/* Send init clk (keep the current RDELAY/WDELAY bits) */
	mode = (readl(owl_host->base + OWL_REG_SD_CTL) & (0xff << 16));
	/* NOTE(review): TCN(5) clock-count value — confirm with datasheet */
	mode |= OWL_SD_CTL_TS | OWL_SD_CTL_TCN(5) | OWL_SD_CTL_TM(8);
	writel(mode, owl_host->base + OWL_REG_SD_CTL);

	if (!wait_for_completion_timeout(&owl_host->sdc_complete, HZ)) {
		dev_err(owl_host->dev, "CMD interrupt timeout\n");
		return;
	}
}
0483 
/*
 * owl_mmc_set_ios - mmc_host_ops .set_ios handler.
 *
 * Applies the power state, clock rate, bus width and DDR50 timing
 * requested by the mmc core.  Powering off returns early, skipping the
 * clock/width/timing updates.
 */
static void owl_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct owl_mmc_host *owl_host = mmc_priv(mmc);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		dev_dbg(owl_host->dev, "Powering card up\n");

		/* Reset the SDC controller to clear all previous states */
		owl_mmc_ctr_reset(owl_host);
		clk_prepare_enable(owl_host->clk);
		writel(OWL_SD_ENABLE | OWL_SD_EN_RESE,
		       owl_host->base + OWL_REG_SD_EN);

		break;

	case MMC_POWER_ON:
		dev_dbg(owl_host->dev, "Powering card on\n");
		owl_mmc_power_on(owl_host);

		break;

	case MMC_POWER_OFF:
		dev_dbg(owl_host->dev, "Powering card off\n");
		clk_disable_unprepare(owl_host->clk);

		/* Nothing further to configure once the card is off */
		return;

	default:
		dev_dbg(owl_host->dev, "Ignoring unknown card power state\n");
		break;
	}

	if (ios->clock != owl_host->clock)
		owl_mmc_set_clk(owl_host, ios);

	owl_mmc_set_bus_width(owl_host, ios);

	/* Enable DDR mode if requested */
	if (ios->timing == MMC_TIMING_UHS_DDR50) {
		owl_host->ddr_50 = true;
		owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
				   OWL_SD_EN_DDREN, true);
	} else {
		owl_host->ddr_50 = false;
	}
}
0531 
0532 static int owl_mmc_start_signal_voltage_switch(struct mmc_host *mmc,
0533                            struct mmc_ios *ios)
0534 {
0535     struct owl_mmc_host *owl_host = mmc_priv(mmc);
0536 
0537     /* It is enough to change the pad ctrl bit for voltage switch */
0538     switch (ios->signal_voltage) {
0539     case MMC_SIGNAL_VOLTAGE_330:
0540         owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
0541                    OWL_SD_EN_S18EN, false);
0542         break;
0543     case MMC_SIGNAL_VOLTAGE_180:
0544         owl_mmc_update_reg(owl_host->base + OWL_REG_SD_EN,
0545                    OWL_SD_EN_S18EN, true);
0546         break;
0547     default:
0548         return -ENOTSUPP;
0549     }
0550 
0551     return 0;
0552 }
0553 
/* Host operations; card-detect and write-protect use the slot GPIO helpers */
static const struct mmc_host_ops owl_mmc_ops = {
	.request	= owl_mmc_request,
	.set_ios	= owl_mmc_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.start_signal_voltage_switch = owl_mmc_start_signal_voltage_switch,
};
0561 
0562 static int owl_mmc_probe(struct platform_device *pdev)
0563 {
0564     struct owl_mmc_host *owl_host;
0565     struct mmc_host *mmc;
0566     struct resource *res;
0567     int ret;
0568 
0569     mmc = mmc_alloc_host(sizeof(struct owl_mmc_host), &pdev->dev);
0570     if (!mmc) {
0571         dev_err(&pdev->dev, "mmc alloc host failed\n");
0572         return -ENOMEM;
0573     }
0574     platform_set_drvdata(pdev, mmc);
0575 
0576     owl_host = mmc_priv(mmc);
0577     owl_host->dev = &pdev->dev;
0578     owl_host->mmc = mmc;
0579     spin_lock_init(&owl_host->lock);
0580 
0581     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0582     owl_host->base = devm_ioremap_resource(&pdev->dev, res);
0583     if (IS_ERR(owl_host->base)) {
0584         ret = PTR_ERR(owl_host->base);
0585         goto err_free_host;
0586     }
0587 
0588     owl_host->clk = devm_clk_get(&pdev->dev, NULL);
0589     if (IS_ERR(owl_host->clk)) {
0590         dev_err(&pdev->dev, "No clock defined\n");
0591         ret = PTR_ERR(owl_host->clk);
0592         goto err_free_host;
0593     }
0594 
0595     owl_host->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
0596     if (IS_ERR(owl_host->reset)) {
0597         dev_err(&pdev->dev, "Could not get reset control\n");
0598         ret = PTR_ERR(owl_host->reset);
0599         goto err_free_host;
0600     }
0601 
0602     mmc->ops        = &owl_mmc_ops;
0603     mmc->max_blk_count  = 512;
0604     mmc->max_blk_size   = 512;
0605     mmc->max_segs       = 256;
0606     mmc->max_seg_size   = 262144;
0607     mmc->max_req_size   = 262144;
0608     /* 100kHz ~ 52MHz */
0609     mmc->f_min      = 100000;
0610     mmc->f_max      = 52000000;
0611     mmc->caps          |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
0612                   MMC_CAP_4_BIT_DATA;
0613     mmc->caps2      = (MMC_CAP2_BOOTPART_NOACC | MMC_CAP2_NO_SDIO);
0614     mmc->ocr_avail      = MMC_VDD_32_33 | MMC_VDD_33_34 |
0615                   MMC_VDD_165_195;
0616 
0617     ret = mmc_of_parse(mmc);
0618     if (ret)
0619         goto err_free_host;
0620 
0621     pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
0622     pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
0623     owl_host->dma = dma_request_chan(&pdev->dev, "mmc");
0624     if (IS_ERR(owl_host->dma)) {
0625         dev_err(owl_host->dev, "Failed to get external DMA channel.\n");
0626         ret = PTR_ERR(owl_host->dma);
0627         goto err_free_host;
0628     }
0629 
0630     dev_info(&pdev->dev, "Using %s for DMA transfers\n",
0631          dma_chan_name(owl_host->dma));
0632 
0633     owl_host->dma_cfg.src_addr = res->start + OWL_REG_SD_DAT;
0634     owl_host->dma_cfg.dst_addr = res->start + OWL_REG_SD_DAT;
0635     owl_host->dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
0636     owl_host->dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
0637     owl_host->dma_cfg.device_fc = false;
0638 
0639     owl_host->irq = platform_get_irq(pdev, 0);
0640     if (owl_host->irq < 0) {
0641         ret = -EINVAL;
0642         goto err_release_channel;
0643     }
0644 
0645     ret = devm_request_irq(&pdev->dev, owl_host->irq, owl_irq_handler,
0646                    0, dev_name(&pdev->dev), owl_host);
0647     if (ret) {
0648         dev_err(&pdev->dev, "Failed to request irq %d\n",
0649             owl_host->irq);
0650         goto err_release_channel;
0651     }
0652 
0653     ret = mmc_add_host(mmc);
0654     if (ret) {
0655         dev_err(&pdev->dev, "Failed to add host\n");
0656         goto err_release_channel;
0657     }
0658 
0659     dev_dbg(&pdev->dev, "Owl MMC Controller Initialized\n");
0660 
0661     return 0;
0662 
0663 err_release_channel:
0664     dma_release_channel(owl_host->dma);
0665 err_free_host:
0666     mmc_free_host(mmc);
0667 
0668     return ret;
0669 }
0670 
/*
 * owl_mmc_remove - unbind the driver.
 *
 * Tears down in reverse order of probe; the devm-managed IRQ and
 * register mapping are released automatically afterwards.
 */
static int owl_mmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct owl_mmc_host *owl_host = mmc_priv(mmc);

	mmc_remove_host(mmc);
	disable_irq(owl_host->irq);
	dma_release_channel(owl_host->dma);
	mmc_free_host(mmc);

	return 0;
}
0683 
/* Devicetree match table */
static const struct of_device_id owl_mmc_of_match[] = {
	{.compatible = "actions,owl-mmc",},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, owl_mmc_of_match);
0689 
/* Platform driver glue; probing may run asynchronously */
static struct platform_driver owl_mmc_driver = {
	.driver = {
		.name	= "owl_mmc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = owl_mmc_of_match,
	},
	.probe		= owl_mmc_probe,
	.remove		= owl_mmc_remove,
};
module_platform_driver(owl_mmc_driver);

MODULE_DESCRIPTION("Actions Semi Owl SoCs SD/MMC Driver");
MODULE_AUTHOR("Actions Semi");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
MODULE_LICENSE("GPL");