0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * Copyright (C) 2018 Oleksij Rempel <linux@rempel-privat.de>
0004  *
0005  * Driver for Alcor Micro AU6601 and AU6621 controllers
0006  */
0007 
0008 /* Note: this driver was created without any documentation. It is based
0009  * on sniffing, testing and, in some cases, mimicking the original driver.
0010  * If someone with documentation or more experience in SD/MMC or reverse
0011  * engineering than me reads this, please review this driver and question
0012  * everything I did. 2018 Oleksij Rempel <linux@rempel-privat.de>
0013  */
0014 
0015 #include <linux/delay.h>
0016 #include <linux/pci.h>
0017 #include <linux/module.h>
0018 #include <linux/io.h>
0019 #include <linux/pm.h>
0020 #include <linux/irq.h>
0021 #include <linux/interrupt.h>
0022 #include <linux/platform_device.h>
0023 
0024 #include <linux/mmc/host.h>
0025 #include <linux/mmc/mmc.h>
0026 
0027 #include <linux/alcor_pci.h>
0028 
0029 enum alcor_cookie {
0030     COOKIE_UNMAPPED,
0031     COOKIE_PRE_MAPPED,
0032     COOKIE_MAPPED,
0033 };
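     /* ->pre_req() sets COOKIE_MAPPED on a request whose scatterlist it
      * managed to DMA-map; alcor_trigger_data_transfer() then selects DMA
      * mode for it, and ->post_req() unmaps the buffers and resets the
      * cookie to COOKIE_UNMAPPED. Everything else is handled with PIO.
      */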
0034 
0035 struct alcor_pll_conf {
0036     unsigned int clk_src_freq;
0037     unsigned int clk_src_reg;
0038     unsigned int min_div;
0039     unsigned int max_div;
0040 };
0041 
0042 struct alcor_sdmmc_host {
0043     struct  device *dev;
0044     struct alcor_pci_priv *alcor_pci;
0045 
0046     struct mmc_request *mrq;
0047     struct mmc_command *cmd;
0048     struct mmc_data *data;
0049     unsigned int dma_on:1;
0050 
0051     struct mutex cmd_mutex;
0052 
0053     struct delayed_work timeout_work;
0054 
0055     struct sg_mapping_iter sg_miter;    /* SG state for PIO */
0056     struct scatterlist *sg;
0057     unsigned int blocks;        /* remaining PIO blocks */
0058     int sg_count;
0059 
0060     u32         irq_status_sd;
0061     unsigned char       cur_power_mode;
0062 };
0063 
0064 static const struct alcor_pll_conf alcor_pll_cfg[] = {
0065     /* MHZ,     CLK src,        min div, max div */
0066     { 31250000, AU6601_CLK_31_25_MHZ,   1,  511},
0067     { 48000000, AU6601_CLK_48_MHZ,  1,  511},
0068     {125000000, AU6601_CLK_125_MHZ, 1,  511},
0069     {384000000, AU6601_CLK_384_MHZ, 1,  511},
0070 };
0071 
0072 static inline void alcor_rmw8(struct alcor_sdmmc_host *host, unsigned int addr,
0073                    u8 clear, u8 set)
0074 {
0075     struct alcor_pci_priv *priv = host->alcor_pci;
0076     u32 var;
0077 
0078     var = alcor_read8(priv, addr);
0079     var &= ~clear;
0080     var |= set;
0081     alcor_write8(priv, var, addr);
0082 }
0083 
0084 /* While IRQs are masked, some status updates may be missed.
0085  * Use this with care.
0086  */
0087 static inline void alcor_mask_sd_irqs(struct alcor_sdmmc_host *host)
0088 {
0089     struct alcor_pci_priv *priv = host->alcor_pci;
0090 
0091     alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
0092 }
0093 
0094 static inline void alcor_unmask_sd_irqs(struct alcor_sdmmc_host *host)
0095 {
0096     struct alcor_pci_priv *priv = host->alcor_pci;
0097 
0098     alcor_write32(priv, AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK |
0099           AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE |
0100           AU6601_INT_OVER_CURRENT_ERR,
0101           AU6601_REG_INT_ENABLE);
0102 }
0103 
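     /* Writes the requested reset bits (together with AU6601_BUF_CTRL_RESET)
      * and polls for them to self-clear, for at most 100 * 50 us = 5 ms.
      */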
0104 static void alcor_reset(struct alcor_sdmmc_host *host, u8 val)
0105 {
0106     struct alcor_pci_priv *priv = host->alcor_pci;
0107     int i;
0108 
0109     alcor_write8(priv, val | AU6601_BUF_CTRL_RESET,
0110               AU6601_REG_SW_RESET);
0111     for (i = 0; i < 100; i++) {
0112         if (!(alcor_read8(priv, AU6601_REG_SW_RESET) & val))
0113             return;
0114         udelay(50);
0115     }
0116     dev_err(host->dev, "%s: timeout\n", __func__);
0117 }
0118 
0119 /*
0120  * Perform DMA I/O of a single page.
0121  */
0122 static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
0123 {
0124     struct alcor_pci_priv *priv = host->alcor_pci;
0125     u32 addr;
0126 
0127     if (!host->sg_count)
0128         return;
0129 
0130     if (!host->sg) {
0131         dev_err(host->dev, "have blocks, but no SG\n");
0132         return;
0133     }
0134 
0135     if (!sg_dma_len(host->sg)) {
0136         dev_err(host->dev, "DMA SG len == 0\n");
0137         return;
0138     }
0139 
0140 
0141     addr = (u32)sg_dma_address(host->sg);
0142 
0143     alcor_write32(priv, addr, AU6601_REG_SDMA_ADDR);
0144     host->sg = sg_next(host->sg);
0145     host->sg_count--;
0146 }
0147 
0148 static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
0149 {
0150     struct alcor_pci_priv *priv = host->alcor_pci;
0151     struct mmc_data *data = host->data;
0152     u8 ctrl = 0;
0153 
0154     if (data->flags & MMC_DATA_WRITE)
0155         ctrl |= AU6601_DATA_WRITE;
0156 
0157     if (data->host_cookie == COOKIE_MAPPED) {
0158         /*
0159          * For DMA transfers, this function is called just once,
0160          * at the start of the operation. The hardware can only
0161          * perform DMA I/O on a single page at a time, so here
0162          * we kick off the transfer with the first page, and expect
0163          * subsequent pages to be transferred upon IRQ events
0164          * indicating that the single-page DMA was completed.
0165          */
0166         alcor_data_set_dma(host);
0167         ctrl |= AU6601_DATA_DMA_MODE;
0168         host->dma_on = 1;
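             /* In DMA mode the "block size" register is apparently
              * programmed with the total transfer length: sg_count pages
              * of 0x1000 bytes each.
              */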
0169         alcor_write32(priv, data->sg_count * 0x1000,
0170                    AU6601_REG_BLOCK_SIZE);
0171     } else {
0172         /*
0173          * For PIO transfers, we break down each operation
0174          * into several sector-sized transfers. When one sector has
0175          * completed, the IRQ handler will call this function again
0176          * to kick off the transfer of the next sector.
0177          */
0178         alcor_write32(priv, data->blksz, AU6601_REG_BLOCK_SIZE);
0179     }
0180 
0181     alcor_write8(priv, ctrl | AU6601_DATA_START_XFER,
0182               AU6601_DATA_XFER_CTRL);
0183 }
0184 
0185 static void alcor_trf_block_pio(struct alcor_sdmmc_host *host, bool read)
0186 {
0187     struct alcor_pci_priv *priv = host->alcor_pci;
0188     size_t blksize, len;
0189     u8 *buf;
0190 
0191     if (!host->blocks)
0192         return;
0193 
0194     if (host->dma_on) {
0195         dev_err(host->dev, "configured DMA but got PIO request.\n");
0196         return;
0197     }
0198 
0199     if (!!(host->data->flags & MMC_DATA_READ) != read) {
0200         dev_err(host->dev, "got unexpected direction %i != %i\n",
0201             !!(host->data->flags & MMC_DATA_READ), read);
0202     }
0203 
0204     if (!sg_miter_next(&host->sg_miter))
0205         return;
0206 
0207     blksize = host->data->blksz;
0208     len = min(host->sg_miter.length, blksize);
0209 
0210     dev_dbg(host->dev, "PIO, %s block size: 0x%zx\n",
0211         read ? "read" : "write", blksize);
0212 
0213     host->sg_miter.consumed = len;
0214     host->blocks--;
0215 
0216     buf = host->sg_miter.addr;
0217 
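         /* move the block through the 32-bit AU6601_REG_BUFFER window,
          * one word per access
          */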
0218     if (read)
0219         ioread32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);
0220     else
0221         iowrite32_rep(priv->iobase + AU6601_REG_BUFFER, buf, len >> 2);
0222 
0223     sg_miter_stop(&host->sg_miter);
0224 }
0225 
0226 static void alcor_prepare_sg_miter(struct alcor_sdmmc_host *host)
0227 {
0228     unsigned int flags = SG_MITER_ATOMIC;
0229     struct mmc_data *data = host->data;
0230 
0231     if (data->flags & MMC_DATA_READ)
0232         flags |= SG_MITER_TO_SG;
0233     else
0234         flags |= SG_MITER_FROM_SG;
0235     sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
0236 }
0237 
0238 static void alcor_prepare_data(struct alcor_sdmmc_host *host,
0239                    struct mmc_command *cmd)
0240 {
0241     struct alcor_pci_priv *priv = host->alcor_pci;
0242     struct mmc_data *data = cmd->data;
0243 
0244     if (!data)
0245         return;
0246 
0247 
0248     host->data = data;
0249     host->data->bytes_xfered = 0;
0250     host->blocks = data->blocks;
0251     host->sg = data->sg;
0252     host->sg_count = data->sg_count;
0253     dev_dbg(host->dev, "prepare DATA: sg %i, blocks: %i\n",
0254             host->sg_count, host->blocks);
0255 
0256     if (data->host_cookie != COOKIE_MAPPED)
0257         alcor_prepare_sg_miter(host);
0258 
0259     alcor_write8(priv, 0, AU6601_DATA_XFER_CTRL);
0260 }
0261 
0262 static void alcor_send_cmd(struct alcor_sdmmc_host *host,
0263                struct mmc_command *cmd, bool set_timeout)
0264 {
0265     struct alcor_pci_priv *priv = host->alcor_pci;
0266     unsigned long timeout = 0;
0267     u8 ctrl = 0;
0268 
0269     host->cmd = cmd;
0270     alcor_prepare_data(host, cmd);
0271 
0272     dev_dbg(host->dev, "send CMD. opcode: 0x%02x, arg; 0x%08x\n",
0273         cmd->opcode, cmd->arg);
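         /* 0x40 presumably encodes the start/transmission bits of the SD
          * command token on top of the command index.
          */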
0274     alcor_write8(priv, cmd->opcode | 0x40, AU6601_REG_CMD_OPCODE);
0275     alcor_write32be(priv, cmd->arg, AU6601_REG_CMD_ARG);
0276 
0277     switch (mmc_resp_type(cmd)) {
0278     case MMC_RSP_NONE:
0279         ctrl = AU6601_CMD_NO_RESP;
0280         break;
0281     case MMC_RSP_R1:
0282         ctrl = AU6601_CMD_6_BYTE_CRC;
0283         break;
0284     case MMC_RSP_R1B:
0285         ctrl = AU6601_CMD_6_BYTE_CRC | AU6601_CMD_STOP_WAIT_RDY;
0286         break;
0287     case MMC_RSP_R2:
0288         ctrl = AU6601_CMD_17_BYTE_CRC;
0289         break;
0290     case MMC_RSP_R3:
0291         ctrl = AU6601_CMD_6_BYTE_WO_CRC;
0292         break;
0293     default:
0294         dev_err(host->dev, "%s: cmd->flag (0x%02x) is not valid\n",
0295             mmc_hostname(mmc_from_priv(host)), mmc_resp_type(cmd));
0296         break;
0297     }
0298 
0299     if (set_timeout) {
0300         if (!cmd->data && cmd->busy_timeout)
0301             timeout = cmd->busy_timeout;
0302         else
0303             timeout = 10000;
0304 
0305         schedule_delayed_work(&host->timeout_work,
0306                       msecs_to_jiffies(timeout));
0307     }
0308 
0309     dev_dbg(host->dev, "xfer ctrl: 0x%02x; timeout: %lu\n", ctrl, timeout);
0310     alcor_write8(priv, ctrl | AU6601_CMD_START_XFER,
0311                  AU6601_CMD_XFER_CTRL);
0312 }
0313 
0314 static void alcor_request_complete(struct alcor_sdmmc_host *host,
0315                    bool cancel_timeout)
0316 {
0317     struct mmc_request *mrq;
0318 
0319     /*
0320      * If this work gets rescheduled while running, it will
0321      * be run again afterwards but without any active request.
0322      */
0323     if (!host->mrq)
0324         return;
0325 
0326     if (cancel_timeout)
0327         cancel_delayed_work(&host->timeout_work);
0328 
0329     mrq = host->mrq;
0330 
0331     host->mrq = NULL;
0332     host->cmd = NULL;
0333     host->data = NULL;
0334     host->dma_on = 0;
0335 
0336     mmc_request_done(mmc_from_priv(host), mrq);
0337 }
0338 
0339 static void alcor_finish_data(struct alcor_sdmmc_host *host)
0340 {
0341     struct mmc_data *data;
0342 
0343     data = host->data;
0344     host->data = NULL;
0345     host->dma_on = 0;
0346 
0347     /*
0348      * The specification states that the block count register must
0349      * be updated, but it does not specify at what point in the
0350      * data flow. That makes the register entirely useless to read
0351      * back so we have to assume that nothing made it to the card
0352      * in the event of an error.
0353      */
0354     if (data->error)
0355         data->bytes_xfered = 0;
0356     else
0357         data->bytes_xfered = data->blksz * data->blocks;
0358 
0359     /*
0360      * Need to send CMD12 if -
0361      * a) open-ended multiblock transfer (no CMD23)
0362      * b) error in multiblock transfer
0363      */
0364     if (data->stop &&
0365         (data->error ||
0366          !host->mrq->sbc)) {
0367 
0368         /*
0369          * The controller needs a reset of internal state machines
0370          * upon error conditions.
0371          */
0372         if (data->error)
0373             alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
0374 
0375         alcor_unmask_sd_irqs(host);
0376         alcor_send_cmd(host, data->stop, false);
0377         return;
0378     }
0379 
0380     alcor_request_complete(host, 1);
0381 }
0382 
0383 static void alcor_err_irq(struct alcor_sdmmc_host *host, u32 intmask)
0384 {
0385     dev_dbg(host->dev, "ERR IRQ %x\n", intmask);
0386 
0387     if (host->cmd) {
0388         if (intmask & AU6601_INT_CMD_TIMEOUT_ERR)
0389             host->cmd->error = -ETIMEDOUT;
0390         else
0391             host->cmd->error = -EILSEQ;
0392     }
0393 
0394     if (host->data) {
0395         if (intmask & AU6601_INT_DATA_TIMEOUT_ERR)
0396             host->data->error = -ETIMEDOUT;
0397         else
0398             host->data->error = -EILSEQ;
0399 
0400         host->data->bytes_xfered = 0;
0401     }
0402 
0403     alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
0404     alcor_request_complete(host, 1);
0405 }
0406 
0407 static int alcor_cmd_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
0408 {
0409     struct alcor_pci_priv *priv = host->alcor_pci;
0410 
0411     intmask &= AU6601_INT_CMD_END;
0412 
0413     if (!intmask)
0414         return true;
0415 
0416     /* got CMD_END but no CMD is in progress, wake the thread and
0417      * process the error
0418      */
0419     if (!host->cmd)
0420         return false;
0421 
0422     if (host->cmd->flags & MMC_RSP_PRESENT) {
0423         struct mmc_command *cmd = host->cmd;
0424 
0425         cmd->resp[0] = alcor_read32be(priv, AU6601_REG_CMD_RSP0);
0426         dev_dbg(host->dev, "RSP0: 0x%04x\n", cmd->resp[0]);
0427         if (host->cmd->flags & MMC_RSP_136) {
0428             cmd->resp[1] =
0429                 alcor_read32be(priv, AU6601_REG_CMD_RSP1);
0430             cmd->resp[2] =
0431                 alcor_read32be(priv, AU6601_REG_CMD_RSP2);
0432             cmd->resp[3] =
0433                 alcor_read32be(priv, AU6601_REG_CMD_RSP3);
0434             dev_dbg(host->dev, "RSP1,2,3: 0x%04x 0x%04x 0x%04x\n",
0435                 cmd->resp[1], cmd->resp[2], cmd->resp[3]);
0436         }
0437 
0438     }
0439 
0440     host->cmd->error = 0;
0441 
0442     /* Processed actual command. */
0443     if (!host->data)
0444         return false;
0445 
0446     alcor_trigger_data_transfer(host);
0447     host->cmd = NULL;
0448     return true;
0449 }
0450 
0451 static void alcor_cmd_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
0452 {
0453     intmask &= AU6601_INT_CMD_END;
0454 
0455     if (!intmask)
0456         return;
0457 
0458     if (!host->cmd && intmask & AU6601_INT_CMD_END) {
0459         dev_dbg(host->dev, "Got command interrupt 0x%08x even though no command operation was in progress.\n",
0460             intmask);
0461     }
0462 
0463     /* Processed actual command. */
0464     if (!host->data)
0465         alcor_request_complete(host, 1);
0466     else
0467         alcor_trigger_data_transfer(host);
0468     host->cmd = NULL;
0469 }
0470 
0471 static int alcor_data_irq_done(struct alcor_sdmmc_host *host, u32 intmask)
0472 {
0473     u32 tmp;
0474 
0475     intmask &= AU6601_INT_DATA_MASK;
0476 
0477     /* nothing to do here */
0478     if (!intmask)
0479         return 1;
0480 
0481     /* were we too fast and got DATA_END after it was processed?
0482      * Let's ignore it for now.
0483      */
0484     if (!host->data && intmask == AU6601_INT_DATA_END)
0485         return 1;
0486 
0487     /* looks like an error, so let's handle it. */
0488     if (!host->data)
0489         return 0;
0490 
0491     tmp = intmask & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
0492              | AU6601_INT_DMA_END);
0493     switch (tmp) {
0494     case 0:
0495         break;
0496     case AU6601_INT_READ_BUF_RDY:
0497         alcor_trf_block_pio(host, true);
0498         return 1;
0499     case AU6601_INT_WRITE_BUF_RDY:
0500         alcor_trf_block_pio(host, false);
0501         return 1;
0502     case AU6601_INT_DMA_END:
0503         if (!host->sg_count)
0504             break;
0505 
0506         alcor_data_set_dma(host);
0507         break;
0508     default:
0509         dev_err(host->dev, "Got READ_BUF_RDY and WRITE_BUF_RDY at same time\n");
0510         break;
0511     }
0512 
0513     if (intmask & AU6601_INT_DATA_END) {
0514         if (!host->dma_on && host->blocks) {
0515             alcor_trigger_data_transfer(host);
0516             return 1;
0517         } else {
0518             return 0;
0519         }
0520     }
0521 
0522     return 1;
0523 }
0524 
0525 static void alcor_data_irq_thread(struct alcor_sdmmc_host *host, u32 intmask)
0526 {
0527     intmask &= AU6601_INT_DATA_MASK;
0528 
0529     if (!intmask)
0530         return;
0531 
0532     if (!host->data) {
0533         dev_dbg(host->dev, "Got data interrupt 0x%08x even though no data operation was in progress.\n",
0534             intmask);
0535         alcor_reset(host, AU6601_RESET_DATA);
0536         return;
0537     }
0538 
0539     if (alcor_data_irq_done(host, intmask))
0540         return;
0541 
0542     if ((intmask & AU6601_INT_DATA_END) || !host->blocks ||
0543         (host->dma_on && !host->sg_count))
0544         alcor_finish_data(host);
0545 }
0546 
0547 static void alcor_cd_irq(struct alcor_sdmmc_host *host, u32 intmask)
0548 {
0549     dev_dbg(host->dev, "card %s\n",
0550         intmask & AU6601_INT_CARD_REMOVE ? "removed" : "inserted");
0551 
0552     if (host->mrq) {
0553         dev_dbg(host->dev, "cancel all pending tasks.\n");
0554 
0555         if (host->data)
0556             host->data->error = -ENOMEDIUM;
0557 
0558         if (host->cmd)
0559             host->cmd->error = -ENOMEDIUM;
0560         else
0561             host->mrq->cmd->error = -ENOMEDIUM;
0562 
0563         alcor_request_complete(host, 1);
0564     }
0565 
0566     mmc_detect_change(mmc_from_priv(host), msecs_to_jiffies(1));
0567 }
0568 
0569 static irqreturn_t alcor_irq_thread(int irq, void *d)
0570 {
0571     struct alcor_sdmmc_host *host = d;
0572     irqreturn_t ret = IRQ_HANDLED;
0573     u32 intmask, tmp;
0574 
0575     mutex_lock(&host->cmd_mutex);
0576 
0577     intmask = host->irq_status_sd;
0578 
0579     /* something bad happened */
0580     if (unlikely(!intmask || AU6601_INT_ALL_MASK == intmask)) {
0581         dev_dbg(host->dev, "unexpected IRQ: 0x%04x\n", intmask);
0582         ret = IRQ_NONE;
0583         goto exit;
0584     }
0585 
0586     tmp = intmask & (AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
0587     if (tmp) {
0588         if (tmp & AU6601_INT_ERROR_MASK)
0589             alcor_err_irq(host, tmp);
0590         else {
0591             alcor_cmd_irq_thread(host, tmp);
0592             alcor_data_irq_thread(host, tmp);
0593         }
0594         intmask &= ~(AU6601_INT_CMD_MASK | AU6601_INT_DATA_MASK);
0595     }
0596 
0597     if (intmask & (AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE)) {
0598         alcor_cd_irq(host, intmask);
0599         intmask &= ~(AU6601_INT_CARD_INSERT | AU6601_INT_CARD_REMOVE);
0600     }
0601 
0602     if (intmask & AU6601_INT_OVER_CURRENT_ERR) {
0603         dev_warn(host->dev,
0604              "warning: over current detected!\n");
0605         intmask &= ~AU6601_INT_OVER_CURRENT_ERR;
0606     }
0607 
0608     if (intmask)
0609         dev_dbg(host->dev, "got unhandled IRQ: 0x%04x\n", intmask);
0610 
0611 exit:
0612     mutex_unlock(&host->cmd_mutex);
0613     alcor_unmask_sd_irqs(host);
0614     return ret;
0615 }
0616 
0617 
0618 static irqreturn_t alcor_irq(int irq, void *d)
0619 {
0620     struct alcor_sdmmc_host *host = d;
0621     struct alcor_pci_priv *priv = host->alcor_pci;
0622     u32 status, tmp;
0623     irqreturn_t ret;
0624     int cmd_done, data_done;
0625 
0626     status = alcor_read32(priv, AU6601_REG_INT_STATUS);
0627     if (!status)
0628         return IRQ_NONE;
0629 
0630     alcor_write32(priv, status, AU6601_REG_INT_STATUS);
0631 
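         /* Simple command/data completion work is handled directly in
          * hard-IRQ context when possible; anything else is stashed in
          * irq_status_sd and deferred to the threaded handler, with the
          * SD IRQs masked until the thread has finished.
          */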
0632     tmp = status & (AU6601_INT_READ_BUF_RDY | AU6601_INT_WRITE_BUF_RDY
0633             | AU6601_INT_DATA_END | AU6601_INT_DMA_END
0634             | AU6601_INT_CMD_END);
0635     if (tmp == status) {
0636         cmd_done = alcor_cmd_irq_done(host, tmp);
0637         data_done = alcor_data_irq_done(host, tmp);
0638         /* use fast path for simple tasks */
0639         if (cmd_done && data_done) {
0640             ret = IRQ_HANDLED;
0641             goto alcor_irq_done;
0642         }
0643     }
0644 
0645     host->irq_status_sd = status;
0646     ret = IRQ_WAKE_THREAD;
0647     alcor_mask_sd_irqs(host);
0648 alcor_irq_done:
0649     return ret;
0650 }
0651 
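     /* Pick the PLL source and divider giving the frequency closest to the
      * requested clock, then program AU6601_CLK_SELECT with
      * clk_src | ((div - 1) << 8) | AU6601_CLK_ENABLE. For example, a
      * 25 MHz request ends up on the 125 MHz source with div = 5 (exactly
      * 25 MHz), while a 50 MHz request has no exact match and uses
      * 48 MHz / 1.
      */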
0652 static void alcor_set_clock(struct alcor_sdmmc_host *host, unsigned int clock)
0653 {
0654     struct alcor_pci_priv *priv = host->alcor_pci;
0655     int i, diff = 0x7fffffff, tmp_clock = 0;
0656     u16 clk_src = 0;
0657     u8 clk_div = 0;
0658 
0659     if (clock == 0) {
0660         alcor_write16(priv, 0, AU6601_CLK_SELECT);
0661         return;
0662     }
0663 
0664     for (i = 0; i < ARRAY_SIZE(alcor_pll_cfg); i++) {
0665         unsigned int tmp_div, tmp_diff;
0666         const struct alcor_pll_conf *cfg = &alcor_pll_cfg[i];
0667 
0668         tmp_div = DIV_ROUND_UP(cfg->clk_src_freq, clock);
0669         if (cfg->min_div > tmp_div || tmp_div > cfg->max_div)
0670             continue;
0671 
0672         tmp_clock = DIV_ROUND_UP(cfg->clk_src_freq, tmp_div);
0673         tmp_diff = abs(clock - tmp_clock);
0674 
0675         if (tmp_diff < diff) {
0676             diff = tmp_diff;
0677             clk_src = cfg->clk_src_reg;
0678             clk_div = tmp_div;
0679         }
0680     }
0681 
0682     clk_src |= ((clk_div - 1) << 8);
0683     clk_src |= AU6601_CLK_ENABLE;
0684 
0685     dev_dbg(host->dev, "set freq %d cal freq %d, use div %d, mod %x\n",
0686             clock, tmp_clock, clk_div, clk_src);
0687 
0688     alcor_write16(priv, clk_src, AU6601_CLK_SELECT);
0689 
0690 }
0691 
0692 static void alcor_set_timing(struct mmc_host *mmc, struct mmc_ios *ios)
0693 {
0694     struct alcor_sdmmc_host *host = mmc_priv(mmc);
0695 
0696     if (ios->timing == MMC_TIMING_LEGACY) {
0697         alcor_rmw8(host, AU6601_CLK_DELAY,
0698                 AU6601_CLK_POSITIVE_EDGE_ALL, 0);
0699     } else {
0700         alcor_rmw8(host, AU6601_CLK_DELAY,
0701                 0, AU6601_CLK_POSITIVE_EDGE_ALL);
0702     }
0703 }
0704 
0705 static void alcor_set_bus_width(struct mmc_host *mmc, struct mmc_ios *ios)
0706 {
0707     struct alcor_sdmmc_host *host = mmc_priv(mmc);
0708     struct alcor_pci_priv *priv = host->alcor_pci;
0709 
0710     if (ios->bus_width == MMC_BUS_WIDTH_1) {
0711         alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
0712     } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
0713         alcor_write8(priv, AU6601_BUS_WIDTH_4BIT,
0714                   AU6601_REG_BUS_CTRL);
0715     } else
0716         dev_err(host->dev, "Unknown BUS mode\n");
0717 
0718 }
0719 
0720 static int alcor_card_busy(struct mmc_host *mmc)
0721 {
0722     struct alcor_sdmmc_host *host = mmc_priv(mmc);
0723     struct alcor_pci_priv *priv = host->alcor_pci;
0724     u8 status;
0725 
0726     /* Check whether DAT[0:3] are low */
0727     status = alcor_read8(priv, AU6601_DATA_PIN_STATE);
0728 
0729     return !(status & AU6601_BUS_STAT_DAT_MASK);
0730 }
0731 
0732 static int alcor_get_cd(struct mmc_host *mmc)
0733 {
0734     struct alcor_sdmmc_host *host = mmc_priv(mmc);
0735     struct alcor_pci_priv *priv = host->alcor_pci;
0736     u8 detect;
0737 
0738     detect = alcor_read8(priv, AU6601_DETECT_STATUS)
0739         & AU6601_DETECT_STATUS_M;
0740     /* report the card as present only if an SD card was detected */
0741     return (detect == AU6601_SD_DETECTED);
0742 }
0743 
0744 static int alcor_get_ro(struct mmc_host *mmc)
0745 {
0746     struct alcor_sdmmc_host *host = mmc_priv(mmc);
0747     struct alcor_pci_priv *priv = host->alcor_pci;
0748     u8 status;
0749 
0750     /* get write protect pin status */
0751     status = alcor_read8(priv, AU6601_INTERFACE_MODE_CTRL);
0752 
0753     return !!(status & AU6601_SD_CARD_WP);
0754 }
0755 
0756 static void alcor_request(struct mmc_host *mmc, struct mmc_request *mrq)
0757 {
0758     struct alcor_sdmmc_host *host = mmc_priv(mmc);
0759 
0760     mutex_lock(&host->cmd_mutex);
0761 
0762     host->mrq = mrq;
0763 
0764     /* check if the card is present, then send command and data */
0765     if (alcor_get_cd(mmc))
0766         alcor_send_cmd(host, mrq->cmd, true);
0767     else {
0768         mrq->cmd->error = -ENOMEDIUM;
0769         alcor_request_complete(host, 1);
0770     }
0771 
0772     mutex_unlock(&host->cmd_mutex);
0773 }
0774 
0775 static void alcor_pre_req(struct mmc_host *mmc,
0776                struct mmc_request *mrq)
0777 {
0778     struct alcor_sdmmc_host *host = mmc_priv(mmc);
0779     struct mmc_data *data = mrq->data;
0780     struct mmc_command *cmd = mrq->cmd;
0781     struct scatterlist *sg;
0782     unsigned int i, sg_len;
0783 
0784     if (!data || !cmd)
0785         return;
0786 
0787     data->host_cookie = COOKIE_UNMAPPED;
0788 
0789     /* FIXME: looks like the DMA engine works only with CMD18 */
0790     if (cmd->opcode != MMC_READ_MULTIPLE_BLOCK
0791             && cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
0792         return;
0793     /*
0794      * We don't do DMA on "complex" transfers, i.e. with
0795      * non-word-aligned buffers or lengths. A future improvement
0796      * could be made to use temporary DMA bounce-buffers when these
0797      * requirements are not met.
0798      *
0799      * Also, we don't bother with all the DMA setup overhead for
0800      * short transfers.
0801      */
0802     if (data->blocks * data->blksz < AU6601_MAX_DMA_BLOCK_SIZE)
0803         return;
0804 
0805     if (data->blksz & 3)
0806         return;
0807 
0808     for_each_sg(data->sg, sg, data->sg_len, i) {
0809         if (sg->length != AU6601_MAX_DMA_BLOCK_SIZE)
0810             return;
0811         if (sg->offset != 0)
0812             return;
0813     }
0814 
0815     /* This data might be unmapped at this time */
0816 
0817     sg_len = dma_map_sg(host->dev, data->sg, data->sg_len,
0818                 mmc_get_dma_dir(data));
0819     if (sg_len)
0820         data->host_cookie = COOKIE_MAPPED;
0821 
0822     data->sg_count = sg_len;
0823 }
0824 
0825 static void alcor_post_req(struct mmc_host *mmc,
0826                 struct mmc_request *mrq,
0827                 int err)
0828 {
0829     struct alcor_sdmmc_host *host = mmc_priv(mmc);
0830     struct mmc_data *data = mrq->data;
0831 
0832     if (!data)
0833         return;
0834 
0835     if (data->host_cookie == COOKIE_MAPPED) {
0836         dma_unmap_sg(host->dev,
0837                  data->sg,
0838                  data->sg_len,
0839                  mmc_get_dma_dir(data));
0840     }
0841 
0842     data->host_cookie = COOKIE_UNMAPPED;
0843 }
0844 
0845 static void alcor_set_power_mode(struct mmc_host *mmc, struct mmc_ios *ios)
0846 {
0847     struct alcor_sdmmc_host *host = mmc_priv(mmc);
0848     struct alcor_pci_priv *priv = host->alcor_pci;
0849 
0850     switch (ios->power_mode) {
0851     case MMC_POWER_OFF:
0852         alcor_set_clock(host, ios->clock);
0853         /* set all pins to input */
0854         alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
0855         /* turn off VDD */
0856         alcor_write8(priv, 0, AU6601_POWER_CONTROL);
0857         break;
0858     case MMC_POWER_UP:
0859         break;
0860     case MMC_POWER_ON:
0861         /* This is the trickiest part. The order and timing of the
0862          * instructions seem to play an important role. Any changes may
0863          * confuse the internal state engine of this HW.
0864          * FIXME: if we ever get access to documentation, this
0865          * part should be reviewed again.
0866          */
0867 
0868         /* enable SD card mode */
0869         alcor_write8(priv, AU6601_SD_CARD,
0870                   AU6601_ACTIVE_CTRL);
0871         /* set signal voltage to 3.3V */
0872         alcor_write8(priv, 0, AU6601_OPT);
0873         /* there is no documentation about the clk delay; for now, just
0874          * try to mimic the original driver.
0875          */
0876         alcor_write8(priv, 0x20, AU6601_CLK_DELAY);
0877         /* set BUS width to 1 bit */
0878         alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
0879         /* set CLK first time */
0880         alcor_set_clock(host, ios->clock);
0881         /* power on VDD */
0882         alcor_write8(priv, AU6601_SD_CARD,
0883                   AU6601_POWER_CONTROL);
0884         /* wait for the CLK to stabilize */
0885         mdelay(20);
0886         /* set CLK again, mimicking the original driver. */
0887         alcor_set_clock(host, ios->clock);
0888 
0889         /* enable output */
0890         alcor_write8(priv, AU6601_SD_CARD,
0891                   AU6601_OUTPUT_ENABLE);
0892         /* The clk will not work on the au6621 unless we trigger a data
0893          * transfer.
0894          */
0895         alcor_write8(priv, AU6601_DATA_WRITE,
0896                   AU6601_DATA_XFER_CTRL);
0897         /* configure the timeout. It is not clear what exactly it means. */
0898         alcor_write8(priv, 0x7d, AU6601_TIME_OUT_CTRL);
0899         mdelay(100);
0900         break;
0901     default:
0902         dev_err(host->dev, "Unknown power parameter\n");
0903     }
0904 }
0905 
0906 static void alcor_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
0907 {
0908     struct alcor_sdmmc_host *host = mmc_priv(mmc);
0909 
0910     mutex_lock(&host->cmd_mutex);
0911 
0912     dev_dbg(host->dev, "set ios. bus width: %x, power mode: %x\n",
0913         ios->bus_width, ios->power_mode);
0914 
0915     if (ios->power_mode != host->cur_power_mode) {
0916         alcor_set_power_mode(mmc, ios);
0917         host->cur_power_mode = ios->power_mode;
0918     } else {
0919         alcor_set_timing(mmc, ios);
0920         alcor_set_bus_width(mmc, ios);
0921         alcor_set_clock(host, ios->clock);
0922     }
0923 
0924     mutex_unlock(&host->cmd_mutex);
0925 }
0926 
0927 static int alcor_signal_voltage_switch(struct mmc_host *mmc,
0928                        struct mmc_ios *ios)
0929 {
0930     struct alcor_sdmmc_host *host = mmc_priv(mmc);
0931 
0932     mutex_lock(&host->cmd_mutex);
0933 
0934     switch (ios->signal_voltage) {
0935     case MMC_SIGNAL_VOLTAGE_330:
0936         alcor_rmw8(host, AU6601_OPT, AU6601_OPT_SD_18V, 0);
0937         break;
0938     case MMC_SIGNAL_VOLTAGE_180:
0939         alcor_rmw8(host, AU6601_OPT, 0, AU6601_OPT_SD_18V);
0940         break;
0941     default:
0942         /* No signal voltage switch required */
0943         break;
0944     }
0945 
0946     mutex_unlock(&host->cmd_mutex);
0947     return 0;
0948 }
0949 
0950 static const struct mmc_host_ops alcor_sdc_ops = {
0951     .card_busy  = alcor_card_busy,
0952     .get_cd     = alcor_get_cd,
0953     .get_ro     = alcor_get_ro,
0954     .post_req   = alcor_post_req,
0955     .pre_req    = alcor_pre_req,
0956     .request    = alcor_request,
0957     .set_ios    = alcor_set_ios,
0958     .start_signal_voltage_switch = alcor_signal_voltage_switch,
0959 };
0960 
0961 static void alcor_timeout_timer(struct work_struct *work)
0962 {
0963     struct delayed_work *d = to_delayed_work(work);
0964     struct alcor_sdmmc_host *host = container_of(d, struct alcor_sdmmc_host,
0965                         timeout_work);
0966     mutex_lock(&host->cmd_mutex);
0967 
0968     dev_dbg(host->dev, "triggered timeout\n");
0969     if (host->mrq) {
0970         dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");
0971 
0972         if (host->data) {
0973             host->data->error = -ETIMEDOUT;
0974         } else {
0975             if (host->cmd)
0976                 host->cmd->error = -ETIMEDOUT;
0977             else
0978                 host->mrq->cmd->error = -ETIMEDOUT;
0979         }
0980 
0981         alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
0982         alcor_request_complete(host, 0);
0983     }
0984 
0985     mutex_unlock(&host->cmd_mutex);
0986 }
0987 
0988 static void alcor_hw_init(struct alcor_sdmmc_host *host)
0989 {
0990     struct alcor_pci_priv *priv = host->alcor_pci;
0991     struct alcor_dev_cfg *cfg = priv->cfg;
0992 
0993     /* FIXME: this part mimics the HW init of the original driver.
0994      * If we ever get access to documentation, this part
0995      * should be reviewed again.
0996      */
0997 
0998     /* reset command state engine */
0999     alcor_reset(host, AU6601_RESET_CMD);
1000 
1001     alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);
1002     /* enable sd card mode */
1003     alcor_write8(priv, AU6601_SD_CARD, AU6601_ACTIVE_CTRL);
1004 
1005     /* set BUS width to 1 bit */
1006     alcor_write8(priv, 0, AU6601_REG_BUS_CTRL);
1007 
1008     /* reset data state engine */
1009     alcor_reset(host, AU6601_RESET_DATA);
1010     /* Not sure if this voodoo with AU6601_DMA_BOUNDARY is really needed */
1011     alcor_write8(priv, 0, AU6601_DMA_BOUNDARY);
1012 
1013     alcor_write8(priv, 0, AU6601_INTERFACE_MODE_CTRL);
1014     /* not clear what we are doing here. */
1015     alcor_write8(priv, 0x44, AU6601_PAD_DRIVE0);
1016     alcor_write8(priv, 0x44, AU6601_PAD_DRIVE1);
1017     alcor_write8(priv, 0x00, AU6601_PAD_DRIVE2);
1018 
1019     /* for the 6601 - dma_boundary; for the 6621 - dma_page_cnt.
1020      * The exact meaning of this register is not clear.
1021      */
1022     alcor_write8(priv, cfg->dma, AU6601_DMA_BOUNDARY);
1023 
1024     /* make sure all pins are set to input and VDD is off */
1025     alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
1026     alcor_write8(priv, 0, AU6601_POWER_CONTROL);
1027 
1028     alcor_write8(priv, AU6601_DETECT_EN, AU6601_DETECT_STATUS);
1029     /* now we should be safe to enable IRQs */
1030     alcor_unmask_sd_irqs(host);
1031 }
1032 
1033 static void alcor_hw_uninit(struct alcor_sdmmc_host *host)
1034 {
1035     struct alcor_pci_priv *priv = host->alcor_pci;
1036 
1037     alcor_mask_sd_irqs(host);
1038     alcor_reset(host, AU6601_RESET_CMD | AU6601_RESET_DATA);
1039 
1040     alcor_write8(priv, 0, AU6601_DETECT_STATUS);
1041 
1042     alcor_write8(priv, 0, AU6601_OUTPUT_ENABLE);
1043     alcor_write8(priv, 0, AU6601_POWER_CONTROL);
1044 
1045     alcor_write8(priv, 0, AU6601_OPT);
1046 }
1047 
1048 static void alcor_init_mmc(struct alcor_sdmmc_host *host)
1049 {
1050     struct mmc_host *mmc = mmc_from_priv(host);
1051 
1052     mmc->f_min = AU6601_MIN_CLOCK;
1053     mmc->f_max = AU6601_MAX_CLOCK;
1054     mmc->ocr_avail = MMC_VDD_33_34;
1055     mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED
1056         | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
1057         | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50;
1058     mmc->caps2 = MMC_CAP2_NO_SDIO;
1059     mmc->ops = &alcor_sdc_ops;
1060 
1061     /* The hardware does DMA data transfer of 4096 bytes to/from a single
1062      * buffer address. Scatterlists are not supported at the hardware
1063      * level, however we can work with them at the driver level,
1064      * provided that each segment is exactly 4096 bytes in size.
1065      * Upon DMA completion of a single segment (signalled via IRQ), we
1066      * immediately proceed to transfer the next segment from the
1067      * scatterlist.
1068      *
1069      * The overall request is limited to 240 sectors, matching the
1070      * original vendor driver.
1071      */
1072     mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
1073     mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
1074     mmc->max_blk_count = 240;
1075     mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
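         /* with the default 512-byte block size this caps a request at
          * 240 * 512 bytes = 120 KiB, i.e. 30 DMA pages of 4 KiB
          */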
1076     dma_set_max_seg_size(host->dev, mmc->max_seg_size);
1077 }
1078 
1079 static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
1080 {
1081     struct alcor_pci_priv *priv = pdev->dev.platform_data;
1082     struct mmc_host *mmc;
1083     struct alcor_sdmmc_host *host;
1084     int ret;
1085 
1086     mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
1087     if (!mmc) {
1088         dev_err(&pdev->dev, "Can't allocate MMC\n");
1089         return -ENOMEM;
1090     }
1091 
1092     host = mmc_priv(mmc);
1093     host->dev = &pdev->dev;
1094     host->cur_power_mode = MMC_POWER_UNDEFINED;
1095     host->alcor_pci = priv;
1096 
1097     /* make sure irqs are disabled */
1098     alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
1099     alcor_write32(priv, 0, AU6601_MS_INT_ENABLE);
1100 
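         /* the chip also has a non-SD block (note AU6601_MS_INT_ENABLE
          * above), so the interrupt line is requested as shared
          */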
1101     ret = devm_request_threaded_irq(&pdev->dev, priv->irq,
1102             alcor_irq, alcor_irq_thread, IRQF_SHARED,
1103             DRV_NAME_ALCOR_PCI_SDMMC, host);
1104 
1105     if (ret) {
1106         dev_err(&pdev->dev, "Failed to get irq for data line\n");
1107         goto free_host;
1108     }
1109 
1110     mutex_init(&host->cmd_mutex);
1111     INIT_DELAYED_WORK(&host->timeout_work, alcor_timeout_timer);
1112 
1113     alcor_init_mmc(host);
1114     alcor_hw_init(host);
1115 
1116     dev_set_drvdata(&pdev->dev, host);
1117     mmc_add_host(mmc);
1118     return 0;
1119 
1120 free_host:
1121     mmc_free_host(mmc);
1122     return ret;
1123 }
1124 
1125 static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
1126 {
1127     struct alcor_sdmmc_host *host = dev_get_drvdata(&pdev->dev);
1128     struct mmc_host *mmc = mmc_from_priv(host);
1129 
1130     if (cancel_delayed_work_sync(&host->timeout_work))
1131         alcor_request_complete(host, 0);
1132 
1133     alcor_hw_uninit(host);
1134     mmc_remove_host(mmc);
1135     mmc_free_host(mmc);
1136 
1137     return 0;
1138 }
1139 
1140 #ifdef CONFIG_PM_SLEEP
1141 static int alcor_pci_sdmmc_suspend(struct device *dev)
1142 {
1143     struct alcor_sdmmc_host *host = dev_get_drvdata(dev);
1144 
1145     if (cancel_delayed_work_sync(&host->timeout_work))
1146         alcor_request_complete(host, 0);
1147 
1148     alcor_hw_uninit(host);
1149 
1150     return 0;
1151 }
1152 
1153 static int alcor_pci_sdmmc_resume(struct device *dev)
1154 {
1155     struct alcor_sdmmc_host *host = dev_get_drvdata(dev);
1156 
1157     alcor_hw_init(host);
1158 
1159     return 0;
1160 }
1161 #endif /* CONFIG_PM_SLEEP */
1162 
1163 static SIMPLE_DEV_PM_OPS(alcor_mmc_pm_ops, alcor_pci_sdmmc_suspend,
1164              alcor_pci_sdmmc_resume);
1165 
1166 static const struct platform_device_id alcor_pci_sdmmc_ids[] = {
1167     {
1168         .name = DRV_NAME_ALCOR_PCI_SDMMC,
1169     }, {
1170         /* sentinel */
1171     }
1172 };
1173 MODULE_DEVICE_TABLE(platform, alcor_pci_sdmmc_ids);
1174 
1175 static struct platform_driver alcor_pci_sdmmc_driver = {
1176     .probe      = alcor_pci_sdmmc_drv_probe,
1177     .remove     = alcor_pci_sdmmc_drv_remove,
1178     .id_table   = alcor_pci_sdmmc_ids,
1179     .driver     = {
1180         .name   = DRV_NAME_ALCOR_PCI_SDMMC,
1181         .probe_type = PROBE_PREFER_ASYNCHRONOUS,
1182         .pm = &alcor_mmc_pm_ops
1183     },
1184 };
1185 module_platform_driver(alcor_pci_sdmmc_driver);
1186 
1187 MODULE_AUTHOR("Oleksij Rempel <linux@rempel-privat.de>");
1188 MODULE_DESCRIPTION("PCI driver for Alcor Micro AU6601 Secure Digital Host Controller Interface");
1189 MODULE_LICENSE("GPL");