0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /* Copyright (c) 2015, The Linux Foundation. All rights reserved.
0003  */
0004 
0005 #include <linux/delay.h>
0006 #include <linux/highmem.h>
0007 #include <linux/io.h>
0008 #include <linux/iopoll.h>
0009 #include <linux/module.h>
0010 #include <linux/dma-mapping.h>
0011 #include <linux/slab.h>
0012 #include <linux/scatterlist.h>
0013 #include <linux/platform_device.h>
0014 #include <linux/ktime.h>
0015 
0016 #include <linux/mmc/mmc.h>
0017 #include <linux/mmc/host.h>
0018 #include <linux/mmc/card.h>
0019 
0020 #include "cqhci.h"
0021 #include "cqhci-crypto.h"
0022 
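/*
 * Slot 31 is reserved for direct commands (DCMD) when MMC_CAP2_CQE_DCMD is
 * set; ordinary data tasks use the remaining 31 slots.
 */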
0023 #define DCMD_SLOT 31
0024 #define NUM_SLOTS 32
0025 
0026 struct cqhci_slot {
0027     struct mmc_request *mrq;
0028     unsigned int flags;
0029 #define CQHCI_EXTERNAL_TIMEOUT  BIT(0)
0030 #define CQHCI_COMPLETED     BIT(1)
0031 #define CQHCI_HOST_CRC      BIT(2)
0032 #define CQHCI_HOST_TIMEOUT  BIT(3)
0033 #define CQHCI_HOST_OTHER    BIT(4)
0034 };
0035 
0036 static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
0037 {
0038     return cq_host->desc_base + (tag * cq_host->slot_sz);
0039 }
0040 
0041 static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
0042 {
0043     u8 *desc = get_desc(cq_host, tag);
0044 
0045     return desc + cq_host->task_desc_len;
0046 }
0047 
0048 static inline size_t get_trans_desc_offset(struct cqhci_host *cq_host, u8 tag)
0049 {
0050     return cq_host->trans_desc_len * cq_host->mmc->max_segs * tag;
0051 }
0052 
0053 static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
0054 {
0055     size_t offset = get_trans_desc_offset(cq_host, tag);
0056 
0057     return cq_host->trans_desc_dma_base + offset;
0058 }
0059 
0060 static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
0061 {
0062     size_t offset = get_trans_desc_offset(cq_host, tag);
0063 
0064     return cq_host->trans_desc_base + offset;
0065 }
0066 
0067 static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
0068 {
0069     u8 *link_temp;
0070     dma_addr_t trans_temp;
0071 
0072     link_temp = get_link_desc(cq_host, tag);
0073     trans_temp = get_trans_desc_dma(cq_host, tag);
0074 
0075     memset(link_temp, 0, cq_host->link_desc_len);
0076     if (cq_host->link_desc_len > 8)
0077         *(link_temp + 8) = 0;
0078 
0079     if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
0080         *link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
0081         return;
0082     }
0083 
0084     *link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);
0085 
0086     if (cq_host->dma64) {
0087         __le64 *data_addr = (__le64 __force *)(link_temp + 4);
0088 
0089         data_addr[0] = cpu_to_le64(trans_temp);
0090     } else {
0091         __le32 *data_addr = (__le32 __force *)(link_temp + 4);
0092 
0093         data_addr[0] = cpu_to_le32(trans_temp);
0094     }
0095 }
0096 
0097 static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
0098 {
0099     cqhci_writel(cq_host, set, CQHCI_ISTE);
0100     cqhci_writel(cq_host, set, CQHCI_ISGE);
0101 }
0102 
0103 #define DRV_NAME "cqhci"
0104 
0105 #define CQHCI_DUMP(f, x...) \
0106     pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)
0107 
0108 static void cqhci_dumpregs(struct cqhci_host *cq_host)
0109 {
0110     struct mmc_host *mmc = cq_host->mmc;
0111 
0112     CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");
0113 
0114     CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
0115            cqhci_readl(cq_host, CQHCI_CAP),
0116            cqhci_readl(cq_host, CQHCI_VER));
0117     CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
0118            cqhci_readl(cq_host, CQHCI_CFG),
0119            cqhci_readl(cq_host, CQHCI_CTL));
0120     CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
0121            cqhci_readl(cq_host, CQHCI_IS),
0122            cqhci_readl(cq_host, CQHCI_ISTE));
0123     CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
0124            cqhci_readl(cq_host, CQHCI_ISGE),
0125            cqhci_readl(cq_host, CQHCI_IC));
0126     CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
0127            cqhci_readl(cq_host, CQHCI_TDLBA),
0128            cqhci_readl(cq_host, CQHCI_TDLBAU));
0129     CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
0130            cqhci_readl(cq_host, CQHCI_TDBR),
0131            cqhci_readl(cq_host, CQHCI_TCN));
0132     CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
0133            cqhci_readl(cq_host, CQHCI_DQS),
0134            cqhci_readl(cq_host, CQHCI_DPT));
0135     CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
0136            cqhci_readl(cq_host, CQHCI_TCLR),
0137            cqhci_readl(cq_host, CQHCI_SSC1));
0138     CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
0139            cqhci_readl(cq_host, CQHCI_SSC2),
0140            cqhci_readl(cq_host, CQHCI_CRDCT));
0141     CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
0142            cqhci_readl(cq_host, CQHCI_RMEM),
0143            cqhci_readl(cq_host, CQHCI_TERRI));
0144     CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
0145            cqhci_readl(cq_host, CQHCI_CRI),
0146            cqhci_readl(cq_host, CQHCI_CRA));
0147 
0148     if (cq_host->ops->dumpregs)
0149         cq_host->ops->dumpregs(mmc);
0150     else
0151         CQHCI_DUMP(": ===========================================\n");
0152 }
0153 
0154 /*
0155  * The allocated descriptor table for task, link & transfer descriptors
0156  * looks like:
0157  * |----------|
0158  * |task desc |  |->|----------|
0159  * |----------|  |  |trans desc|
0160  * |link desc-|->|  |----------|
0161  * |----------|          .
0162  *      .                .
0163  *  no. of slots      max-segs
0164  *      .           |----------|
0165  * |----------|
0166  * The idea here is to create the [task+trans] table and mark & point the
0167  * link desc to the transfer desc table on a per slot basis.
0168  */
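/*
 * For example, with 128-bit task descriptors and 64-bit DMA this works out to
 * a 32-byte slot (16-byte task descriptor + 16-byte link descriptor), while
 * the transfer descriptor table holds up to mmc->max_segs 16-byte entries per
 * slot (12-byte entries with CQHCI_QUIRK_SHORT_TXFR_DESC_SZ).
 */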
0169 static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
0170 {
0171     int i = 0;
0172 
0173     /* task descriptor can be 64/128 bit irrespective of arch */
0174     if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
0175         cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
0176                    CQHCI_TASK_DESC_SZ, CQHCI_CFG);
0177         cq_host->task_desc_len = 16;
0178     } else {
0179         cq_host->task_desc_len = 8;
0180     }
0181 
0182     /*
0183      * The transfer descriptor may be 96 bits long instead of 128 bits,
0184      * which means ADMA would expect the next valid descriptor at either
0185      * the 96th or the 128th bit.
0186      */
0187     if (cq_host->dma64) {
0188         if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
0189             cq_host->trans_desc_len = 12;
0190         else
0191             cq_host->trans_desc_len = 16;
0192         cq_host->link_desc_len = 16;
0193     } else {
0194         cq_host->trans_desc_len = 8;
0195         cq_host->link_desc_len = 8;
0196     }
0197 
0198     /* total size of a slot: 1 task & 1 transfer (link) */
0199     cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
0200 
0201     cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
0202 
0203     cq_host->data_size = get_trans_desc_offset(cq_host, cq_host->mmc->cqe_qdepth);
0204 
0205     pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
0206          mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
0207          cq_host->slot_sz);
0208 
0209     /*
0210      * Allocate a DMA-mapped chunk of memory for the task/link descriptors,
0211      * allocate another DMA-mapped chunk for the transfer descriptors, and
0212      * point each slot's link descriptor at that slot's region of the
0213      * transfer descriptor table.
0214      */
0215     cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
0216                          cq_host->desc_size,
0217                          &cq_host->desc_dma_base,
0218                          GFP_KERNEL);
0219     if (!cq_host->desc_base)
0220         return -ENOMEM;
0221 
0222     cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
0223                           cq_host->data_size,
0224                           &cq_host->trans_desc_dma_base,
0225                           GFP_KERNEL);
0226     if (!cq_host->trans_desc_base) {
0227         dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
0228                    cq_host->desc_base,
0229                    cq_host->desc_dma_base);
0230         cq_host->desc_base = NULL;
0231         cq_host->desc_dma_base = 0;
0232         return -ENOMEM;
0233     }
0234 
0235     pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
0236          mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
0237         (unsigned long long)cq_host->desc_dma_base,
0238         (unsigned long long)cq_host->trans_desc_dma_base);
0239 
0240     for (; i < (cq_host->num_slots); i++)
0241         setup_trans_desc(cq_host, i);
0242 
0243     return 0;
0244 }
0245 
0246 static void __cqhci_enable(struct cqhci_host *cq_host)
0247 {
0248     struct mmc_host *mmc = cq_host->mmc;
0249     u32 cqcfg;
0250 
0251     cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
0252 
0253     /* Configuration must not be changed while enabled */
0254     if (cqcfg & CQHCI_ENABLE) {
0255         cqcfg &= ~CQHCI_ENABLE;
0256         cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
0257     }
0258 
0259     cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);
0260 
0261     if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
0262         cqcfg |= CQHCI_DCMD;
0263 
0264     if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
0265         cqcfg |= CQHCI_TASK_DESC_SZ;
0266 
0267     if (mmc->caps2 & MMC_CAP2_CRYPTO)
0268         cqcfg |= CQHCI_CRYPTO_GENERAL_ENABLE;
0269 
0270     cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
0271 
0272     cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
0273              CQHCI_TDLBA);
0274     cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
0275              CQHCI_TDLBAU);
0276 
0277     cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);
0278 
0279     cqhci_set_irqs(cq_host, 0);
0280 
0281     cqcfg |= CQHCI_ENABLE;
0282 
0283     cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
0284 
0285     if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
0286         cqhci_writel(cq_host, 0, CQHCI_CTL);
0287 
0288     mmc->cqe_on = true;
0289 
0290     if (cq_host->ops->enable)
0291         cq_host->ops->enable(mmc);
0292 
0293     /* Ensure all writes are done before interrupts are enabled */
0294     wmb();
0295 
0296     cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
0297 
0298     cq_host->activated = true;
0299 }
0300 
0301 static void __cqhci_disable(struct cqhci_host *cq_host)
0302 {
0303     u32 cqcfg;
0304 
0305     cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
0306     cqcfg &= ~CQHCI_ENABLE;
0307     cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
0308 
0309     cq_host->mmc->cqe_on = false;
0310 
0311     cq_host->activated = false;
0312 }
0313 
0314 int cqhci_deactivate(struct mmc_host *mmc)
0315 {
0316     struct cqhci_host *cq_host = mmc->cqe_private;
0317 
0318     if (cq_host->enabled && cq_host->activated)
0319         __cqhci_disable(cq_host);
0320 
0321     return 0;
0322 }
0323 EXPORT_SYMBOL(cqhci_deactivate);
0324 
0325 int cqhci_resume(struct mmc_host *mmc)
0326 {
0327     /* Re-enable is done upon first request */
0328     return 0;
0329 }
0330 EXPORT_SYMBOL(cqhci_resume);
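/*
 * Host drivers typically call cqhci_deactivate() from their suspend path
 * (the cqhci_suspend() wrapper in cqhci.h just calls it) and cqhci_resume()
 * from their resume path; the engine is then re-enabled lazily by
 * __cqhci_enable() on the first request after resume, as noted above.
 */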
0331 
0332 static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
0333 {
0334     struct cqhci_host *cq_host = mmc->cqe_private;
0335     int err;
0336 
0337     if (!card->ext_csd.cmdq_en)
0338         return -EINVAL;
0339 
0340     if (cq_host->enabled)
0341         return 0;
0342 
0343     cq_host->rca = card->rca;
0344 
0345     err = cqhci_host_alloc_tdl(cq_host);
0346     if (err) {
0347         pr_err("%s: Failed to enable CQE, error %d\n",
0348                mmc_hostname(mmc), err);
0349         return err;
0350     }
0351 
0352     __cqhci_enable(cq_host);
0353 
0354     cq_host->enabled = true;
0355 
0356 #ifdef DEBUG
0357     cqhci_dumpregs(cq_host);
0358 #endif
0359     return 0;
0360 }
0361 
0362 /* CQHCI is idle and should halt immediately, so set a small timeout */
0363 #define CQHCI_OFF_TIMEOUT 100
0364 
0365 static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
0366 {
0367     return cqhci_readl(cq_host, CQHCI_CTL);
0368 }
0369 
0370 static void cqhci_off(struct mmc_host *mmc)
0371 {
0372     struct cqhci_host *cq_host = mmc->cqe_private;
0373     u32 reg;
0374     int err;
0375 
0376     if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
0377         return;
0378 
0379     if (cq_host->ops->disable)
0380         cq_host->ops->disable(mmc, false);
0381 
0382     cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
0383 
0384     err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
0385                  reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
0386     if (err < 0)
0387         pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
0388     else
0389         pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
0390 
0391     if (cq_host->ops->post_disable)
0392         cq_host->ops->post_disable(mmc);
0393 
0394     mmc->cqe_on = false;
0395 }
0396 
0397 static void cqhci_disable(struct mmc_host *mmc)
0398 {
0399     struct cqhci_host *cq_host = mmc->cqe_private;
0400 
0401     if (!cq_host->enabled)
0402         return;
0403 
0404     cqhci_off(mmc);
0405 
0406     __cqhci_disable(cq_host);
0407 
0408     dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
0409                cq_host->trans_desc_base,
0410                cq_host->trans_desc_dma_base);
0411 
0412     dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
0413                cq_host->desc_base,
0414                cq_host->desc_dma_base);
0415 
0416     cq_host->trans_desc_base = NULL;
0417     cq_host->desc_base = NULL;
0418 
0419     cq_host->enabled = false;
0420 }
0421 
0422 static void cqhci_prep_task_desc(struct mmc_request *mrq,
0423                  struct cqhci_host *cq_host, int tag)
0424 {
0425     __le64 *task_desc = (__le64 __force *)get_desc(cq_host, tag);
0426     u32 req_flags = mrq->data->flags;
0427     u64 desc0;
0428 
0429     desc0 = CQHCI_VALID(1) |
0430         CQHCI_END(1) |
0431         CQHCI_INT(1) |
0432         CQHCI_ACT(0x5) |
0433         CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
0434         CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
0435         CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
0436         CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
0437         CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
0438         CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
0439         CQHCI_BLK_COUNT(mrq->data->blocks) |
0440         CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);
0441 
0442     task_desc[0] = cpu_to_le64(desc0);
0443 
0444     if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
0445         u64 desc1 = cqhci_crypto_prep_task_desc(mrq);
0446 
0447         task_desc[1] = cpu_to_le64(desc1);
0448 
0449         pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx%016llx\n",
0450              mmc_hostname(mrq->host), mrq->tag, desc1, desc0);
0451     } else {
0452         pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
0453              mmc_hostname(mrq->host), mrq->tag, desc0);
0454     }
0455 }
0456 
0457 static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
0458 {
0459     int sg_count;
0460     struct mmc_data *data = mrq->data;
0461 
0462     if (!data)
0463         return -EINVAL;
0464 
0465     sg_count = dma_map_sg(mmc_dev(host), data->sg,
0466                   data->sg_len,
0467                   (data->flags & MMC_DATA_WRITE) ?
0468                   DMA_TO_DEVICE : DMA_FROM_DEVICE);
0469     if (!sg_count) {
0470         pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
0471         return -ENOMEM;
0472     }
0473 
0474     return sg_count;
0475 }
0476 
0477 static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
0478                 bool dma64)
0479 {
0480     __le32 *attr = (__le32 __force *)desc;
0481 
0482     *attr = (CQHCI_VALID(1) |
0483          CQHCI_END(end ? 1 : 0) |
0484          CQHCI_INT(0) |
0485          CQHCI_ACT(0x4) |
0486          CQHCI_DAT_LENGTH(len));
0487 
0488     if (dma64) {
0489         __le64 *dataddr = (__le64 __force *)(desc + 4);
0490 
0491         dataddr[0] = cpu_to_le64(addr);
0492     } else {
0493         __le32 *dataddr = (__le32 __force *)(desc + 4);
0494 
0495         dataddr[0] = cpu_to_le32(addr);
0496     }
0497 }
0498 
0499 static int cqhci_prep_tran_desc(struct mmc_request *mrq,
0500                    struct cqhci_host *cq_host, int tag)
0501 {
0502     struct mmc_data *data = mrq->data;
0503     int i, sg_count, len;
0504     bool end = false;
0505     bool dma64 = cq_host->dma64;
0506     dma_addr_t addr;
0507     u8 *desc;
0508     struct scatterlist *sg;
0509 
0510     sg_count = cqhci_dma_map(mrq->host, mrq);
0511     if (sg_count < 0) {
0512         pr_err("%s: %s: unable to map sg lists, %d\n",
0513                 mmc_hostname(mrq->host), __func__, sg_count);
0514         return sg_count;
0515     }
0516 
0517     desc = get_trans_desc(cq_host, tag);
0518 
0519     for_each_sg(data->sg, sg, sg_count, i) {
0520         addr = sg_dma_address(sg);
0521         len = sg_dma_len(sg);
0522 
0523         if ((i+1) == sg_count)
0524             end = true;
0525         cqhci_set_tran_desc(desc, addr, len, end, dma64);
0526         desc += cq_host->trans_desc_len;
0527     }
0528 
0529     return 0;
0530 }
0531 
0532 static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
0533                    struct mmc_request *mrq)
0534 {
0535     u64 *task_desc = NULL;
0536     u64 data = 0;
0537     u8 resp_type;
0538     u8 *desc;
0539     __le64 *dataddr;
0540     struct cqhci_host *cq_host = mmc->cqe_private;
0541     u8 timing;
0542 
0543     if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
0544         resp_type = 0x0;
0545         timing = 0x1;
0546     } else {
0547         if (mrq->cmd->flags & MMC_RSP_R1B) {
0548             resp_type = 0x3;
0549             timing = 0x0;
0550         } else {
0551             resp_type = 0x2;
0552             timing = 0x1;
0553         }
0554     }
0555 
0556     task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
0557     memset(task_desc, 0, cq_host->task_desc_len);
0558     data |= (CQHCI_VALID(1) |
0559          CQHCI_END(1) |
0560          CQHCI_INT(1) |
0561          CQHCI_QBAR(1) |
0562          CQHCI_ACT(0x5) |
0563          CQHCI_CMD_INDEX(mrq->cmd->opcode) |
0564          CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
0565     if (cq_host->ops->update_dcmd_desc)
0566         cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
0567     *task_desc |= data;
0568     desc = (u8 *)task_desc;
0569     pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
0570          mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
0571     dataddr = (__le64 __force *)(desc + 4);
0572     dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
0573 
0574 }
0575 
0576 static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
0577 {
0578     struct mmc_data *data = mrq->data;
0579 
0580     if (data) {
0581         dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
0582                  (data->flags & MMC_DATA_READ) ?
0583                  DMA_FROM_DEVICE : DMA_TO_DEVICE);
0584     }
0585 }
0586 
0587 static inline int cqhci_tag(struct mmc_request *mrq)
0588 {
0589     return mrq->cmd ? DCMD_SLOT : mrq->tag;
0590 }
0591 
0592 static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
0593 {
0594     int err = 0;
0595     int tag = cqhci_tag(mrq);
0596     struct cqhci_host *cq_host = mmc->cqe_private;
0597     unsigned long flags;
0598 
0599     if (!cq_host->enabled) {
0600         pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
0601         return -EINVAL;
0602     }
0603 
0604     /* First request after resume has to re-enable */
0605     if (!cq_host->activated)
0606         __cqhci_enable(cq_host);
0607 
0608     if (!mmc->cqe_on) {
0609         if (cq_host->ops->pre_enable)
0610             cq_host->ops->pre_enable(mmc);
0611 
0612         cqhci_writel(cq_host, 0, CQHCI_CTL);
0613         mmc->cqe_on = true;
0614         pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
0615         if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
0616             pr_err("%s: cqhci: CQE failed to exit halt state\n",
0617                    mmc_hostname(mmc));
0618         }
0619         if (cq_host->ops->enable)
0620             cq_host->ops->enable(mmc);
0621     }
0622 
0623     if (mrq->data) {
0624         cqhci_prep_task_desc(mrq, cq_host, tag);
0625 
0626         err = cqhci_prep_tran_desc(mrq, cq_host, tag);
0627         if (err) {
0628             pr_err("%s: cqhci: failed to setup tx desc: %d\n",
0629                    mmc_hostname(mmc), err);
0630             return err;
0631         }
0632     } else {
0633         cqhci_prep_dcmd_desc(mmc, mrq);
0634     }
0635 
0636     spin_lock_irqsave(&cq_host->lock, flags);
0637 
0638     if (cq_host->recovery_halt) {
0639         err = -EBUSY;
0640         goto out_unlock;
0641     }
0642 
0643     cq_host->slot[tag].mrq = mrq;
0644     cq_host->slot[tag].flags = 0;
0645 
0646     cq_host->qcnt += 1;
0647     /* Make sure descriptors are ready before ringing the doorbell */
0648     wmb();
0649     cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
0650     if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
0651         pr_debug("%s: cqhci: doorbell not set for tag %d\n",
0652              mmc_hostname(mmc), tag);
0653 out_unlock:
0654     spin_unlock_irqrestore(&cq_host->lock, flags);
0655 
0656     if (err)
0657         cqhci_post_req(mmc, mrq);
0658 
0659     return err;
0660 }
0661 
0662 static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
0663                   bool notify)
0664 {
0665     struct cqhci_host *cq_host = mmc->cqe_private;
0666 
0667     if (!cq_host->recovery_halt) {
0668         cq_host->recovery_halt = true;
0669         pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
0670         wake_up(&cq_host->wait_queue);
0671         if (notify && mrq->recovery_notifier)
0672             mrq->recovery_notifier(mrq);
0673     }
0674 }
0675 
0676 static unsigned int cqhci_error_flags(int error1, int error2)
0677 {
0678     int error = error1 ? error1 : error2;
0679 
0680     switch (error) {
0681     case -EILSEQ:
0682         return CQHCI_HOST_CRC;
0683     case -ETIMEDOUT:
0684         return CQHCI_HOST_TIMEOUT;
0685     default:
0686         return CQHCI_HOST_OTHER;
0687     }
0688 }
0689 
0690 static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
0691                 int data_error)
0692 {
0693     struct cqhci_host *cq_host = mmc->cqe_private;
0694     struct cqhci_slot *slot;
0695     u32 terri;
0696     u32 tdpe;
0697     int tag;
0698 
0699     spin_lock(&cq_host->lock);
0700 
0701     terri = cqhci_readl(cq_host, CQHCI_TERRI);
0702 
0703     pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
0704          mmc_hostname(mmc), status, cmd_error, data_error, terri);
0705 
0706     /* Forget about errors when recovery has already been triggered */
0707     if (cq_host->recovery_halt)
0708         goto out_unlock;
0709 
0710     if (!cq_host->qcnt) {
0711         WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
0712               mmc_hostname(mmc), status, cmd_error, data_error,
0713               terri);
0714         goto out_unlock;
0715     }
0716 
0717     if (CQHCI_TERRI_C_VALID(terri)) {
0718         tag = CQHCI_TERRI_C_TASK(terri);
0719         slot = &cq_host->slot[tag];
0720         if (slot->mrq) {
0721             slot->flags = cqhci_error_flags(cmd_error, data_error);
0722             cqhci_recovery_needed(mmc, slot->mrq, true);
0723         }
0724     }
0725 
0726     if (CQHCI_TERRI_D_VALID(terri)) {
0727         tag = CQHCI_TERRI_D_TASK(terri);
0728         slot = &cq_host->slot[tag];
0729         if (slot->mrq) {
0730             slot->flags = cqhci_error_flags(data_error, cmd_error);
0731             cqhci_recovery_needed(mmc, slot->mrq, true);
0732         }
0733     }
0734 
0735     /*
0736      * Handle ICCE ("Invalid Crypto Configuration Error").  This should
0737      * never happen, since the block layer ensures that all crypto-enabled
0738      * I/O requests have a valid keyslot before they reach the driver.
0739      *
0740      * Note that GCE ("General Crypto Error") is different; it already got
0741      * handled above by checking TERRI.
0742      */
0743     if (status & CQHCI_IS_ICCE) {
0744         tdpe = cqhci_readl(cq_host, CQHCI_TDPE);
0745         WARN_ONCE(1,
0746               "%s: cqhci: invalid crypto configuration error. IRQ status: 0x%08x TDPE: 0x%08x\n",
0747               mmc_hostname(mmc), status, tdpe);
0748         while (tdpe != 0) {
0749             tag = __ffs(tdpe);
0750             tdpe &= ~(1 << tag);
0751             slot = &cq_host->slot[tag];
0752             if (!slot->mrq)
0753                 continue;
0754             slot->flags = cqhci_error_flags(data_error, cmd_error);
0755             cqhci_recovery_needed(mmc, slot->mrq, true);
0756         }
0757     }
0758 
0759     if (!cq_host->recovery_halt) {
0760         /*
0761          * The only way to guarantee forward progress is to mark at
0762          * least one task in error, so if none is indicated, pick one.
0763          */
0764         for (tag = 0; tag < NUM_SLOTS; tag++) {
0765             slot = &cq_host->slot[tag];
0766             if (!slot->mrq)
0767                 continue;
0768             slot->flags = cqhci_error_flags(data_error, cmd_error);
0769             cqhci_recovery_needed(mmc, slot->mrq, true);
0770             break;
0771         }
0772     }
0773 
0774 out_unlock:
0775     spin_unlock(&cq_host->lock);
0776 }
0777 
0778 static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
0779 {
0780     struct cqhci_host *cq_host = mmc->cqe_private;
0781     struct cqhci_slot *slot = &cq_host->slot[tag];
0782     struct mmc_request *mrq = slot->mrq;
0783     struct mmc_data *data;
0784 
0785     if (!mrq) {
0786         WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
0787               mmc_hostname(mmc), tag);
0788         return;
0789     }
0790 
0791     /* No completions allowed during recovery */
0792     if (cq_host->recovery_halt) {
0793         slot->flags |= CQHCI_COMPLETED;
0794         return;
0795     }
0796 
0797     slot->mrq = NULL;
0798 
0799     cq_host->qcnt -= 1;
0800 
0801     data = mrq->data;
0802     if (data) {
0803         if (data->error)
0804             data->bytes_xfered = 0;
0805         else
0806             data->bytes_xfered = data->blksz * data->blocks;
0807     }
0808 
0809     mmc_cqe_request_done(mmc, mrq);
0810 }
0811 
0812 irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
0813               int data_error)
0814 {
0815     u32 status;
0816     unsigned long tag = 0, comp_status;
0817     struct cqhci_host *cq_host = mmc->cqe_private;
0818 
0819     status = cqhci_readl(cq_host, CQHCI_IS);
0820     cqhci_writel(cq_host, status, CQHCI_IS);
0821 
0822     pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
0823 
0824     if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
0825         cmd_error || data_error) {
0826         if (status & CQHCI_IS_RED)
0827             mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_RED);
0828         if (status & CQHCI_IS_GCE)
0829             mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_GCE);
0830         if (status & CQHCI_IS_ICCE)
0831             mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_ICCE);
0832         cqhci_error_irq(mmc, status, cmd_error, data_error);
0833     }
0834 
0835     if (status & CQHCI_IS_TCC) {
0836         /* read TCN and complete the request */
0837         comp_status = cqhci_readl(cq_host, CQHCI_TCN);
0838         cqhci_writel(cq_host, comp_status, CQHCI_TCN);
0839         pr_debug("%s: cqhci: TCN: 0x%08lx\n",
0840              mmc_hostname(mmc), comp_status);
0841 
0842         spin_lock(&cq_host->lock);
0843 
0844         for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
0845             /* complete the corresponding mrq */
0846             pr_debug("%s: cqhci: completing tag %lu\n",
0847                  mmc_hostname(mmc), tag);
0848             cqhci_finish_mrq(mmc, tag);
0849         }
0850 
0851         if (cq_host->waiting_for_idle && !cq_host->qcnt) {
0852             cq_host->waiting_for_idle = false;
0853             wake_up(&cq_host->wait_queue);
0854         }
0855 
0856         spin_unlock(&cq_host->lock);
0857     }
0858 
0859     if (status & CQHCI_IS_TCL)
0860         wake_up(&cq_host->wait_queue);
0861 
0862     if (status & CQHCI_IS_HAC)
0863         wake_up(&cq_host->wait_queue);
0864 
0865     return IRQ_HANDLED;
0866 }
0867 EXPORT_SYMBOL(cqhci_irq);
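/*
 * A minimal sketch (assumed, not part of this driver) of how a host driver's
 * interrupt handler might hand off to cqhci_irq() when the CQE is active;
 * my_host, my_read_int_status() and my_legacy_irq() are hypothetical:
 *
 *     static irqreturn_t my_host_irq(int irq, void *dev_id)
 *     {
 *         struct my_host *host = dev_id;
 *         u32 intmask = my_read_int_status(host);
 *
 *         if (host->mmc->cqe_on)
 *             return cqhci_irq(host->mmc, intmask, 0, 0);
 *
 *         return my_legacy_irq(host, intmask);
 *     }
 *
 * Hosts that decode command/data errors themselves (as the SDHCI glue does)
 * pass those error codes as the last two arguments instead of zero.
 */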
0868 
0869 static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
0870 {
0871     unsigned long flags;
0872     bool is_idle;
0873 
0874     spin_lock_irqsave(&cq_host->lock, flags);
0875     is_idle = !cq_host->qcnt || cq_host->recovery_halt;
0876     *ret = cq_host->recovery_halt ? -EBUSY : 0;
0877     cq_host->waiting_for_idle = !is_idle;
0878     spin_unlock_irqrestore(&cq_host->lock, flags);
0879 
0880     return is_idle;
0881 }
0882 
0883 static int cqhci_wait_for_idle(struct mmc_host *mmc)
0884 {
0885     struct cqhci_host *cq_host = mmc->cqe_private;
0886     int ret;
0887 
0888     wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));
0889 
0890     return ret;
0891 }
0892 
0893 static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
0894               bool *recovery_needed)
0895 {
0896     struct cqhci_host *cq_host = mmc->cqe_private;
0897     int tag = cqhci_tag(mrq);
0898     struct cqhci_slot *slot = &cq_host->slot[tag];
0899     unsigned long flags;
0900     bool timed_out;
0901 
0902     spin_lock_irqsave(&cq_host->lock, flags);
0903     timed_out = slot->mrq == mrq;
0904     if (timed_out) {
0905         slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
0906         cqhci_recovery_needed(mmc, mrq, false);
0907         *recovery_needed = cq_host->recovery_halt;
0908     }
0909     spin_unlock_irqrestore(&cq_host->lock, flags);
0910 
0911     if (timed_out) {
0912         pr_err("%s: cqhci: timeout for tag %d, qcnt %d\n",
0913                mmc_hostname(mmc), tag, cq_host->qcnt);
0914         cqhci_dumpregs(cq_host);
0915     }
0916 
0917     return timed_out;
0918 }
0919 
0920 static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
0921 {
0922     return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
0923 }
0924 
0925 static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
0926 {
0927     struct cqhci_host *cq_host = mmc->cqe_private;
0928     bool ret;
0929     u32 ctl;
0930 
0931     cqhci_set_irqs(cq_host, CQHCI_IS_TCL);
0932 
0933     ctl = cqhci_readl(cq_host, CQHCI_CTL);
0934     ctl |= CQHCI_CLEAR_ALL_TASKS;
0935     cqhci_writel(cq_host, ctl, CQHCI_CTL);
0936 
0937     wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
0938                msecs_to_jiffies(timeout) + 1);
0939 
0940     cqhci_set_irqs(cq_host, 0);
0941 
0942     ret = cqhci_tasks_cleared(cq_host);
0943 
0944     if (!ret)
0945         pr_debug("%s: cqhci: Failed to clear tasks\n",
0946              mmc_hostname(mmc));
0947 
0948     return ret;
0949 }
0950 
0951 static bool cqhci_halted(struct cqhci_host *cq_host)
0952 {
0953     return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
0954 }
0955 
0956 static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
0957 {
0958     struct cqhci_host *cq_host = mmc->cqe_private;
0959     bool ret;
0960     u32 ctl;
0961 
0962     if (cqhci_halted(cq_host))
0963         return true;
0964 
0965     cqhci_set_irqs(cq_host, CQHCI_IS_HAC);
0966 
0967     ctl = cqhci_readl(cq_host, CQHCI_CTL);
0968     ctl |= CQHCI_HALT;
0969     cqhci_writel(cq_host, ctl, CQHCI_CTL);
0970 
0971     wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
0972                msecs_to_jiffies(timeout) + 1);
0973 
0974     cqhci_set_irqs(cq_host, 0);
0975 
0976     ret = cqhci_halted(cq_host);
0977 
0978     if (!ret)
0979         pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
0980 
0981     return ret;
0982 }
0983 
0984 /*
0985  * After halting we expect to be able to use the command line. We interpret the
0986  * failure to halt to mean the data lines might still be in use (and the upper
0987  * layers will need to send a STOP command), so we set the timeout based on a
0988  * generous command timeout.
0989  */
0990 #define CQHCI_START_HALT_TIMEOUT    5
0991 
0992 static void cqhci_recovery_start(struct mmc_host *mmc)
0993 {
0994     struct cqhci_host *cq_host = mmc->cqe_private;
0995 
0996     pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
0997 
0998     WARN_ON(!cq_host->recovery_halt);
0999 
1000     cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);
1001 
1002     if (cq_host->ops->disable)
1003         cq_host->ops->disable(mmc, true);
1004 
1005     mmc->cqe_on = false;
1006 }
1007 
1008 static int cqhci_error_from_flags(unsigned int flags)
1009 {
1010     if (!flags)
1011         return 0;
1012 
1013     /* CRC errors might indicate re-tuning so prefer to report that */
1014     if (flags & CQHCI_HOST_CRC)
1015         return -EILSEQ;
1016 
1017     if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
1018         return -ETIMEDOUT;
1019 
1020     return -EIO;
1021 }
1022 
1023 static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
1024 {
1025     struct cqhci_slot *slot = &cq_host->slot[tag];
1026     struct mmc_request *mrq = slot->mrq;
1027     struct mmc_data *data;
1028 
1029     if (!mrq)
1030         return;
1031 
1032     slot->mrq = NULL;
1033 
1034     cq_host->qcnt -= 1;
1035 
1036     data = mrq->data;
1037     if (data) {
1038         data->bytes_xfered = 0;
1039         data->error = cqhci_error_from_flags(slot->flags);
1040     } else {
1041         mrq->cmd->error = cqhci_error_from_flags(slot->flags);
1042     }
1043 
1044     mmc_cqe_request_done(cq_host->mmc, mrq);
1045 }
1046 
1047 static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
1048 {
1049     int i;
1050 
1051     for (i = 0; i < cq_host->num_slots; i++)
1052         cqhci_recover_mrq(cq_host, i);
1053 }
1054 
1055 /*
1056  * By now the command and data lines should be unused so there is no reason for
1057  * CQHCI to take a long time to halt, but if it doesn't halt there could be
1058  * problems clearing tasks, so be generous.
1059  */
1060 #define CQHCI_FINISH_HALT_TIMEOUT   20
1061 
1062 /* CQHCI could be expected to clear its internal state pretty quickly */
1063 #define CQHCI_CLEAR_TIMEOUT     20
1064 
1065 static void cqhci_recovery_finish(struct mmc_host *mmc)
1066 {
1067     struct cqhci_host *cq_host = mmc->cqe_private;
1068     unsigned long flags;
1069     u32 cqcfg;
1070     bool ok;
1071 
1072     pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
1073 
1074     WARN_ON(!cq_host->recovery_halt);
1075 
1076     ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
1077 
1078     if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
1079         ok = false;
1080 
1081     /*
1082      * The specification contradicts itself: it says that tasks cannot be
1083      * cleared if CQHCI does not halt, yet also that if CQHCI does not
1084      * halt it should be disabled and re-enabled, and that it should not
1085      * be disabled before clearing tasks. Have a go anyway.
1086      */
1087     if (!ok) {
1088         pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
1089         cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
1090         cqcfg &= ~CQHCI_ENABLE;
1091         cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
1092         cqcfg |= CQHCI_ENABLE;
1093         cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
1094         /* Be sure that there are no tasks */
1095         ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
1096         if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
1097             ok = false;
1098         WARN_ON(!ok);
1099     }
1100 
1101     cqhci_recover_mrqs(cq_host);
1102 
1103     WARN_ON(cq_host->qcnt);
1104 
1105     spin_lock_irqsave(&cq_host->lock, flags);
1106     cq_host->qcnt = 0;
1107     cq_host->recovery_halt = false;
1108     mmc->cqe_on = false;
1109     spin_unlock_irqrestore(&cq_host->lock, flags);
1110 
1111     /* Ensure all writes are done before interrupts are re-enabled */
1112     wmb();
1113 
1114     cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);
1115 
1116     cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
1117 
1118     pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
1119 }
1120 
1121 static const struct mmc_cqe_ops cqhci_cqe_ops = {
1122     .cqe_enable = cqhci_enable,
1123     .cqe_disable = cqhci_disable,
1124     .cqe_request = cqhci_request,
1125     .cqe_post_req = cqhci_post_req,
1126     .cqe_off = cqhci_off,
1127     .cqe_wait_for_idle = cqhci_wait_for_idle,
1128     .cqe_timeout = cqhci_timeout,
1129     .cqe_recovery_start = cqhci_recovery_start,
1130     .cqe_recovery_finish = cqhci_recovery_finish,
1131 };
1132 
1133 struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
1134 {
1135     struct cqhci_host *cq_host;
1136     struct resource *cqhci_memres = NULL;
1137 
1138     /* check for and set up the CMDQ interface */
1139     cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1140                            "cqhci");
1141     if (!cqhci_memres) {
1142         dev_dbg(&pdev->dev, "CMDQ not supported\n");
1143         return ERR_PTR(-EINVAL);
1144     }
1145 
1146     cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
1147     if (!cq_host)
1148         return ERR_PTR(-ENOMEM);
1149     cq_host->mmio = devm_ioremap(&pdev->dev,
1150                      cqhci_memres->start,
1151                      resource_size(cqhci_memres));
1152     if (!cq_host->mmio) {
1153         dev_err(&pdev->dev, "failed to remap cqhci regs\n");
1154         return ERR_PTR(-EBUSY);
1155     }
1156     dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");
1157 
1158     return cq_host;
1159 }
1160 EXPORT_SYMBOL(cqhci_pltfm_init);
1161 
1162 static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
1163 {
1164     return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
1165 }
1166 
1167 static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
1168 {
1169     u32 ver = cqhci_readl(cq_host, CQHCI_VER);
1170 
1171     return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
1172 }
1173 
1174 int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
1175           bool dma64)
1176 {
1177     int err;
1178 
1179     cq_host->dma64 = dma64;
1180     cq_host->mmc = mmc;
1181     cq_host->mmc->cqe_private = cq_host;
1182 
1183     cq_host->num_slots = NUM_SLOTS;
1184     cq_host->dcmd_slot = DCMD_SLOT;
1185 
1186     mmc->cqe_ops = &cqhci_cqe_ops;
1187 
1188     mmc->cqe_qdepth = NUM_SLOTS;
1189     if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
1190         mmc->cqe_qdepth -= 1;
1191 
1192     cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
1193                      sizeof(*cq_host->slot), GFP_KERNEL);
1194     if (!cq_host->slot) {
1195         err = -ENOMEM;
1196         goto out_err;
1197     }
1198 
1199     err = cqhci_crypto_init(cq_host);
1200     if (err) {
1201         pr_err("%s: CQHCI crypto initialization failed\n",
1202                mmc_hostname(mmc));
1203         goto out_err;
1204     }
1205 
1206     spin_lock_init(&cq_host->lock);
1207 
1208     init_completion(&cq_host->halt_comp);
1209     init_waitqueue_head(&cq_host->wait_queue);
1210 
1211     pr_info("%s: CQHCI version %u.%02u\n",
1212         mmc_hostname(mmc), cqhci_ver_major(cq_host),
1213         cqhci_ver_minor(cq_host));
1214 
1215     return 0;
1216 
1217 out_err:
1218     pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
1219            mmc_hostname(mmc), cqhci_ver_major(cq_host),
1220            cqhci_ver_minor(cq_host), err);
1221     return err;
1222 }
1223 EXPORT_SYMBOL(cqhci_init);
1224 
1225 MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
1226 MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
1227 MODULE_LICENSE("GPL v2");
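
For context, a minimal hypothetical sketch of how a platform host driver might
wire up the interface exported above at probe time; my_probe(), my_get_mmc(),
my_cqhci_ops and my_add_host() are illustrative names, and error handling
beyond the CQHCI calls is omitted:

    static int my_probe(struct platform_device *pdev)
    {
        struct mmc_host *mmc = my_get_mmc(pdev);    /* hypothetical helper */
        struct cqhci_host *cq_host;
        int ret;

        /* Map the "cqhci" MMIO resource and allocate the cqhci_host */
        cq_host = cqhci_pltfm_init(pdev);
        if (IS_ERR(cq_host))
            return PTR_ERR(cq_host);

        /* Optional host callbacks: enable, disable, dumpregs, ... */
        cq_host->ops = &my_cqhci_ops;               /* hypothetical ops table */

        mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;

        /* Registers cqhci_cqe_ops and sets cqe_qdepth (31 with DCMD) */
        ret = cqhci_init(cq_host, mmc, /* dma64 */ true);
        if (ret)
            return ret;

        return my_add_host(mmc);                    /* hypothetical */
    }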