/*
 * Driver for MMC and SSD cards for Cavium ThunderX SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2016 Cavium Inc.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>

#include "cavium.h"

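/*
 * Bus acquire/release callbacks: serialize access to the controller
 * between slots via the host semaphore.
 */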
static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host)
{
	down(&host->mmc_serializer);
}

static void thunder_mmc_release_bus(struct cvm_mmc_host *host)
{
	up(&host->mmc_serializer);
}

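/* Acknowledge the requested interrupt bits, then enable them via the SET register. */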
static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
	writeq(val, host->base + MIO_EMM_INT(host));
	writeq(val, host->base + MIO_EMM_INT_EN_SET(host));
}

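/*
 * Allocate up to nine MSI-X vectors and point each one at the shared
 * cvm_mmc_interrupt handler from the common Cavium code.
 */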
static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
					   struct pci_dev *pdev)
{
	int nvec, ret, i;

	nvec = pci_alloc_irq_vectors(pdev, 1, 9, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	/* register interrupts */
	for (i = 0; i < nvec; i++) {
		ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				       cvm_mmc_interrupt,
				       0, cvm_mmc_irq_names[i], host);
		if (ret)
			return ret;
	}
	return 0;
}

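/*
 * Probe: map BAR0, enable the controller clock, hook up the ThunderX
 * callbacks and create one child platform device per "mmc-slot" DT node
 * so the common Cavium code can register an mmc_host for each slot.
 */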
static int thunder_mmc_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct device_node *child_node;
	struct cvm_mmc_host *host;
	int ret, i = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	pci_set_drvdata(pdev, host);
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret)
		return ret;

	host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!host->base) {
		ret = -EINVAL;
		goto error;
	}

	/* On ThunderX these are identical */
	host->dma_base = host->base;

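	/* fixed offsets of the eMMC and DMA register blocks within BAR0 */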
	host->reg_off = 0x2000;
	host->reg_off_dma = 0x160;

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto error;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto error;
	host->sys_freq = clk_get_rate(host->clk);

	spin_lock_init(&host->irq_handler_lock);
	sema_init(&host->mmc_serializer, 1);

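	/* ThunderX-specific callbacks used by the common Cavium MMC code */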
	host->dev = dev;
	host->acquire_bus = thunder_mmc_acquire_bus;
	host->release_bus = thunder_mmc_release_bus;
	host->int_enable = thunder_mmc_int_enable;

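	/*
	 * Flags for the common code: scatter-gather DMA, full (not offset
	 * based) DMA addresses, and locking around the interrupt handler.
	 */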
	host->use_sg = true;
	host->big_dma_addr = true;
	host->need_irq_handler_lock = true;
	host->last_slot = -1;

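	/* the DMA engine generates 48-bit addresses */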
	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
	if (ret)
		goto error;

	/*
	 * Clear out any pending interrupts that may be left over from
	 * bootloader.
	 */
	writeq(127, host->base + MIO_EMM_INT_EN(host));
	writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
	/* Clear DMA FIFO */
	writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));

	ret = thunder_mmc_register_interrupts(host, pdev);
	if (ret)
		goto error;

	for_each_child_of_node(node, child_node) {
		/*
		 * mmc_of_parse and devm* require one device per slot.
		 * Create a dummy device per slot and set the node pointer to
		 * the slot. The easiest way to get this is using
		 * of_platform_device_create.
		 */
		if (of_device_is_compatible(child_node, "mmc-slot")) {
			host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
								       &pdev->dev);
			if (!host->slot_pdev[i])
				continue;

			ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
			if (ret) {
				of_node_put(child_node);
				goto error;
			}
		}
		i++;
	}
	dev_info(dev, "probed\n");
	return 0;

error:
	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);
		if (host->slot_pdev[i]) {
			get_device(&host->slot_pdev[i]->dev);
			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
			put_device(&host->slot_pdev[i]->dev);
		}
	}
	clk_disable_unprepare(host->clk);
	pci_release_regions(pdev);
	return ret;
}

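/* Undo probe: remove the slots, stop the DMA engine and release resources. */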
static void thunder_mmc_remove(struct pci_dev *pdev)
{
	struct cvm_mmc_host *host = pci_get_drvdata(pdev);
	u64 dma_cfg;
	int i;

	for (i = 0; i < CAVIUM_MAX_MMC; i++)
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);

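	/* disable the DMA engine */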
	dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
	dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	clk_disable_unprepare(host->clk);
	pci_release_regions(pdev);
}

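/* The ThunderX eMMC controller appears as PCI device 0xa010 under the Cavium vendor ID. */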
static const struct pci_device_id thunder_mmc_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa010) },
	{ 0, }
};

static struct pci_driver thunder_mmc_driver = {
	.name = KBUILD_MODNAME,
	.id_table = thunder_mmc_id_table,
	.probe = thunder_mmc_probe,
	.remove = thunder_mmc_remove,
};

module_pci_driver(thunder_mmc_driver);

MODULE_AUTHOR("Cavium Inc.");
MODULE_DESCRIPTION("Cavium ThunderX eMMC Driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, thunder_mmc_id_table);