/*
 * IOMMU driver for Unisoc SoCs
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#define SPRD_IOMMU_PAGE_SHIFT 12
#define SPRD_IOMMU_PAGE_SIZE SZ_4K

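/* Register offsets of the EX-type IOMMU unit */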
#define SPRD_EX_CFG 0x0
#define SPRD_IOMMU_VAOR_BYPASS BIT(4)
#define SPRD_IOMMU_GATE_EN BIT(1)
#define SPRD_IOMMU_EN BIT(0)
#define SPRD_EX_UPDATE 0x4
#define SPRD_EX_FIRST_VPN 0x8
#define SPRD_EX_VPN_RANGE 0xc
#define SPRD_EX_FIRST_PPN 0x10
#define SPRD_EX_DEFAULT_PPN 0x14

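/*
 * Register offsets of the VAU-type IOMMU unit; the version register is
 * read to tell the two variants apart.
 */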
#define SPRD_IOMMU_VERSION 0x0
#define SPRD_VERSION_MASK GENMASK(15, 8)
#define SPRD_VERSION_SHIFT 0x8
#define SPRD_VAU_CFG 0x4
#define SPRD_VAU_UPDATE 0x8
#define SPRD_VAU_AUTH_CFG 0xc
#define SPRD_VAU_FIRST_PPN 0x10
#define SPRD_VAU_DEFAULT_PPN_RD 0x14
#define SPRD_VAU_DEFAULT_PPN_WR 0x18
#define SPRD_VAU_FIRST_VPN 0x1c
#define SPRD_VAU_VPN_RANGE 0x20

enum sprd_iommu_version {
	SPRD_IOMMU_EX,
	SPRD_IOMMU_VAU,
};

/*
 * struct sprd_iommu_device - sprd IOMMU device representation, including
 * hardware information, configuration and driver data
 *
 * @ver: sprd IOMMU IP version
 * @prot_page_va: protect page base virtual address
 * @prot_page_pa: protect page base physical address, used as the default
 *                target page for translations that miss the page table
 * @base: mapped base address for accessing registers
 * @dev: pointer to the basic device structure
 * @iommu: IOMMU core representation
 * @group: IOMMU group
 * @eb: gate clock which controls the IOMMU access
 */
struct sprd_iommu_device {
	enum sprd_iommu_version ver;
	u32 *prot_page_va;
	dma_addr_t prot_page_pa;
	void __iomem *base;
	struct device *dev;
	struct iommu_device iommu;
	struct iommu_group *group;
	struct clk *eb;
};

struct sprd_iommu_domain {
	spinlock_t pgtlock;
	struct iommu_domain domain;
	u32 *pgt_va;
	dma_addr_t pgt_pa;
	struct sprd_iommu_device *sdev;
};

static const struct iommu_ops sprd_iommu_ops;

static struct sprd_iommu_domain *to_sprd_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct sprd_iommu_domain, domain);
}

static inline void
sprd_iommu_write(struct sprd_iommu_device *sdev, unsigned int reg, u32 val)
{
	writel_relaxed(val, sdev->base + reg);
}

static inline u32
sprd_iommu_read(struct sprd_iommu_device *sdev, unsigned int reg)
{
	return readl_relaxed(sdev->base + reg);
}

static inline void
sprd_iommu_update_bits(struct sprd_iommu_device *sdev, unsigned int reg,
		       u32 mask, u32 shift, u32 val)
{
	u32 t = sprd_iommu_read(sdev, reg);

	t = (t & (~(mask << shift))) | ((val & mask) << shift);
	sprd_iommu_write(sdev, reg, t);
}

static inline int
sprd_iommu_get_version(struct sprd_iommu_device *sdev)
{
	int ver = (sprd_iommu_read(sdev, SPRD_IOMMU_VERSION) &
		   SPRD_VERSION_MASK) >> SPRD_VERSION_SHIFT;

	switch (ver) {
	case SPRD_IOMMU_EX:
	case SPRD_IOMMU_VAU:
		return ver;
	default:
		return -EINVAL;
	}
}

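/* The flat page table holds one 32-bit entry per 4 KiB page of the aperture. */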
static size_t
sprd_iommu_pgt_size(struct iommu_domain *domain)
{
	return ((domain->geometry.aperture_end -
		 domain->geometry.aperture_start + 1) >>
		 SPRD_IOMMU_PAGE_SHIFT) * sizeof(u32);
}

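/* Each domain covers a fixed 256 MiB IOVA aperture starting at 0. */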
static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)
{
	struct sprd_iommu_domain *dom;

	if (domain_type != IOMMU_DOMAIN_DMA && domain_type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	spin_lock_init(&dom->pgtlock);

	dom->domain.geometry.aperture_start = 0;
	dom->domain.geometry.aperture_end = SZ_256M - 1;

	return &dom->domain;
}

static void sprd_iommu_domain_free(struct iommu_domain *domain)
{
	struct sprd_iommu_domain *dom = to_sprd_domain(domain);

	kfree(dom);
}

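/*
 * The helpers below program the translation window: the first virtual page
 * number, the number of pages in the window, the page-table base PPN and
 * the default PPN.
 */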
static void sprd_iommu_first_vpn(struct sprd_iommu_domain *dom)
{
	struct sprd_iommu_device *sdev = dom->sdev;
	u32 val;
	unsigned int reg;

	if (sdev->ver == SPRD_IOMMU_EX)
		reg = SPRD_EX_FIRST_VPN;
	else
		reg = SPRD_VAU_FIRST_VPN;

	val = dom->domain.geometry.aperture_start >> SPRD_IOMMU_PAGE_SHIFT;
	sprd_iommu_write(sdev, reg, val);
}

static void sprd_iommu_vpn_range(struct sprd_iommu_domain *dom)
{
	struct sprd_iommu_device *sdev = dom->sdev;
	u32 val;
	unsigned int reg;

	if (sdev->ver == SPRD_IOMMU_EX)
		reg = SPRD_EX_VPN_RANGE;
	else
		reg = SPRD_VAU_VPN_RANGE;

	val = (dom->domain.geometry.aperture_end -
	       dom->domain.geometry.aperture_start) >> SPRD_IOMMU_PAGE_SHIFT;
	sprd_iommu_write(sdev, reg, val);
}

static void sprd_iommu_first_ppn(struct sprd_iommu_domain *dom)
{
	u32 val = dom->pgt_pa >> SPRD_IOMMU_PAGE_SHIFT;
	struct sprd_iommu_device *sdev = dom->sdev;
	unsigned int reg;

	if (sdev->ver == SPRD_IOMMU_EX)
		reg = SPRD_EX_FIRST_PPN;
	else
		reg = SPRD_VAU_FIRST_PPN;

	sprd_iommu_write(sdev, reg, val);
}

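/* Point the default PPN register(s) at the protect page allocated in probe. */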
static void sprd_iommu_default_ppn(struct sprd_iommu_device *sdev)
{
	u32 val = sdev->prot_page_pa >> SPRD_IOMMU_PAGE_SHIFT;

	if (sdev->ver == SPRD_IOMMU_EX) {
		sprd_iommu_write(sdev, SPRD_EX_DEFAULT_PPN, val);
	} else if (sdev->ver == SPRD_IOMMU_VAU) {
		sprd_iommu_write(sdev, SPRD_VAU_DEFAULT_PPN_RD, val);
		sprd_iommu_write(sdev, SPRD_VAU_DEFAULT_PPN_WR, val);
	}
}

static void sprd_iommu_hw_en(struct sprd_iommu_device *sdev, bool en)
{
	unsigned int reg_cfg;
	u32 mask, val;

	if (sdev->ver == SPRD_IOMMU_EX)
		reg_cfg = SPRD_EX_CFG;
	else
		reg_cfg = SPRD_VAU_CFG;

	mask = SPRD_IOMMU_EN | SPRD_IOMMU_GATE_EN;
	val = en ? mask : 0;
	sprd_iommu_update_bits(sdev, reg_cfg, mask, 0, val);
}

static int sprd_iommu_attach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
	struct sprd_iommu_domain *dom = to_sprd_domain(domain);
	size_t pgt_size = sprd_iommu_pgt_size(domain);

	if (dom->sdev) {
		pr_err("There's already a device attached to this domain.\n");
		return -EINVAL;
	}

	dom->pgt_va = dma_alloc_coherent(sdev->dev, pgt_size, &dom->pgt_pa, GFP_KERNEL);
	if (!dom->pgt_va)
		return -ENOMEM;

	dom->sdev = sdev;

	sprd_iommu_first_ppn(dom);
	sprd_iommu_first_vpn(dom);
	sprd_iommu_vpn_range(dom);
	sprd_iommu_default_ppn(sdev);
	sprd_iommu_hw_en(sdev, true);

	return 0;
}

static void sprd_iommu_detach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct sprd_iommu_domain *dom = to_sprd_domain(domain);
	struct sprd_iommu_device *sdev = dom->sdev;
	size_t pgt_size = sprd_iommu_pgt_size(domain);

	if (!sdev)
		return;

	dma_free_coherent(sdev->dev, pgt_size, dom->pgt_va, dom->pgt_pa);
	sprd_iommu_hw_en(sdev, false);
	dom->sdev = NULL;
}

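/*
 * Mapping fills the flat page table: each 32-bit entry holds the physical
 * page number (PPN) of the corresponding 4 KiB IOVA page.
 */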
static int sprd_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct sprd_iommu_domain *dom = to_sprd_domain(domain);
	unsigned int page_num = size >> SPRD_IOMMU_PAGE_SHIFT;
	unsigned long flags;
	unsigned int i;
	u32 *pgt_base_iova;
	u32 pabase = (u32)paddr;
	unsigned long start = domain->geometry.aperture_start;
	unsigned long end = domain->geometry.aperture_end;

	if (!dom->sdev) {
		pr_err("No sprd_iommu_device attached to the domain\n");
		return -EINVAL;
	}

	if (iova < start || (iova + size) > (end + 1)) {
		dev_err(dom->sdev->dev, "iova 0x%lx + size 0x%zx is outside the iova range\n",
			iova, size);
		return -EINVAL;
	}

	pgt_base_iova = dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT);

	spin_lock_irqsave(&dom->pgtlock, flags);
	for (i = 0; i < page_num; i++) {
		pgt_base_iova[i] = pabase >> SPRD_IOMMU_PAGE_SHIFT;
		pabase += SPRD_IOMMU_PAGE_SIZE;
	}
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return 0;
}

static size_t sprd_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size, struct iommu_iotlb_gather *iotlb_gather)
{
	struct sprd_iommu_domain *dom = to_sprd_domain(domain);
	unsigned long flags;
	u32 *pgt_base_iova;
	unsigned int page_num = size >> SPRD_IOMMU_PAGE_SHIFT;
	unsigned long start = domain->geometry.aperture_start;
	unsigned long end = domain->geometry.aperture_end;

	if (iova < start || (iova + size) > (end + 1))
		return 0;

	pgt_base_iova = dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT);

	spin_lock_irqsave(&dom->pgtlock, flags);
	memset(pgt_base_iova, 0, page_num * sizeof(u32));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return size;
}

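/* Writing the UPDATE register tells the hardware to pick up page-table changes. */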
static void sprd_iommu_sync_map(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct sprd_iommu_domain *dom = to_sprd_domain(domain);
	unsigned int reg;

	if (dom->sdev->ver == SPRD_IOMMU_EX)
		reg = SPRD_EX_UPDATE;
	else
		reg = SPRD_VAU_UPDATE;

	sprd_iommu_write(dom->sdev, reg, 0xffffffff);
}

static void sprd_iommu_sync(struct iommu_domain *domain,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	sprd_iommu_sync_map(domain, 0, 0);
}

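/* Translate an IOVA by reading its PPN entry and adding the in-page offset. */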
static phys_addr_t sprd_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct sprd_iommu_domain *dom = to_sprd_domain(domain);
	unsigned long flags;
	phys_addr_t pa;
	unsigned long start = domain->geometry.aperture_start;
	unsigned long end = domain->geometry.aperture_end;

	if (WARN_ON(iova < start || iova > end))
		return 0;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = *(dom->pgt_va + ((iova - start) >> SPRD_IOMMU_PAGE_SHIFT));
	pa = (pa << SPRD_IOMMU_PAGE_SHIFT) + ((iova - start) & (SPRD_IOMMU_PAGE_SIZE - 1));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}

static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct sprd_iommu_device *sdev;

	if (!fwspec || fwspec->ops != &sprd_iommu_ops)
		return ERR_PTR(-ENODEV);

	sdev = dev_iommu_priv_get(dev);

	return &sdev->iommu;
}

static struct iommu_group *sprd_iommu_device_group(struct device *dev)
{
	struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);

	return iommu_group_ref_get(sdev->group);
}

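/* Associate the client device with the IOMMU instance named by its phandle. */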
static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct platform_device *pdev;

	if (!dev_iommu_priv_get(dev)) {
		pdev = of_find_device_by_node(args->np);
		dev_iommu_priv_set(dev, platform_get_drvdata(pdev));
		platform_device_put(pdev);
	}

	return 0;
}

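/* pgsize_bitmap accepts any power-of-two size >= 4 KiB; larger sizes are mapped as runs of 4 KiB pages. */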
static const struct iommu_ops sprd_iommu_ops = {
	.domain_alloc	= sprd_iommu_domain_alloc,
	.probe_device	= sprd_iommu_probe_device,
	.device_group	= sprd_iommu_device_group,
	.of_xlate	= sprd_iommu_of_xlate,
	.pgsize_bitmap	= ~0UL << SPRD_IOMMU_PAGE_SHIFT,
	.owner		= THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= sprd_iommu_attach_device,
		.detach_dev	= sprd_iommu_detach_device,
		.map		= sprd_iommu_map,
		.unmap		= sprd_iommu_unmap,
		.iotlb_sync_map	= sprd_iommu_sync_map,
		.iotlb_sync	= sprd_iommu_sync,
		.iova_to_phys	= sprd_iommu_iova_to_phys,
		.free		= sprd_iommu_domain_free,
	}
};

static const struct of_device_id sprd_iommu_of_match[] = {
	{ .compatible = "sprd,iommu-v1" },
	{ },
};
MODULE_DEVICE_TABLE(of, sprd_iommu_of_match);

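/* Not every IOMMU instance has a gate clock, so treat the clock as optional. */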
static int sprd_iommu_clk_enable(struct sprd_iommu_device *sdev)
{
	struct clk *eb;

	eb = devm_clk_get_optional(sdev->dev, NULL);
	if (!eb)
		return 0;

	if (IS_ERR(eb))
		return PTR_ERR(eb);

	sdev->eb = eb;
	return clk_prepare_enable(eb);
}

static void sprd_iommu_clk_disable(struct sprd_iommu_device *sdev)
{
	if (sdev->eb)
		clk_disable_unprepare(sdev->eb);
}

static int sprd_iommu_probe(struct platform_device *pdev)
{
	struct sprd_iommu_device *sdev;
	struct device *dev = &pdev->dev;
	void __iomem *base;
	int ret;

	sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL);
	if (!sdev)
		return -ENOMEM;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base)) {
		dev_err(dev, "Failed to get ioremap resource.\n");
		return PTR_ERR(base);
	}
	sdev->base = base;

	sdev->prot_page_va = dma_alloc_coherent(dev, SPRD_IOMMU_PAGE_SIZE,
						&sdev->prot_page_pa, GFP_KERNEL);
	if (!sdev->prot_page_va)
		return -ENOMEM;

	platform_set_drvdata(pdev, sdev);
	sdev->dev = dev;

	sdev->group = iommu_group_alloc();
	if (IS_ERR(sdev->group)) {
		ret = PTR_ERR(sdev->group);
		goto free_page;
	}

	ret = iommu_device_sysfs_add(&sdev->iommu, dev, NULL, dev_name(dev));
	if (ret)
		goto put_group;

	ret = iommu_device_register(&sdev->iommu, &sprd_iommu_ops, dev);
	if (ret)
		goto remove_sysfs;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &sprd_iommu_ops);

	ret = sprd_iommu_clk_enable(sdev);
	if (ret)
		goto unregister_iommu;

	ret = sprd_iommu_get_version(sdev);
	if (ret < 0) {
		dev_err(dev, "IOMMU version(%d) is invalid.\n", ret);
		goto disable_clk;
	}
	sdev->ver = ret;

	return 0;

disable_clk:
	sprd_iommu_clk_disable(sdev);
unregister_iommu:
	iommu_device_unregister(&sdev->iommu);
remove_sysfs:
	iommu_device_sysfs_remove(&sdev->iommu);
put_group:
	iommu_group_put(sdev->group);
free_page:
	dma_free_coherent(sdev->dev, SPRD_IOMMU_PAGE_SIZE, sdev->prot_page_va, sdev->prot_page_pa);
	return ret;
}

static int sprd_iommu_remove(struct platform_device *pdev)
{
	struct sprd_iommu_device *sdev = platform_get_drvdata(pdev);

	dma_free_coherent(sdev->dev, SPRD_IOMMU_PAGE_SIZE, sdev->prot_page_va, sdev->prot_page_pa);

	iommu_group_put(sdev->group);
	sdev->group = NULL;

	bus_set_iommu(&platform_bus_type, NULL);

	platform_set_drvdata(pdev, NULL);
	iommu_device_sysfs_remove(&sdev->iommu);
	iommu_device_unregister(&sdev->iommu);

	return 0;
}

static struct platform_driver sprd_iommu_driver = {
	.driver	= {
		.name		= "sprd-iommu",
		.of_match_table	= sprd_iommu_of_match,
		.suppress_bind_attrs = true,
	},
	.probe	= sprd_iommu_probe,
	.remove	= sprd_iommu_remove,
};
module_platform_driver(sprd_iommu_driver);

MODULE_DESCRIPTION("IOMMU driver for Unisoc SoCs");
MODULE_ALIAS("platform:sprd-iommu");
MODULE_LICENSE("GPL");