0001
0002
0003
0004
0005 #include <linux/bitfield.h>
0006 #include <linux/bug.h>
0007 #include <linux/clk.h>
0008 #include <linux/device.h>
0009 #include <linux/dma-direction.h>
0010 #include <linux/dma-mapping.h>
0011 #include <linux/err.h>
0012 #include <linux/errno.h>
0013 #include <linux/interrupt.h>
0014 #include <linux/iommu.h>
0015 #include <linux/iopoll.h>
0016 #include <linux/ioport.h>
0017 #include <linux/log2.h>
0018 #include <linux/module.h>
0019 #include <linux/of_platform.h>
0020 #include <linux/platform_device.h>
0021 #include <linux/pm.h>
0022 #include <linux/pm_runtime.h>
0023 #include <linux/reset.h>
0024 #include <linux/sizes.h>
0025 #include <linux/slab.h>
0026 #include <linux/spinlock.h>
0027 #include <linux/types.h>
0028
/* Register map of the Allwinner H6 IOMMU (offsets from the MMIO base). */
#define IOMMU_RESET_REG 0x010
#define IOMMU_ENABLE_REG 0x020
#define IOMMU_ENABLE_ENABLE BIT(0)

#define IOMMU_BYPASS_REG 0x030
#define IOMMU_AUTO_GATING_REG 0x040
#define IOMMU_AUTO_GATING_ENABLE BIT(0)

#define IOMMU_WBUF_CTRL_REG 0x044
#define IOMMU_OOO_CTRL_REG 0x048
#define IOMMU_4KB_BDY_PRT_CTRL_REG 0x04c
/* Physical address of the L1 directory table (Translation Table Base). */
#define IOMMU_TTB_REG 0x050
#define IOMMU_TLB_ENABLE_REG 0x060
#define IOMMU_TLB_PREFETCH_REG 0x070
#define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m) BIT(m)

/* Self-clearing flush triggers: PTW cache, macro TLB, one micro TLB per master. */
#define IOMMU_TLB_FLUSH_REG 0x080
#define IOMMU_TLB_FLUSH_PTW_CACHE BIT(17)
#define IOMMU_TLB_FLUSH_MACRO_TLB BIT(16)
#define IOMMU_TLB_FLUSH_MICRO_TLB(i) (BIT(i) & GENMASK(5, 0))

#define IOMMU_TLB_IVLD_ADDR_REG 0x090
#define IOMMU_TLB_IVLD_ADDR_MASK_REG 0x094
#define IOMMU_TLB_IVLD_ENABLE_REG 0x098
#define IOMMU_TLB_IVLD_ENABLE_ENABLE BIT(0)

#define IOMMU_PC_IVLD_ADDR_REG 0x0a0
#define IOMMU_PC_IVLD_ENABLE_REG 0x0a8
#define IOMMU_PC_IVLD_ENABLE_ENABLE BIT(0)

/*
 * Access-control registers: two ACI domains per 32-bit register, the low
 * half-word for even domains and the high half-word for odd ones, with a
 * read-unavailable/write-unavailable bit pair per master.
 */
#define IOMMU_DM_AUT_CTRL_REG(d) (0x0b0 + ((d) / 2) * 4)
#define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m) (1 << (((d & 1) * 16) + ((m) * 2)))
#define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m) (1 << (((d & 1) * 16) + ((m) * 2) + 1))

#define IOMMU_DM_AUT_OVWT_REG 0x0d0
#define IOMMU_INT_ENABLE_REG 0x100
#define IOMMU_INT_CLR_REG 0x104
#define IOMMU_INT_STA_REG 0x108
/* Per-master fault address, plus dedicated L1/L2 page-table fault addresses. */
#define IOMMU_INT_ERR_ADDR_REG(i) (0x110 + (i) * 4)
#define IOMMU_INT_ERR_ADDR_L1_REG 0x130
#define IOMMU_INT_ERR_ADDR_L2_REG 0x134
#define IOMMU_INT_ERR_DATA_REG(i) (0x150 + (i) * 4)
#define IOMMU_L1PG_INT_REG 0x0180
#define IOMMU_L2PG_INT_REG 0x0184

/* Interrupt status bits: invalid page-table faults plus one bit per master. */
#define IOMMU_INT_INVALID_L2PG BIT(17)
#define IOMMU_INT_INVALID_L1PG BIT(16)
#define IOMMU_INT_MASTER_PERMISSION(m) BIT(m)
#define IOMMU_INT_MASTER_MASK (IOMMU_INT_MASTER_PERMISSION(0) | \
			       IOMMU_INT_MASTER_PERMISSION(1) | \
			       IOMMU_INT_MASTER_PERMISSION(2) | \
			       IOMMU_INT_MASTER_PERMISSION(3) | \
			       IOMMU_INT_MASTER_PERMISSION(4) | \
			       IOMMU_INT_MASTER_PERMISSION(5))
#define IOMMU_INT_MASK (IOMMU_INT_INVALID_L1PG | \
			IOMMU_INT_INVALID_L2PG | \
			IOMMU_INT_MASTER_MASK)

#define PT_ENTRY_SIZE sizeof(u32)

/* L1 directory table: 4096 32-bit entries (16 KiB). */
#define NUM_DT_ENTRIES 4096
#define DT_SIZE (NUM_DT_ENTRIES * PT_ENTRY_SIZE)

/* L2 page table: 256 32-bit entries (1 KiB), one 4 KiB page per entry. */
#define NUM_PT_ENTRIES 256
#define PT_SIZE (NUM_PT_ENTRIES * PT_ENTRY_SIZE)
0094
/* Driver state for one IOMMU hardware instance. */
struct sun50i_iommu {
	struct iommu_device iommu;

	/* Serializes all accesses to the IOMMU registers. */
	spinlock_t iommu_lock;

	struct device *dev;		/* platform device backing this instance */
	void __iomem *base;		/* MMIO register window */
	struct reset_control *reset;	/* reset line, asserted while disabled */
	struct clk *clk;		/* bus/module clock */

	struct iommu_domain *domain;	/* currently attached domain, or NULL */
	struct iommu_group *group;	/* single group shared by all masters */
	struct kmem_cache *pt_pool;	/* PT_SIZE-aligned allocator for L2 tables */
};

/* One address space: an L1 directory table plus lazily allocated L2 tables. */
struct sun50i_iommu_domain {
	struct iommu_domain domain;

	/* Number of devices attached to this domain. */
	refcount_t refcnt;

	/* L1 directory table (CPU virtual address) and its DMA address. */
	u32 *dt;
	dma_addr_t dt_dma;

	struct sun50i_iommu *iommu;	/* hardware instance, set on attach */
};
0123
/* Convert a generic iommu_domain into our embedding domain structure. */
static struct sun50i_iommu_domain *to_sun50i_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct sun50i_iommu_domain, domain);
}

/*
 * Fetch the IOMMU instance stashed in the device's IOMMU private data
 * (set by sun50i_iommu_of_xlate()).
 */
static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

/* 32-bit MMIO read at @offset from the register base. */
static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
{
	return readl(iommu->base + offset);
}

/* 32-bit MMIO write at @offset from the register base. */
static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
{
	writel(value, iommu->base + offset);
}
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
/*
 * A 32-bit IOVA is split in three fields for the two-level walk:
 * bits 31:20 index the L1 directory table (4096 entries), bits 19:12
 * index an L2 page table (256 entries), bits 11:0 are the offset
 * inside the 4 KiB page.
 */
#define SUN50I_IOVA_DTE_MASK GENMASK(31, 20)
#define SUN50I_IOVA_PTE_MASK GENMASK(19, 12)
#define SUN50I_IOVA_PAGE_MASK GENMASK(11, 0)

/* Index of the L1 directory entry covering @iova. */
static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
}

/* Index of the L2 page-table entry covering @iova. */
static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
}

/* Byte offset of @iova within its 4 KiB page. */
static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
{
	return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
}
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
/*
 * An L1 directory entry (DTE) holds the physical address of a 1 KiB-aligned
 * L2 page table in bits 31:10, and an attribute field in bits 1:0 where
 * only the value 1 marks the entry as valid.
 */
#define SUN50I_DTE_PT_ADDRESS_MASK GENMASK(31, 10)
#define SUN50I_DTE_PT_ATTRS GENMASK(1, 0)
#define SUN50I_DTE_PT_VALID 1

/* Physical address of the L2 page table referenced by @dte. */
static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
{
	return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
}

/* True if @dte points to a valid L2 page table. */
static bool sun50i_dte_is_pt_valid(u32 dte)
{
	return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
}

/* Build a valid DTE from the DMA address of an L2 page table. */
static u32 sun50i_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
}
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
/*
 * Access Control Index stored in each PTE.  The permissions granted to a
 * given index are programmed through the IOMMU_DM_AUT_CTRL registers in
 * sun50i_iommu_enable(): RD forbids writes, WR forbids reads, NONE forbids
 * both, RD_WR allows everything.
 */
enum sun50i_iommu_aci {
	SUN50I_IOMMU_ACI_DO_NOT_USE = 0,
	SUN50I_IOMMU_ACI_NONE,
	SUN50I_IOMMU_ACI_RD,
	SUN50I_IOMMU_ACI_WR,
	SUN50I_IOMMU_ACI_RD_WR,
};

/*
 * An L2 page-table entry (PTE) holds the 4 KiB-aligned physical page
 * address in bits 31:12, the ACI in bits 7:4, and a valid bit at bit 1.
 */
#define SUN50I_PTE_PAGE_ADDRESS_MASK GENMASK(31, 12)
#define SUN50I_PTE_ACI_MASK GENMASK(7, 4)
#define SUN50I_PTE_PAGE_VALID BIT(1)

/* Physical page address encoded in @pte. */
static phys_addr_t sun50i_pte_get_page_address(u32 pte)
{
	return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK;
}

/* Access-control index encoded in @pte. */
static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte)
{
	return FIELD_GET(SUN50I_PTE_ACI_MASK, pte);
}

/* True if @pte maps a valid page. */
static bool sun50i_pte_is_page_valid(u32 pte)
{
	return pte & SUN50I_PTE_PAGE_VALID;
}
0267
0268 static u32 sun50i_mk_pte(phys_addr_t page, int prot)
0269 {
0270 enum sun50i_iommu_aci aci;
0271 u32 flags = 0;
0272
0273 if (prot & (IOMMU_READ | IOMMU_WRITE))
0274 aci = SUN50I_IOMMU_ACI_RD_WR;
0275 else if (prot & IOMMU_READ)
0276 aci = SUN50I_IOMMU_ACI_RD;
0277 else if (prot & IOMMU_WRITE)
0278 aci = SUN50I_IOMMU_ACI_WR;
0279 else
0280 aci = SUN50I_IOMMU_ACI_NONE;
0281
0282 flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci);
0283 page &= SUN50I_PTE_PAGE_ADDRESS_MASK;
0284 return page | flags | SUN50I_PTE_PAGE_VALID;
0285 }
0286
/*
 * Push @count page-table entries starting at @vaddr out to memory so the
 * IOMMU's page-table walker sees them.  Tables are mapped DMA_TO_DEVICE,
 * hence the sync-for-device.  NOTE(review): relies on table DMA addresses
 * being identical to their physical addresses (checked by the WARN_ON in
 * sun50i_iommu_alloc_page_table()).
 */
static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
			       void *vaddr, unsigned int count)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	dma_addr_t dma = virt_to_phys(vaddr);
	size_t size = count * PT_ENTRY_SIZE;

	dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
}
0296
/*
 * Flush the page-table-walk cache, the macro TLB and every master's micro
 * TLB, then poll until the hardware clears the self-resetting trigger bits.
 * Caller must hold iommu->iommu_lock.  Returns 0 or -ETIMEDOUT.
 */
static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
{
	u32 reg;
	int ret;

	assert_spin_locked(&iommu->iommu_lock);

	iommu_write(iommu,
		    IOMMU_TLB_FLUSH_REG,
		    IOMMU_TLB_FLUSH_PTW_CACHE |
		    IOMMU_TLB_FLUSH_MACRO_TLB |
		    IOMMU_TLB_FLUSH_MICRO_TLB(5) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(4) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(3) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(2) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(1) |
		    IOMMU_TLB_FLUSH_MICRO_TLB(0));

	/* Atomic poll: we are under a spinlock, possibly in IRQ context. */
	ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
					reg, !reg,
					1, 2000);
	if (ret)
		dev_warn(iommu->dev, "TLB Flush timed out!\n");

	return ret;
}
0323
/* iommu_domain_ops.flush_iotlb_all: flush every TLB under the register lock. */
static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	unsigned long flags;

	/*
	 * The core may flush a domain before any device was attached to it
	 * (sun50i_domain->iommu is only set in attach_domain), in which case
	 * there is no hardware state to flush yet - just ignore the call.
	 */
	if (!iommu)
		return;

	spin_lock_irqsave(&iommu->iommu_lock, flags);
	sun50i_iommu_flush_all_tlb(iommu);
	spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}

/*
 * iommu_domain_ops.iotlb_sync: the hardware has no ranged invalidation
 * used here, so syncing a gather degenerates to a full flush.
 */
static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *gather)
{
	sun50i_iommu_flush_iotlb_all(domain);
}
0351
/*
 * Bring the IOMMU out of reset and program it for the currently attached
 * domain: translation table base, per-master TLB prefetch, interrupts and
 * the access-control matrix for each ACI, then flush the TLBs and enable
 * translation.  No-op when no domain is attached.  Returns 0 on success
 * or a negative errno, with clock and reset unwound on failure.
 */
static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
{
	struct sun50i_iommu_domain *sun50i_domain;
	unsigned long flags;
	int ret;

	if (!iommu->domain)
		return 0;

	sun50i_domain = to_sun50i_domain(iommu->domain);

	ret = reset_control_deassert(iommu->reset);
	if (ret)
		return ret;

	ret = clk_prepare_enable(iommu->clk);
	if (ret)
		goto err_reset_assert;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	/* Point the page-table walker at the domain's L1 directory table. */
	iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
	iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
		    IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
	iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
	/* ACI_NONE: deny both reads and writes for every master. */
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5));

	/* ACI_RD: read-only - deny writes for every master. */
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) |
		    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5));

	/* ACI_WR: write-only - deny reads for every master. */
	iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) |
		    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5));

	ret = sun50i_iommu_flush_all_tlb(iommu);
	if (ret) {
		spin_unlock_irqrestore(&iommu->iommu_lock, flags);
		goto err_clk_disable;
	}

	iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
	iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	return 0;

err_clk_disable:
	clk_disable_unprepare(iommu->clk);

err_reset_assert:
	reset_control_assert(iommu->reset);

	return ret;
}
0433
/*
 * Counterpart of sun50i_iommu_enable(): stop translation, clear the table
 * base, then gate the clock and put the block back in reset.
 */
static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu->iommu_lock, flags);

	iommu_write(iommu, IOMMU_ENABLE_REG, 0);
	iommu_write(iommu, IOMMU_TTB_REG, 0);

	spin_unlock_irqrestore(&iommu->iommu_lock, flags);

	clk_disable_unprepare(iommu->clk);
	reset_control_assert(iommu->reset);
}
0448
/*
 * Allocate a zeroed L2 page table from the slab pool and map it for the
 * device.  Returns the CPU pointer to the table, or an ERR_PTR on failure.
 * Ownership: callers release with sun50i_iommu_free_page_table().
 */
static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
					   gfp_t gfp)
{
	dma_addr_t pt_dma;
	u32 *page_table;

	page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu->dev, pt_dma)) {
		dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
		kmem_cache_free(iommu->pt_pool, page_table);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The rest of the driver converts DTE contents back to CPU pointers
	 * with phys_to_virt(), so the DMA address must equal the physical
	 * address (i.e. no IOMMU/swiotlb in front of this device).
	 */
	WARN_ON(pt_dma != virt_to_phys(page_table));

	return page_table;
}

/* Unmap an L2 page table from the device and return it to the slab pool. */
static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
					 u32 *page_table)
{
	phys_addr_t pt_phys = virt_to_phys(page_table);

	dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
	kmem_cache_free(iommu->pt_pool, page_table);
}
0480
/*
 * Return the L2 page table covering @iova, allocating and installing one
 * into the L1 directory if the slot is empty.  A cmpxchg() resolves races
 * between concurrent mappers: the loser frees its freshly allocated table
 * and uses the one that won.  Returns the table's CPU pointer or ERR_PTR.
 */
static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
				      dma_addr_t iova, gfp_t gfp)
{
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 *page_table;
	u32 *dte_addr;
	u32 old_dte;
	u32 dte;

	dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	dte = *dte_addr;
	if (sun50i_dte_is_pt_valid(dte)) {
		/* Fast path: a table is already installed for this range. */
		phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte);
		return (u32 *)phys_to_virt(pt_phys);
	}

	page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
	if (IS_ERR(page_table))
		return page_table;

	dte = sun50i_mk_dte(virt_to_phys(page_table));
	old_dte = cmpxchg(dte_addr, 0, dte);
	if (old_dte) {
		/* Lost the race: adopt the table another thread installed. */
		phys_addr_t installed_pt_phys =
			sun50i_dte_get_pt_address(old_dte);
		u32 *installed_pt = phys_to_virt(installed_pt_phys);
		u32 *drop_pt = page_table;

		page_table = installed_pt;
		dte = old_dte;
		sun50i_iommu_free_page_table(iommu, drop_pt);
	}

	/* Make both the new table and the directory entry visible to HW. */
	sun50i_table_flush(sun50i_domain, page_table, PT_SIZE);
	sun50i_table_flush(sun50i_domain, dte_addr, 1);

	return page_table;
}
0519
/*
 * iommu_domain_ops.map: install a single 4 KiB translation @iova -> @paddr
 * with @prot permissions.  Fails with -EBUSY if the slot is already mapped.
 */
static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = sun50i_domain->iommu;
	u32 pte_index;
	u32 *page_table, *pte_addr;
	int ret = 0;

	page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp);
	if (IS_ERR(page_table)) {
		ret = PTR_ERR(page_table);
		goto out;
	}

	pte_index = sun50i_iova_get_pte_index(iova);
	pte_addr = &page_table[pte_index];
	if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) {
		phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr);
		dev_err(iommu->dev,
			"iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n",
			&iova, &page_phys, &paddr, prot);
		ret = -EBUSY;
		goto out;
	}

	*pte_addr = sun50i_mk_pte(paddr, prot);
	sun50i_table_flush(sun50i_domain, pte_addr, 1);

out:
	return ret;
}
0552
/*
 * iommu_domain_ops.unmap: tear down the single 4 KiB translation at @iova.
 * Returns the number of bytes unmapped (SZ_4K), or 0 if nothing was mapped
 * there.  The TLB is flushed later via iotlb_sync/flush_iotlb_all.
 */
static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
				 size_t size, struct iommu_iotlb_gather *gather)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	phys_addr_t pt_phys;
	u32 *pte_addr;
	u32 dte;

	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	if (!sun50i_dte_is_pt_valid(dte))
		return 0;

	pt_phys = sun50i_dte_get_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);

	if (!sun50i_pte_is_page_valid(*pte_addr))
		return 0;

	memset(pte_addr, 0, sizeof(*pte_addr));
	sun50i_table_flush(sun50i_domain, pte_addr, 1);

	return SZ_4K;
}
0576
/*
 * iommu_domain_ops.iova_to_phys: software walk of the two-level tables.
 * Returns the physical address backing @iova, or 0 if it is unmapped.
 */
static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
					     dma_addr_t iova)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	phys_addr_t pt_phys;
	u32 *page_table;
	u32 dte, pte;

	dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
	if (!sun50i_dte_is_pt_valid(dte))
		return 0;

	pt_phys = sun50i_dte_get_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[sun50i_iova_get_pte_index(iova)];
	if (!sun50i_pte_is_page_valid(pte))
		return 0;

	return sun50i_pte_get_page_address(pte) +
		sun50i_iova_get_page_offset(iova);
}
0598
0599 static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
0600 {
0601 struct sun50i_iommu_domain *sun50i_domain;
0602
0603 if (type != IOMMU_DOMAIN_DMA &&
0604 type != IOMMU_DOMAIN_IDENTITY &&
0605 type != IOMMU_DOMAIN_UNMANAGED)
0606 return NULL;
0607
0608 sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
0609 if (!sun50i_domain)
0610 return NULL;
0611
0612 sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
0613 get_order(DT_SIZE));
0614 if (!sun50i_domain->dt)
0615 goto err_free_domain;
0616
0617 refcount_set(&sun50i_domain->refcnt, 1);
0618
0619 sun50i_domain->domain.geometry.aperture_start = 0;
0620 sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
0621 sun50i_domain->domain.geometry.force_aperture = true;
0622
0623 return &sun50i_domain->domain;
0624
0625 err_free_domain:
0626 kfree(sun50i_domain);
0627
0628 return NULL;
0629 }
0630
/*
 * iommu_domain_ops.free: release the L1 directory and the domain itself.
 * L2 tables were already freed in sun50i_iommu_detach_domain().
 */
static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);

	free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
	sun50i_domain->dt = NULL;

	kfree(sun50i_domain);
}
0640
0641 static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu,
0642 struct sun50i_iommu_domain *sun50i_domain)
0643 {
0644 iommu->domain = &sun50i_domain->domain;
0645 sun50i_domain->iommu = iommu;
0646
0647 sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt,
0648 DT_SIZE, DMA_TO_DEVICE);
0649 if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) {
0650 dev_err(iommu->dev, "Couldn't map L1 Page Table\n");
0651 return -ENOMEM;
0652 }
0653
0654 return sun50i_iommu_enable(iommu);
0655 }
0656
/*
 * Unbind a domain from the hardware once its last device detaches: free
 * every live L2 table, disable the IOMMU and unmap the L1 directory.
 */
static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
				       struct sun50i_iommu_domain *sun50i_domain)
{
	unsigned int i;

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		phys_addr_t pt_phys;
		u32 *page_table;
		u32 *dte_addr;
		u32 dte;

		dte_addr = &sun50i_domain->dt[i];
		dte = *dte_addr;
		if (!sun50i_dte_is_pt_valid(dte))
			continue;

		/* Invalidate the DTE before freeing the table it references. */
		memset(dte_addr, 0, sizeof(*dte_addr));
		sun50i_table_flush(sun50i_domain, dte_addr, 1);

		pt_phys = sun50i_dte_get_pt_address(dte);
		page_table = phys_to_virt(pt_phys);
		sun50i_iommu_free_page_table(iommu, page_table);
	}

	sun50i_iommu_disable(iommu);

	dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
			 DT_SIZE, DMA_TO_DEVICE);

	iommu->domain = NULL;
}
0689
/*
 * iommu_domain_ops.detach_dev: drop @dev from @domain; when the last
 * reference goes away, tear the domain off the hardware.
 */
static void sun50i_iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
	struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
	struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);

	dev_dbg(dev, "Detaching from IOMMU domain\n");

	/* Ignore detaches from a domain that is not the live one. */
	if (iommu->domain != domain)
		return;

	if (refcount_dec_and_test(&sun50i_domain->refcnt))
		sun50i_iommu_detach_domain(iommu, sun50i_domain);
}
0704
0705 static int sun50i_iommu_attach_device(struct iommu_domain *domain,
0706 struct device *dev)
0707 {
0708 struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
0709 struct sun50i_iommu *iommu;
0710
0711 iommu = sun50i_iommu_from_dev(dev);
0712 if (!iommu)
0713 return -ENODEV;
0714
0715 dev_dbg(dev, "Attaching to IOMMU domain\n");
0716
0717 refcount_inc(&sun50i_domain->refcnt);
0718
0719 if (iommu->domain == domain)
0720 return 0;
0721
0722 if (iommu->domain)
0723 sun50i_iommu_detach_device(iommu->domain, dev);
0724
0725 sun50i_iommu_attach_domain(iommu, sun50i_domain);
0726
0727 return 0;
0728 }
0729
/*
 * iommu_ops.probe_device: succeed only for devices that went through
 * of_xlate() and therefore carry our instance in their IOMMU private data.
 */
static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
{
	struct sun50i_iommu *iommu;

	iommu = sun50i_iommu_from_dev(dev);
	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

/*
 * iommu_ops.device_group: all masters sit behind the one shared group
 * allocated at probe time.
 */
static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
{
	struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);

	return iommu_group_ref_get(iommu->group);
}
0747
0748 static int sun50i_iommu_of_xlate(struct device *dev,
0749 struct of_phandle_args *args)
0750 {
0751 struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
0752 unsigned id = args->args[0];
0753
0754 dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));
0755
0756 return iommu_fwspec_add_ids(dev, &id, 1);
0757 }
0758
/* IOMMU core callbacks; only 4 KiB pages are supported by the hardware walk. */
static const struct iommu_ops sun50i_iommu_ops = {
	.pgsize_bitmap = SZ_4K,
	.device_group = sun50i_iommu_device_group,
	.domain_alloc = sun50i_iommu_domain_alloc,
	.of_xlate = sun50i_iommu_of_xlate,
	.probe_device = sun50i_iommu_probe_device,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = sun50i_iommu_attach_device,
		.detach_dev = sun50i_iommu_detach_device,
		.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
		.iotlb_sync = sun50i_iommu_iotlb_sync,
		.iova_to_phys = sun50i_iommu_iova_to_phys,
		.map = sun50i_iommu_map,
		.unmap = sun50i_iommu_unmap,
		.free = sun50i_iommu_domain_free,
	}
};
0776
/*
 * Log a translation fault and forward it to whoever registered a fault
 * handler on the attached domain via report_iommu_fault().
 */
static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
				      unsigned master, phys_addr_t iova,
				      unsigned prot)
{
	dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
		&iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd");

	if (iommu->domain)
		report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
	else
		dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
}
0789
/*
 * Handle an invalid L1/L2 page-table entry fault: read the faulting IOVA
 * from @addr_reg and the responsible master bitmap from @blame_reg, then
 * report it.  Caller must hold iommu->iommu_lock.  Returns the fault IOVA.
 */
static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
					      unsigned addr_reg,
					      unsigned blame_reg)
{
	phys_addr_t iova;
	unsigned master;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	iova = iommu_read(iommu, addr_reg);
	blame = iommu_read(iommu, blame_reg);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);

	/*
	 * The page-table fault registers do not tell us whether the access
	 * was a read or a write, so report it as a read by convention.
	 */
	sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ);

	return iova;
}
0813
/*
 * Handle a permission fault: identify the master from the interrupt status,
 * read the faulting IOVA and the ACI of the offending PTE, and infer the
 * access direction from which ACI was violated.  Caller must hold
 * iommu->iommu_lock.  Returns the fault IOVA.
 */
static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
{
	enum sun50i_iommu_aci aci;
	phys_addr_t iova;
	unsigned master;
	unsigned dir;
	u32 blame;

	assert_spin_locked(&iommu->iommu_lock);

	blame = iommu_read(iommu, IOMMU_INT_STA_REG);
	master = ilog2(blame & IOMMU_INT_MASTER_MASK);
	iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master));
	aci = sun50i_get_pte_aci(iommu_read(iommu,
					    IOMMU_INT_ERR_DATA_REG(master)));

	switch (aci) {
	/*
	 * The page was read-only (only writes are denied for ACI_RD, see
	 * sun50i_iommu_enable()), so the faulting access must be a write.
	 */
	case SUN50I_IOMMU_ACI_RD:
		dir = IOMMU_FAULT_WRITE;
		break;

	/*
	 * ACI_WR denies reads only, so the fault was a read; ACI_NONE
	 * denies both and ACI_RD_WR should never fault - for all of these
	 * (and any unexpected value) report a read by default.
	 */
	case SUN50I_IOMMU_ACI_WR:
	case SUN50I_IOMMU_ACI_NONE:
	case SUN50I_IOMMU_ACI_RD_WR:
	default:
		dir = IOMMU_FAULT_READ;
		break;
	}

	sun50i_iommu_report_fault(iommu, master, iova, dir);

	return iova;
}
0868
/*
 * Interrupt handler: dispatch page-table vs. permission faults, acknowledge
 * the interrupt, then pulse the per-source reset bits to let the blocked
 * master resume.
 */
static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
{
	struct sun50i_iommu *iommu = dev_id;
	u32 status;

	spin_lock(&iommu->iommu_lock);

	status = iommu_read(iommu, IOMMU_INT_STA_REG);
	if (!(status & IOMMU_INT_MASK)) {
		/* Not ours (or spurious) - let the core know. */
		spin_unlock(&iommu->iommu_lock);
		return IRQ_NONE;
	}

	if (status & IOMMU_INT_INVALID_L2PG)
		sun50i_iommu_handle_pt_irq(iommu,
					   IOMMU_INT_ERR_ADDR_L2_REG,
					   IOMMU_L2PG_INT_REG);
	else if (status & IOMMU_INT_INVALID_L1PG)
		sun50i_iommu_handle_pt_irq(iommu,
					   IOMMU_INT_ERR_ADDR_L1_REG,
					   IOMMU_L1PG_INT_REG);
	else
		sun50i_iommu_handle_perm_irq(iommu);

	iommu_write(iommu, IOMMU_INT_CLR_REG, status);

	/* Toggle the faulting sources' reset bits low then high. */
	iommu_write(iommu, IOMMU_RESET_REG, ~status);
	iommu_write(iommu, IOMMU_RESET_REG, status);

	spin_unlock(&iommu->iommu_lock);

	return IRQ_HANDLED;
}
0902
/*
 * Platform probe: allocate driver state, the L2 table slab pool and the
 * shared group; map registers, acquire clock/reset/IRQ; then register with
 * the IOMMU core.  Resources acquired manually are released in reverse
 * order through the goto-cleanup ladder.
 */
static int sun50i_iommu_probe(struct platform_device *pdev)
{
	struct sun50i_iommu *iommu;
	int ret, irq;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;
	spin_lock_init(&iommu->iommu_lock);
	platform_set_drvdata(pdev, iommu);
	iommu->dev = &pdev->dev;

	/*
	 * L2 tables must be PT_SIZE-aligned so their physical address fits
	 * in the DTE's 31:10 address field - hence align == size.
	 */
	iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev),
					   PT_SIZE, PT_SIZE,
					   SLAB_HWCACHE_ALIGN,
					   NULL);
	if (!iommu->pt_pool)
		return -ENOMEM;

	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		ret = PTR_ERR(iommu->group);
		goto err_free_cache;
	}

	iommu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iommu->base)) {
		ret = PTR_ERR(iommu->base);
		goto err_free_group;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_group;
	}

	iommu->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(iommu->clk)) {
		dev_err(&pdev->dev, "Couldn't get our clock.\n");
		ret = PTR_ERR(iommu->clk);
		goto err_free_group;
	}

	iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(iommu->reset)) {
		dev_err(&pdev->dev, "Couldn't get our reset line.\n");
		ret = PTR_ERR(iommu->reset);
		goto err_free_group;
	}

	ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
				     NULL, dev_name(&pdev->dev));
	if (ret)
		goto err_free_group;

	ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev);
	if (ret)
		goto err_remove_sysfs;

	/* Request the IRQ last: the handler touches fully initialized state. */
	ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0,
			       dev_name(&pdev->dev), iommu);
	if (ret < 0)
		goto err_unregister;

	bus_set_iommu(&platform_bus_type, &sun50i_iommu_ops);

	return 0;

err_unregister:
	iommu_device_unregister(&iommu->iommu);

err_remove_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);

err_free_group:
	iommu_group_put(iommu->group);

err_free_cache:
	kmem_cache_destroy(iommu->pt_pool);

	return ret;
}
0986
/* Device-tree match table; the driver binds to the H6's IOMMU node. */
static const struct of_device_id sun50i_iommu_dt[] = {
	{ .compatible = "allwinner,sun50i-h6-iommu", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun50i_iommu_dt);

static struct platform_driver sun50i_iommu_driver = {
	.driver = {
		.name = "sun50i-iommu",
		.of_match_table = sun50i_iommu_dt,
		/* Unbinding a live IOMMU would strand attached masters. */
		.suppress_bind_attrs = true,
	}
};
builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe);

MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>");
MODULE_LICENSE("Dual BSD/GPL");