0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * omap iommu: tlb and pagetable primitives
0004  *
0005  * Copyright (C) 2008-2010 Nokia Corporation
0006  * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
0007  *
0008  * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
0009  *      Paul Mundt and Toshihiro Kobayashi
0010  */
0011 
0012 #include <linux/dma-mapping.h>
0013 #include <linux/err.h>
0014 #include <linux/slab.h>
0015 #include <linux/interrupt.h>
0016 #include <linux/ioport.h>
0017 #include <linux/platform_device.h>
0018 #include <linux/iommu.h>
0019 #include <linux/omap-iommu.h>
0020 #include <linux/mutex.h>
0021 #include <linux/spinlock.h>
0022 #include <linux/io.h>
0023 #include <linux/pm_runtime.h>
0024 #include <linux/of.h>
0025 #include <linux/of_irq.h>
0026 #include <linux/of_platform.h>
0027 #include <linux/regmap.h>
0028 #include <linux/mfd/syscon.h>
0029 
0030 #include <linux/platform_data/iommu-omap.h>
0031 
0032 #include "omap-iopgtable.h"
0033 #include "omap-iommu.h"
0034 
0035 static const struct iommu_ops omap_iommu_ops;
0036 
0037 #define to_iommu(dev)   ((struct omap_iommu *)dev_get_drvdata(dev))
0038 
0039 /* bitmap of the page sizes currently supported */
0040 #define OMAP_IOMMU_PGSIZES  (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
0041 
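/*
 * MMU_LOCK register layout, as implied by the masks below: the 5-bit BASE
 * field (bits 14:10) holds the number of locked/preserved TLB entries at
 * the bottom of the TLB, and the 5-bit VICT field (bits 8:4) selects the
 * victim entry used for the next TLB load or read-back.
 */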
0042 #define MMU_LOCK_BASE_SHIFT 10
0043 #define MMU_LOCK_BASE_MASK  (0x1f << MMU_LOCK_BASE_SHIFT)
0044 #define MMU_LOCK_BASE(x)    \
0045     ((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)
0046 
0047 #define MMU_LOCK_VICT_SHIFT 4
0048 #define MMU_LOCK_VICT_MASK  (0x1f << MMU_LOCK_VICT_SHIFT)
0049 #define MMU_LOCK_VICT(x)    \
0050     ((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
0051 
0052 static struct platform_driver omap_iommu_driver;
0053 static struct kmem_cache *iopte_cachep;
0054 
0055 /**
0056  * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
0057  * @dom:    generic iommu domain handle
0058  **/
0059 static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
0060 {
0061     return container_of(dom, struct omap_iommu_domain, domain);
0062 }
0063 
0064 /**
0065  * omap_iommu_save_ctx - Save registers for pm off-mode support
0066  * @dev:    client device
0067  *
0068  * This should be treated as a deprecated API. It is preserved only
0069  * to maintain existing functionality for the OMAP3 ISP driver.
0070  **/
0071 void omap_iommu_save_ctx(struct device *dev)
0072 {
0073     struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
0074     struct omap_iommu *obj;
0075     u32 *p;
0076     int i;
0077 
0078     if (!arch_data)
0079         return;
0080 
0081     while (arch_data->iommu_dev) {
0082         obj = arch_data->iommu_dev;
0083         p = obj->ctx;
0084         for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
0085             p[i] = iommu_read_reg(obj, i * sizeof(u32));
0086             dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
0087                 p[i]);
0088         }
0089         arch_data++;
0090     }
0091 }
0092 EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
0093 
0094 /**
0095  * omap_iommu_restore_ctx - Restore registers for pm off-mode support
0096  * @dev:    client device
0097  *
0098  * This should be treated as a deprecated API. It is preserved only
0099  * to maintain existing functionality for the OMAP3 ISP driver.
0100  **/
0101 void omap_iommu_restore_ctx(struct device *dev)
0102 {
0103     struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
0104     struct omap_iommu *obj;
0105     u32 *p;
0106     int i;
0107 
0108     if (!arch_data)
0109         return;
0110 
0111     while (arch_data->iommu_dev) {
0112         obj = arch_data->iommu_dev;
0113         p = obj->ctx;
0114         for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
0115             iommu_write_reg(obj, p[i], i * sizeof(u32));
0116             dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
0117                 p[i]);
0118         }
0119         arch_data++;
0120     }
0121 }
0122 EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
0123 
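/*
 * On DRA7 DSP subsystems the MMU is additionally gated by the
 * DSP_SYS_MMU_CONFIG register in the control module, reached through the
 * obj->syscfg regmap set up in omap_iommu_dra7_get_dsp_system_cfg(); on
 * other devices obj->syscfg is NULL and this helper is a no-op.
 */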
0124 static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
0125 {
0126     u32 val, mask;
0127 
0128     if (!obj->syscfg)
0129         return;
0130 
0131     mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
0132     val = enable ? mask : 0;
0133     regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
0134 }
0135 
0136 static void __iommu_set_twl(struct omap_iommu *obj, bool on)
0137 {
0138     u32 l = iommu_read_reg(obj, MMU_CNTL);
0139 
0140     if (on)
0141         iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
0142     else
0143         iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
0144 
0145     l &= ~MMU_CNTL_MASK;
0146     if (on)
0147         l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
0148     else
0149         l |= (MMU_CNTL_MMU_EN);
0150 
0151     iommu_write_reg(obj, l, MMU_CNTL);
0152 }
0153 
0154 static int omap2_iommu_enable(struct omap_iommu *obj)
0155 {
0156     u32 l, pa;
0157 
0158     if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))
0159         return -EINVAL;
0160 
0161     pa = virt_to_phys(obj->iopgd);
0162     if (!IS_ALIGNED(pa, SZ_16K))
0163         return -EINVAL;
0164 
0165     l = iommu_read_reg(obj, MMU_REVISION);
0166     dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
0167          (l >> 4) & 0xf, l & 0xf);
0168 
0169     iommu_write_reg(obj, pa, MMU_TTB);
0170 
0171     dra7_cfg_dspsys_mmu(obj, true);
0172 
0173     if (obj->has_bus_err_back)
0174         iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
0175 
0176     __iommu_set_twl(obj, true);
0177 
0178     return 0;
0179 }
0180 
0181 static void omap2_iommu_disable(struct omap_iommu *obj)
0182 {
0183     u32 l = iommu_read_reg(obj, MMU_CNTL);
0184 
0185     l &= ~MMU_CNTL_MASK;
0186     iommu_write_reg(obj, l, MMU_CNTL);
0187     dra7_cfg_dspsys_mmu(obj, false);
0188 
0189     dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
0190 }
0191 
0192 static int iommu_enable(struct omap_iommu *obj)
0193 {
0194     int ret;
0195 
0196     ret = pm_runtime_get_sync(obj->dev);
0197     if (ret < 0)
0198         pm_runtime_put_noidle(obj->dev);
0199 
0200     return ret < 0 ? ret : 0;
0201 }
0202 
0203 static void iommu_disable(struct omap_iommu *obj)
0204 {
0205     pm_runtime_put_sync(obj->dev);
0206 }
0207 
0208 /*
0209  *  TLB operations
0210  */
0211 static u32 iotlb_cr_to_virt(struct cr_regs *cr)
0212 {
0213     u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
0214     u32 mask = get_cam_va_mask(cr->cam & page_size);
0215 
0216     return cr->cam & mask;
0217 }
0218 
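/*
 * Pack the endianness, element size and mixed-page attributes of an
 * iotlb_entry into the layout expected by the pagetable descriptors.
 * For 4KB and 64KB pages the attributes stay in the low bits; for 1MB
 * sections and 16MB supersections they are shifted up by six bits.
 */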
0219 static u32 get_iopte_attr(struct iotlb_entry *e)
0220 {
0221     u32 attr;
0222 
0223     attr = e->mixed << 5;
0224     attr |= e->endian;
0225     attr |= e->elsz >> 3;
0226     attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
0227             (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
0228     return attr;
0229 }
0230 
0231 static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
0232 {
0233     u32 status, fault_addr;
0234 
0235     status = iommu_read_reg(obj, MMU_IRQSTATUS);
0236     status &= MMU_IRQ_MASK;
0237     if (!status) {
0238         *da = 0;
0239         return 0;
0240     }
0241 
0242     fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
0243     *da = fault_addr;
0244 
0245     iommu_write_reg(obj, status, MMU_IRQSTATUS);
0246 
0247     return status;
0248 }
0249 
0250 void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
0251 {
0252     u32 val;
0253 
0254     val = iommu_read_reg(obj, MMU_LOCK);
0255 
0256     l->base = MMU_LOCK_BASE(val);
0257     l->vict = MMU_LOCK_VICT(val);
0258 }
0259 
0260 void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
0261 {
0262     u32 val;
0263 
0264     val = (l->base << MMU_LOCK_BASE_SHIFT);
0265     val |= (l->vict << MMU_LOCK_VICT_SHIFT);
0266 
0267     iommu_write_reg(obj, val, MMU_LOCK);
0268 }
0269 
0270 static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
0271 {
0272     cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
0273     cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
0274 }
0275 
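/*
 * Program one CAM/RAM pair into the TLB: write the tag and translation
 * registers, flush any stale entry matching that CAM address, then latch
 * the new entry into the current victim slot via MMU_LD_TLB.
 */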
0276 static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
0277 {
0278     iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
0279     iommu_write_reg(obj, cr->ram, MMU_RAM);
0280 
0281     iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
0282     iommu_write_reg(obj, 1, MMU_LD_TLB);
0283 }
0284 
0285 /* only used in iotlb iteration for-loop */
0286 struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
0287 {
0288     struct cr_regs cr;
0289     struct iotlb_lock l;
0290 
0291     iotlb_lock_get(obj, &l);
0292     l.vict = n;
0293     iotlb_lock_set(obj, &l);
0294     iotlb_read_cr(obj, &cr);
0295 
0296     return cr;
0297 }
0298 
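/*
 * Unless PREFETCH_IOTLB is defined, load_iotlb_entry() below is a no-op
 * and translations are resolved by the hardware table-walk logic
 * (MMU_CNTL_TWL_EN, see __iommu_set_twl()) on a TLB miss rather than
 * being preloaded when a mapping is created.
 */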
0299 #ifdef PREFETCH_IOTLB
0300 static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
0301                       struct iotlb_entry *e)
0302 {
0303     struct cr_regs *cr;
0304 
0305     if (!e)
0306         return NULL;
0307 
0308     if (e->da & ~(get_cam_va_mask(e->pgsz))) {
0309         dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
0310             e->da);
0311         return ERR_PTR(-EINVAL);
0312     }
0313 
0314     cr = kmalloc(sizeof(*cr), GFP_KERNEL);
0315     if (!cr)
0316         return ERR_PTR(-ENOMEM);
0317 
0318     cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
0319     cr->ram = e->pa | e->endian | e->elsz | e->mixed;
0320 
0321     return cr;
0322 }
0323 
0324 /**
0325  * load_iotlb_entry - Set an iommu tlb entry
0326  * @obj:    target iommu
0327  * @e:      an iommu tlb entry info
0328  **/
0329 static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
0330 {
0331     int err = 0;
0332     struct iotlb_lock l;
0333     struct cr_regs *cr;
0334 
0335     if (!obj || !obj->nr_tlb_entries || !e)
0336         return -EINVAL;
0337 
0338     pm_runtime_get_sync(obj->dev);
0339 
0340     iotlb_lock_get(obj, &l);
0341     if (l.base == obj->nr_tlb_entries) {
0342         dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
0343         err = -EBUSY;
0344         goto out;
0345     }
0346     if (!e->prsvd) {
0347         int i;
0348         struct cr_regs tmp;
0349 
0350         for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
0351             if (!iotlb_cr_valid(&tmp))
0352                 break;
0353 
0354         if (i == obj->nr_tlb_entries) {
0355             dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
0356             err = -EBUSY;
0357             goto out;
0358         }
0359 
0360         iotlb_lock_get(obj, &l);
0361     } else {
0362         l.vict = l.base;
0363         iotlb_lock_set(obj, &l);
0364     }
0365 
0366     cr = iotlb_alloc_cr(obj, e);
0367     if (IS_ERR(cr)) {
0368         pm_runtime_put_sync(obj->dev);
0369         return PTR_ERR(cr);
0370     }
0371 
0372     iotlb_load_cr(obj, cr);
0373     kfree(cr);
0374 
0375     if (e->prsvd)
0376         l.base++;
0377     /* increment victim for next tlb load */
0378     if (++l.vict == obj->nr_tlb_entries)
0379         l.vict = l.base;
0380     iotlb_lock_set(obj, &l);
0381 out:
0382     pm_runtime_put_sync(obj->dev);
0383     return err;
0384 }
0385 
0386 #else /* !PREFETCH_IOTLB */
0387 
0388 static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
0389 {
0390     return 0;
0391 }
0392 
0393 #endif /* !PREFETCH_IOTLB */
0394 
0395 static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
0396 {
0397     return load_iotlb_entry(obj, e);
0398 }
0399 
0400 /**
0401  * flush_iotlb_page - Clear an iommu tlb entry
0402  * @obj:    target iommu
0403  * @da:     iommu device virtual address
0404  *
0405  * Clear an iommu tlb entry which includes 'da' address.
0406  **/
0407 static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
0408 {
0409     int i;
0410     struct cr_regs cr;
0411 
0412     pm_runtime_get_sync(obj->dev);
0413 
0414     for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
0415         u32 start;
0416         size_t bytes;
0417 
0418         if (!iotlb_cr_valid(&cr))
0419             continue;
0420 
0421         start = iotlb_cr_to_virt(&cr);
0422         bytes = iopgsz_to_bytes(cr.cam & 3);
0423 
0424         if ((start <= da) && (da < start + bytes)) {
0425             dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
0426                 __func__, start, da, bytes);
0427             iotlb_load_cr(obj, &cr);
0428             iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
0429             break;
0430         }
0431     }
0432     pm_runtime_put_sync(obj->dev);
0433 
0434     if (i == obj->nr_tlb_entries)
0435         dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
0436 }
0437 
0438 /**
0439  * flush_iotlb_all - Clear all iommu tlb entries
0440  * @obj:    target iommu
0441  **/
0442 static void flush_iotlb_all(struct omap_iommu *obj)
0443 {
0444     struct iotlb_lock l;
0445 
0446     pm_runtime_get_sync(obj->dev);
0447 
0448     l.base = 0;
0449     l.vict = 0;
0450     iotlb_lock_set(obj, &l);
0451 
0452     iommu_write_reg(obj, 1, MMU_GFLUSH);
0453 
0454     pm_runtime_put_sync(obj->dev);
0455 }
0456 
0457 /*
0458  *  H/W pagetable operations
0459  */
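/*
 * The pagetable is a two-level structure kept coherent with the MMU via
 * the streaming DMA API (flush_iopte_range() below): a 16KB-aligned L1
 * table of 32-bit PGD entries, each of which is either a 1MB section,
 * one of sixteen replicated entries forming a 16MB supersection, or a
 * pointer to an L2 table of PTEs describing 4KB small pages or sixteen
 * replicated 64KB large-page entries.
 */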
0460 static void flush_iopte_range(struct device *dev, dma_addr_t dma,
0461                   unsigned long offset, int num_entries)
0462 {
0463     size_t size = num_entries * sizeof(u32);
0464 
0465     dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
0466 }
0467 
0468 static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
0469 {
0470     dma_addr_t pt_dma;
0471 
0472     /* Note: freed ioptes must be clean and ready for re-use */
0473     if (iopte) {
0474         if (dma_valid) {
0475             pt_dma = virt_to_phys(iopte);
0476             dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
0477                      DMA_TO_DEVICE);
0478         }
0479 
0480         kmem_cache_free(iopte_cachep, iopte);
0481     }
0482 }
0483 
0484 static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
0485             dma_addr_t *pt_dma, u32 da)
0486 {
0487     u32 *iopte;
0488     unsigned long offset = iopgd_index(da) * sizeof(da);
0489 
0490     /* an L2 table already exists */
0491     if (*iopgd)
0492         goto pte_ready;
0493 
0494     /*
0495      * do the allocation outside the page table lock
0496      */
0497     spin_unlock(&obj->page_table_lock);
0498     iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
0499     spin_lock(&obj->page_table_lock);
0500 
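    /*
     * Re-check the PGD entry under the lock: another context may have
     * installed an L2 table for this range while the lock was dropped
     * around the allocation, in which case the freshly allocated table
     * is freed again below.
     */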
0501     if (!*iopgd) {
0502         if (!iopte)
0503             return ERR_PTR(-ENOMEM);
0504 
0505         *pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
0506                      DMA_TO_DEVICE);
0507         if (dma_mapping_error(obj->dev, *pt_dma)) {
0508             dev_err(obj->dev, "DMA map error for L2 table\n");
0509             iopte_free(obj, iopte, false);
0510             return ERR_PTR(-ENOMEM);
0511         }
0512 
0513         /*
0514          * we rely on the DMA address and the physical address being
0515          * the same for mapping the L2 table
0516          */
0517         if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
0518             dev_err(obj->dev, "DMA translation error for L2 table\n");
0519             dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
0520                      DMA_TO_DEVICE);
0521             iopte_free(obj, iopte, false);
0522             return ERR_PTR(-ENOMEM);
0523         }
0524 
0525         *iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
0526 
0527         flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
0528         dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
0529     } else {
0530         /* We raced, free the redundant table */
0531         iopte_free(obj, iopte, false);
0532     }
0533 
0534 pte_ready:
0535     iopte = iopte_offset(iopgd, da);
0536     *pt_dma = iopgd_page_paddr(iopgd);
0537     dev_vdbg(obj->dev,
0538          "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
0539          __func__, da, iopgd, *iopgd, iopte, *iopte);
0540 
0541     return iopte;
0542 }
0543 
0544 static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
0545 {
0546     u32 *iopgd = iopgd_offset(obj, da);
0547     unsigned long offset = iopgd_index(da) * sizeof(da);
0548 
0549     if ((da | pa) & ~IOSECTION_MASK) {
0550         dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
0551             __func__, da, pa, IOSECTION_SIZE);
0552         return -EINVAL;
0553     }
0554 
0555     *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
0556     flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
0557     return 0;
0558 }
0559 
0560 static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
0561 {
0562     u32 *iopgd = iopgd_offset(obj, da);
0563     unsigned long offset = iopgd_index(da) * sizeof(da);
0564     int i;
0565 
0566     if ((da | pa) & ~IOSUPER_MASK) {
0567         dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
0568             __func__, da, pa, IOSUPER_SIZE);
0569         return -EINVAL;
0570     }
0571 
0572     for (i = 0; i < 16; i++)
0573         *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
0574     flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
0575     return 0;
0576 }
0577 
0578 static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
0579 {
0580     u32 *iopgd = iopgd_offset(obj, da);
0581     dma_addr_t pt_dma;
0582     u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
0583     unsigned long offset = iopte_index(da) * sizeof(da);
0584 
0585     if (IS_ERR(iopte))
0586         return PTR_ERR(iopte);
0587 
0588     *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
0589     flush_iopte_range(obj->dev, pt_dma, offset, 1);
0590 
0591     dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
0592          __func__, da, pa, iopte, *iopte);
0593 
0594     return 0;
0595 }
0596 
0597 static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
0598 {
0599     u32 *iopgd = iopgd_offset(obj, da);
0600     dma_addr_t pt_dma;
0601     u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
0602     unsigned long offset = iopte_index(da) * sizeof(da);
0603     int i;
0604 
0605     if ((da | pa) & ~IOLARGE_MASK) {
0606         dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
0607             __func__, da, pa, IOLARGE_SIZE);
0608         return -EINVAL;
0609     }
0610 
0611     if (IS_ERR(iopte))
0612         return PTR_ERR(iopte);
0613 
0614     for (i = 0; i < 16; i++)
0615         *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
0616     flush_iopte_range(obj->dev, pt_dma, offset, 16);
0617     return 0;
0618 }
0619 
0620 static int
0621 iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
0622 {
0623     int (*fn)(struct omap_iommu *, u32, u32, u32);
0624     u32 prot;
0625     int err;
0626 
0627     if (!obj || !e)
0628         return -EINVAL;
0629 
0630     switch (e->pgsz) {
0631     case MMU_CAM_PGSZ_16M:
0632         fn = iopgd_alloc_super;
0633         break;
0634     case MMU_CAM_PGSZ_1M:
0635         fn = iopgd_alloc_section;
0636         break;
0637     case MMU_CAM_PGSZ_64K:
0638         fn = iopte_alloc_large;
0639         break;
0640     case MMU_CAM_PGSZ_4K:
0641         fn = iopte_alloc_page;
0642         break;
0643     default:
0644         fn = NULL;
0645         break;
0646     }
0647 
0648     if (WARN_ON(!fn))
0649         return -EINVAL;
0650 
0651     prot = get_iopte_attr(e);
0652 
0653     spin_lock(&obj->page_table_lock);
0654     err = fn(obj, e->da, e->pa, prot);
0655     spin_unlock(&obj->page_table_lock);
0656 
0657     return err;
0658 }
0659 
0660 /**
0661  * omap_iopgtable_store_entry - Make an iommu pte entry
0662  * @obj:    target iommu
0663  * @e:      an iommu tlb entry info
0664  **/
0665 static int
0666 omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
0667 {
0668     int err;
0669 
0670     flush_iotlb_page(obj, e->da);
0671     err = iopgtable_store_entry_core(obj, e);
0672     if (!err)
0673         prefetch_iotlb_entry(obj, e);
0674     return err;
0675 }
0676 
0677 /**
0678  * iopgtable_lookup_entry - Lookup an iommu pte entry
0679  * @obj:    target iommu
0680  * @da:     iommu device virtual address
0681  * @ppgd:   iommu pgd entry pointer to be returned
0682  * @ppte:   iommu pte entry pointer to be returned
0683  **/
0684 static void
0685 iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
0686 {
0687     u32 *iopgd, *iopte = NULL;
0688 
0689     iopgd = iopgd_offset(obj, da);
0690     if (!*iopgd)
0691         goto out;
0692 
0693     if (iopgd_is_table(*iopgd))
0694         iopte = iopte_offset(iopgd, da);
0695 out:
0696     *ppgd = iopgd;
0697     *ppte = iopte;
0698 }
0699 
0700 static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
0701 {
0702     size_t bytes;
0703     u32 *iopgd = iopgd_offset(obj, da);
0704     int nent = 1;
0705     dma_addr_t pt_dma;
0706     unsigned long pd_offset = iopgd_index(da) * sizeof(da);
0707     unsigned long pt_offset = iopte_index(da) * sizeof(da);
0708 
0709     if (!*iopgd)
0710         return 0;
0711 
0712     if (iopgd_is_table(*iopgd)) {
0713         int i;
0714         u32 *iopte = iopte_offset(iopgd, da);
0715 
0716         bytes = IOPTE_SIZE;
0717         if (*iopte & IOPTE_LARGE) {
0718             nent *= 16;
0719             /* rewind to the 1st entry */
0720             iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
0721         }
0722         bytes *= nent;
0723         memset(iopte, 0, nent * sizeof(*iopte));
0724         pt_dma = iopgd_page_paddr(iopgd);
0725         flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
0726 
0727         /*
0728          * walk the table to check whether it still holds any valid entries
0729          */
0730         iopte = iopte_offset(iopgd, 0);
0731         for (i = 0; i < PTRS_PER_IOPTE; i++)
0732             if (iopte[i])
0733                 goto out;
0734 
0735         iopte_free(obj, iopte, true);
0736         nent = 1; /* for the next L1 entry */
0737     } else {
0738         bytes = IOPGD_SIZE;
0739         if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
0740             nent *= 16;
0741             /* rewind to the 1st entry */
0742             iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
0743         }
0744         bytes *= nent;
0745     }
0746     memset(iopgd, 0, nent * sizeof(*iopgd));
0747     flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
0748 out:
0749     return bytes;
0750 }
0751 
0752 /**
0753  * iopgtable_clear_entry - Remove an iommu pte entry
0754  * @obj:    target iommu
0755  * @da:     iommu device virtual address
0756  **/
0757 static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
0758 {
0759     size_t bytes;
0760 
0761     spin_lock(&obj->page_table_lock);
0762 
0763     bytes = iopgtable_clear_entry_core(obj, da);
0764     flush_iotlb_page(obj, da);
0765 
0766     spin_unlock(&obj->page_table_lock);
0767 
0768     return bytes;
0769 }
0770 
0771 static void iopgtable_clear_entry_all(struct omap_iommu *obj)
0772 {
0773     unsigned long offset;
0774     int i;
0775 
0776     spin_lock(&obj->page_table_lock);
0777 
0778     for (i = 0; i < PTRS_PER_IOPGD; i++) {
0779         u32 da;
0780         u32 *iopgd;
0781 
0782         da = i << IOPGD_SHIFT;
0783         iopgd = iopgd_offset(obj, da);
0784         offset = iopgd_index(da) * sizeof(da);
0785 
0786         if (!*iopgd)
0787             continue;
0788 
0789         if (iopgd_is_table(*iopgd))
0790             iopte_free(obj, iopte_offset(iopgd, 0), true);
0791 
0792         *iopgd = 0;
0793         flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
0794     }
0795 
0796     flush_iotlb_all(obj);
0797 
0798     spin_unlock(&obj->page_table_lock);
0799 }
0800 
0801 /*
0802  *  Device IOMMU generic operations
0803  */
0804 static irqreturn_t iommu_fault_handler(int irq, void *data)
0805 {
0806     u32 da, errs;
0807     u32 *iopgd, *iopte;
0808     struct omap_iommu *obj = data;
0809     struct iommu_domain *domain = obj->domain;
0810     struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
0811 
0812     if (!omap_domain->dev)
0813         return IRQ_NONE;
0814 
0815     errs = iommu_report_fault(obj, &da);
0816     if (errs == 0)
0817         return IRQ_HANDLED;
0818 
0819     /* Fault callback or TLB/PTE Dynamic loading */
0820     if (!report_iommu_fault(domain, obj->dev, da, 0))
0821         return IRQ_HANDLED;
0822 
0823     iommu_write_reg(obj, 0, MMU_IRQENABLE);
0824 
0825     iopgd = iopgd_offset(obj, da);
0826 
0827     if (!iopgd_is_table(*iopgd)) {
0828         dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
0829             obj->name, errs, da, iopgd, *iopgd);
0830         return IRQ_NONE;
0831     }
0832 
0833     iopte = iopte_offset(iopgd, da);
0834 
0835     dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
0836         obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
0837 
0838     return IRQ_NONE;
0839 }
0840 
0841 /**
0842  * omap_iommu_attach() - attach iommu device to an iommu domain
0843  * @obj:    target omap iommu device
0844  * @iopgd:  page table
0845  **/
0846 static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
0847 {
0848     int err;
0849 
0850     spin_lock(&obj->iommu_lock);
0851 
0852     obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
0853                      DMA_TO_DEVICE);
0854     if (dma_mapping_error(obj->dev, obj->pd_dma)) {
0855         dev_err(obj->dev, "DMA map error for L1 table\n");
0856         err = -ENOMEM;
0857         goto out_err;
0858     }
0859 
0860     obj->iopgd = iopgd;
0861     err = iommu_enable(obj);
0862     if (err)
0863         goto out_err;
0864     flush_iotlb_all(obj);
0865 
0866     spin_unlock(&obj->iommu_lock);
0867 
0868     dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
0869 
0870     return 0;
0871 
0872 out_err:
0873     spin_unlock(&obj->iommu_lock);
0874 
0875     return err;
0876 }
0877 
0878 /**
0879  * omap_iommu_detach - release iommu device
0880  * @obj:    target iommu
0881  **/
0882 static void omap_iommu_detach(struct omap_iommu *obj)
0883 {
0884     if (!obj || IS_ERR(obj))
0885         return;
0886 
0887     spin_lock(&obj->iommu_lock);
0888 
0889     dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
0890              DMA_TO_DEVICE);
0891     obj->pd_dma = 0;
0892     obj->iopgd = NULL;
0893     iommu_disable(obj);
0894 
0895     spin_unlock(&obj->iommu_lock);
0896 
0897     dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
0898 }
0899 
0900 static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
0901 {
0902     struct iotlb_lock lock;
0903     struct cr_regs cr;
0904     struct cr_regs *tmp;
0905     int i;
0906 
0907     /* check if there are any locked tlbs to save */
0908     iotlb_lock_get(obj, &lock);
0909     obj->num_cr_ctx = lock.base;
0910     if (!obj->num_cr_ctx)
0911         return;
0912 
0913     tmp = obj->cr_ctx;
0914     for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
0915         *tmp++ = cr;
0916 }
0917 
0918 static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
0919 {
0920     struct iotlb_lock l;
0921     struct cr_regs *tmp;
0922     int i;
0923 
0924     /* no locked tlbs to restore */
0925     if (!obj->num_cr_ctx)
0926         return;
0927 
0928     l.base = 0;
0929     tmp = obj->cr_ctx;
0930     for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
0931         l.vict = i;
0932         iotlb_lock_set(obj, &l);
0933         iotlb_load_cr(obj, tmp);
0934     }
0935     l.base = obj->num_cr_ctx;
0936     l.vict = i;
0937     iotlb_lock_set(obj, &l);
0938 }
0939 
0940 /**
0941  * omap_iommu_domain_deactivate - deactivate attached iommu devices
0942  * @domain: iommu domain attached to the target iommu device
0943  *
0944  * This API allows the client devices of IOMMU devices to suspend
0945  * the IOMMUs they control at runtime, after they have idled and
0946  * suspended all activity. System suspend will leverage the PM
0947  * driver's late callbacks.
0948  **/
0949 int omap_iommu_domain_deactivate(struct iommu_domain *domain)
0950 {
0951     struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
0952     struct omap_iommu_device *iommu;
0953     struct omap_iommu *oiommu;
0954     int i;
0955 
0956     if (!omap_domain->dev)
0957         return 0;
0958 
0959     iommu = omap_domain->iommus;
0960     iommu += (omap_domain->num_iommus - 1);
0961     for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
0962         oiommu = iommu->iommu_dev;
0963         pm_runtime_put_sync(oiommu->dev);
0964     }
0965 
0966     return 0;
0967 }
0968 EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);
0969 
0970 /**
0971  * omap_iommu_domain_activate - activate attached iommu devices
0972  * @domain: iommu domain attached to the target iommu device
0973  *
0974  * This API allows the client devices of IOMMU devices to resume the
0975  * IOMMUs they control at runtime, before they can resume operations.
0976  * System resume will leverage the PM driver's late callbacks.
0977  **/
0978 int omap_iommu_domain_activate(struct iommu_domain *domain)
0979 {
0980     struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
0981     struct omap_iommu_device *iommu;
0982     struct omap_iommu *oiommu;
0983     int i;
0984 
0985     if (!omap_domain->dev)
0986         return 0;
0987 
0988     iommu = omap_domain->iommus;
0989     for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
0990         oiommu = iommu->iommu_dev;
0991         pm_runtime_get_sync(oiommu->dev);
0992     }
0993 
0994     return 0;
0995 }
0996 EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);
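/*
 * A minimal, hypothetical usage sketch for the two runtime helpers above,
 * as seen from a client driver that powers its remote processor up and
 * down (the client-side function names are illustrative only):
 *
 *      client_suspend():
 *              ... idle the remote processor ...
 *              omap_iommu_domain_deactivate(domain);
 *
 *      client_resume():
 *              omap_iommu_domain_activate(domain);
 *              ... restart the remote processor ...
 */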
0997 
0998 /**
0999  * omap_iommu_runtime_suspend - disable an iommu device
1000  * @dev:    iommu device
1001  *
1002  * This function performs all that is necessary to disable an
1003  * IOMMU device, either during final detachment from a client
1004  * device, or during system/runtime suspend of the device. This
1005  * includes programming all the appropriate IOMMU registers, and
1006  * managing the associated omap_hwmod's state and the device's
1007  * reset line. This function also saves the context of any
1008  * locked TLBs if suspending.
1009  **/
1010 static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
1011 {
1012     struct platform_device *pdev = to_platform_device(dev);
1013     struct iommu_platform_data *pdata = dev_get_platdata(dev);
1014     struct omap_iommu *obj = to_iommu(dev);
1015     int ret;
1016 
1017     /* save the TLBs only during suspend, and not for power down */
1018     if (obj->domain && obj->iopgd)
1019         omap_iommu_save_tlb_entries(obj);
1020 
1021     omap2_iommu_disable(obj);
1022 
1023     if (pdata && pdata->device_idle)
1024         pdata->device_idle(pdev);
1025 
1026     if (pdata && pdata->assert_reset)
1027         pdata->assert_reset(pdev, pdata->reset_name);
1028 
1029     if (pdata && pdata->set_pwrdm_constraint) {
1030         ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
1031         if (ret) {
1032             dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
1033                  ret);
1034         }
1035     }
1036 
1037     return 0;
1038 }
1039 
1040 /**
1041  * omap_iommu_runtime_resume - enable an iommu device
1042  * @dev:    iommu device
1043  *
1044  * This function performs all that is necessary to enable an
1045  * IOMMU device, either during initial attachment to a client
1046  * device, or during system/runtime resume of the device. This
1047  * includes programming all the appropriate IOMMU registers, and
1048  * managing the associated omap_hwmod's state and the device's
1049  * reset line. The function also restores any locked TLBs if
1050  * resuming after a suspend.
1051  **/
1052 static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
1053 {
1054     struct platform_device *pdev = to_platform_device(dev);
1055     struct iommu_platform_data *pdata = dev_get_platdata(dev);
1056     struct omap_iommu *obj = to_iommu(dev);
1057     int ret = 0;
1058 
1059     if (pdata && pdata->set_pwrdm_constraint) {
1060         ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
1061         if (ret) {
1062             dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
1063                  ret);
1064         }
1065     }
1066 
1067     if (pdata && pdata->deassert_reset) {
1068         ret = pdata->deassert_reset(pdev, pdata->reset_name);
1069         if (ret) {
1070             dev_err(dev, "deassert_reset failed: %d\n", ret);
1071             return ret;
1072         }
1073     }
1074 
1075     if (pdata && pdata->device_enable)
1076         pdata->device_enable(pdev);
1077 
1078     /* restore the TLBs only during resume, and not for power up */
1079     if (obj->domain)
1080         omap_iommu_restore_tlb_entries(obj);
1081 
1082     ret = omap2_iommu_enable(obj);
1083 
1084     return ret;
1085 }
1086 
1087 /**
1088  * omap_iommu_prepare - prepare() dev_pm_ops implementation
1089  * @dev:    iommu device
1090  *
1091  * This function performs the necessary checks to determine if the IOMMU
1092  * device needs suspending or not. The function checks if the runtime_pm
1093  * status of the device is suspended, and returns 1 in that case. This
1094  * causes the PM core to skip invoking any of the sleep PM callbacks
1095  * (suspend, suspend_late, resume, resume_early, etc.).
1096  */
1097 static int omap_iommu_prepare(struct device *dev)
1098 {
1099     if (pm_runtime_status_suspended(dev))
1100         return 1;
1101     return 0;
1102 }
1103 
1104 static bool omap_iommu_can_register(struct platform_device *pdev)
1105 {
1106     struct device_node *np = pdev->dev.of_node;
1107 
1108     if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
1109         return true;
1110 
1111     /*
1112      * restrict IOMMU core registration only for processor-port MDMA MMUs
1113      * on DRA7 DSPs
1114      */
1115     if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
1116         (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
1117         return true;
1118 
1119     return false;
1120 }
1121 
1122 static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
1123                           struct omap_iommu *obj)
1124 {
1125     struct device_node *np = pdev->dev.of_node;
1126     int ret;
1127 
1128     if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
1129         return 0;
1130 
1131     if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
1132         dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
1133         return -EINVAL;
1134     }
1135 
1136     obj->syscfg =
1137         syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
1138     if (IS_ERR(obj->syscfg)) {
1139         /* can fail with -EPROBE_DEFER */
1140         ret = PTR_ERR(obj->syscfg);
1141         return ret;
1142     }
1143 
1144     if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
1145                        &obj->id)) {
1146         dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
1147         return -EINVAL;
1148     }
1149 
1150     if (obj->id != 0 && obj->id != 1) {
1151         dev_err(&pdev->dev, "invalid IOMMU instance id\n");
1152         return -EINVAL;
1153     }
1154 
1155     return 0;
1156 }
1157 
1158 /*
1159  *  OMAP Device MMU(IOMMU) detection
1160  */
1161 static int omap_iommu_probe(struct platform_device *pdev)
1162 {
1163     int err = -ENODEV;
1164     int irq;
1165     struct omap_iommu *obj;
1166     struct resource *res;
1167     struct device_node *of = pdev->dev.of_node;
1168 
1169     if (!of) {
1170         pr_err("%s: only DT-based devices are supported\n", __func__);
1171         return -ENODEV;
1172     }
1173 
1174     obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
1175     if (!obj)
1176         return -ENOMEM;
1177 
1178     /*
1179      * self-manage the ordering dependencies between omap_device_enable/idle
1180      * and omap_device_assert/deassert_hardreset API
1181      */
1182     if (pdev->dev.pm_domain) {
1183         dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
1184         pdev->dev.pm_domain = NULL;
1185     }
1186 
1187     obj->name = dev_name(&pdev->dev);
1188     obj->nr_tlb_entries = 32;
1189     err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
1190     if (err && err != -EINVAL)
1191         return err;
1192     if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
1193         return -EINVAL;
1194     if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
1195         obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
1196 
1197     obj->dev = &pdev->dev;
1198     obj->ctx = (void *)obj + sizeof(*obj);
1199     obj->cr_ctx = devm_kzalloc(&pdev->dev,
1200                    sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
1201                    GFP_KERNEL);
1202     if (!obj->cr_ctx)
1203         return -ENOMEM;
1204 
1205     spin_lock_init(&obj->iommu_lock);
1206     spin_lock_init(&obj->page_table_lock);
1207 
1208     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1209     obj->regbase = devm_ioremap_resource(obj->dev, res);
1210     if (IS_ERR(obj->regbase))
1211         return PTR_ERR(obj->regbase);
1212 
1213     err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
1214     if (err)
1215         return err;
1216 
1217     irq = platform_get_irq(pdev, 0);
1218     if (irq < 0)
1219         return -ENODEV;
1220 
1221     err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
1222                    dev_name(obj->dev), obj);
1223     if (err < 0)
1224         return err;
1225     platform_set_drvdata(pdev, obj);
1226 
1227     if (omap_iommu_can_register(pdev)) {
1228         obj->group = iommu_group_alloc();
1229         if (IS_ERR(obj->group))
1230             return PTR_ERR(obj->group);
1231 
1232         err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
1233                          obj->name);
1234         if (err)
1235             goto out_group;
1236 
1237         err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev);
1238         if (err)
1239             goto out_sysfs;
1240     }
1241 
1242     pm_runtime_enable(obj->dev);
1243 
1244     omap_iommu_debugfs_add(obj);
1245 
1246     dev_info(&pdev->dev, "%s registered\n", obj->name);
1247 
1248     /* Re-probe the bus to probe devices attached to this IOMMU */
1249     bus_iommu_probe(&platform_bus_type);
1250 
1251     return 0;
1252 
1253 out_sysfs:
1254     iommu_device_sysfs_remove(&obj->iommu);
1255 out_group:
1256     iommu_group_put(obj->group);
1257     return err;
1258 }
1259 
1260 static int omap_iommu_remove(struct platform_device *pdev)
1261 {
1262     struct omap_iommu *obj = platform_get_drvdata(pdev);
1263 
1264     if (obj->group) {
1265         iommu_group_put(obj->group);
1266         obj->group = NULL;
1267 
1268         iommu_device_sysfs_remove(&obj->iommu);
1269         iommu_device_unregister(&obj->iommu);
1270     }
1271 
1272     omap_iommu_debugfs_remove(obj);
1273 
1274     pm_runtime_disable(obj->dev);
1275 
1276     dev_info(&pdev->dev, "%s removed\n", obj->name);
1277     return 0;
1278 }
1279 
1280 static const struct dev_pm_ops omap_iommu_pm_ops = {
1281     .prepare = omap_iommu_prepare,
1282     SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1283                      pm_runtime_force_resume)
1284     SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
1285                omap_iommu_runtime_resume, NULL)
1286 };
1287 
1288 static const struct of_device_id omap_iommu_of_match[] = {
1289     { .compatible = "ti,omap2-iommu" },
1290     { .compatible = "ti,omap4-iommu" },
1291     { .compatible = "ti,dra7-iommu" },
1292     { .compatible = "ti,dra7-dsp-iommu" },
1293     {},
1294 };
1295 
1296 static struct platform_driver omap_iommu_driver = {
1297     .probe  = omap_iommu_probe,
1298     .remove = omap_iommu_remove,
1299     .driver = {
1300         .name   = "omap-iommu",
1301         .pm = &omap_iommu_pm_ops,
1302         .of_match_table = of_match_ptr(omap_iommu_of_match),
1303     },
1304 };
1305 
1306 static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
1307 {
1308     memset(e, 0, sizeof(*e));
1309 
1310     e->da       = da;
1311     e->pa       = pa;
1312     e->valid    = MMU_CAM_V;
1313     e->pgsz     = pgsz;
1314     e->endian   = MMU_RAM_ENDIAN_LITTLE;
1315     e->elsz     = MMU_RAM_ELSZ_8;
1316     e->mixed    = 0;
1317 
1318     return iopgsz_to_bytes(e->pgsz);
1319 }
1320 
1321 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
1322               phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
1323 {
1324     struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1325     struct device *dev = omap_domain->dev;
1326     struct omap_iommu_device *iommu;
1327     struct omap_iommu *oiommu;
1328     struct iotlb_entry e;
1329     int omap_pgsz;
1330     int ret = -EINVAL;
1331     int i;
1332 
1333     omap_pgsz = bytes_to_iopgsz(bytes);
1334     if (omap_pgsz < 0) {
1335         dev_err(dev, "invalid size to map: %zu\n", bytes);
1336         return -EINVAL;
1337     }
1338 
1339     dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);
1340 
1341     iotlb_init_entry(&e, da, pa, omap_pgsz);
1342 
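    /*
     * Store the same entry in every IOMMU attached to this domain so all
     * instances remain mirror copies of each other; on failure, roll back
     * the instances that were already programmed.
     */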
1343     iommu = omap_domain->iommus;
1344     for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
1345         oiommu = iommu->iommu_dev;
1346         ret = omap_iopgtable_store_entry(oiommu, &e);
1347         if (ret) {
1348             dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
1349                 ret);
1350             break;
1351         }
1352     }
1353 
1354     if (ret) {
1355         while (i--) {
1356             iommu--;
1357             oiommu = iommu->iommu_dev;
1358             iopgtable_clear_entry(oiommu, da);
1359         }
1360     }
1361 
1362     return ret;
1363 }
1364 
1365 static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
1366                    size_t size, struct iommu_iotlb_gather *gather)
1367 {
1368     struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1369     struct device *dev = omap_domain->dev;
1370     struct omap_iommu_device *iommu;
1371     struct omap_iommu *oiommu;
1372     bool error = false;
1373     size_t bytes = 0;
1374     int i;
1375 
1376     dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);
1377 
1378     iommu = omap_domain->iommus;
1379     for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
1380         oiommu = iommu->iommu_dev;
1381         bytes = iopgtable_clear_entry(oiommu, da);
1382         if (!bytes)
1383             error = true;
1384     }
1385 
1386     /*
1387      * simplify return - we are only checking if any of the iommus
1388      * reported an error, but not if all of them are unmapping the
1389      * same number of entries. This should not occur due to the
1390      * mirror programming.
1391      */
1392     return error ? 0 : bytes;
1393 }
1394 
1395 static int omap_iommu_count(struct device *dev)
1396 {
1397     struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
1398     int count = 0;
1399 
1400     while (arch_data->iommu_dev) {
1401         count++;
1402         arch_data++;
1403     }
1404 
1405     return count;
1406 }
1407 
1408 /* caller should call cleanup if this function fails */
1409 static int omap_iommu_attach_init(struct device *dev,
1410                   struct omap_iommu_domain *odomain)
1411 {
1412     struct omap_iommu_device *iommu;
1413     int i;
1414 
1415     odomain->num_iommus = omap_iommu_count(dev);
1416     if (!odomain->num_iommus)
1417         return -EINVAL;
1418 
1419     odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
1420                   GFP_ATOMIC);
1421     if (!odomain->iommus)
1422         return -ENOMEM;
1423 
1424     iommu = odomain->iommus;
1425     for (i = 0; i < odomain->num_iommus; i++, iommu++) {
1426         iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
1427         if (!iommu->pgtable)
1428             return -ENOMEM;
1429 
1430         /*
1431          * should never fail, but please keep this around to ensure
1432          * we keep the hardware happy
1433          */
1434         if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
1435                     IOPGD_TABLE_SIZE)))
1436             return -EINVAL;
1437     }
1438 
1439     return 0;
1440 }
1441 
1442 static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
1443 {
1444     int i;
1445     struct omap_iommu_device *iommu = odomain->iommus;
1446 
1447     for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
1448         kfree(iommu->pgtable);
1449 
1450     kfree(odomain->iommus);
1451     odomain->num_iommus = 0;
1452     odomain->iommus = NULL;
1453 }
1454 
1455 static int
1456 omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
1457 {
1458     struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
1459     struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1460     struct omap_iommu_device *iommu;
1461     struct omap_iommu *oiommu;
1462     int ret = 0;
1463     int i;
1464 
1465     if (!arch_data || !arch_data->iommu_dev) {
1466         dev_err(dev, "device doesn't have an associated iommu\n");
1467         return -EINVAL;
1468     }
1469 
1470     spin_lock(&omap_domain->lock);
1471 
1472     /* only a single client device can be attached to a domain */
1473     if (omap_domain->dev) {
1474         dev_err(dev, "iommu domain is already attached\n");
1475         ret = -EBUSY;
1476         goto out;
1477     }
1478 
1479     ret = omap_iommu_attach_init(dev, omap_domain);
1480     if (ret) {
1481         dev_err(dev, "failed to allocate required iommu data %d\n",
1482             ret);
1483         goto init_fail;
1484     }
1485 
1486     iommu = omap_domain->iommus;
1487     for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
1488         /* configure and enable the omap iommu */
1489         oiommu = arch_data->iommu_dev;
1490         ret = omap_iommu_attach(oiommu, iommu->pgtable);
1491         if (ret) {
1492             dev_err(dev, "can't get omap iommu: %d\n", ret);
1493             goto attach_fail;
1494         }
1495 
1496         oiommu->domain = domain;
1497         iommu->iommu_dev = oiommu;
1498     }
1499 
1500     omap_domain->dev = dev;
1501 
1502     goto out;
1503 
1504 attach_fail:
1505     while (i--) {
1506         iommu--;
1507         arch_data--;
1508         oiommu = iommu->iommu_dev;
1509         omap_iommu_detach(oiommu);
1510         iommu->iommu_dev = NULL;
1511         oiommu->domain = NULL;
1512     }
1513 init_fail:
1514     omap_iommu_detach_fini(omap_domain);
1515 out:
1516     spin_unlock(&omap_domain->lock);
1517     return ret;
1518 }
1519 
1520 static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
1521                    struct device *dev)
1522 {
1523     struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
1524     struct omap_iommu_device *iommu = omap_domain->iommus;
1525     struct omap_iommu *oiommu;
1526     int i;
1527 
1528     if (!omap_domain->dev) {
1529         dev_err(dev, "domain has no attached device\n");
1530         return;
1531     }
1532 
1533     /* only a single device is supported per domain for now */
1534     if (omap_domain->dev != dev) {
1535         dev_err(dev, "invalid attached device\n");
1536         return;
1537     }
1538 
1539     /*
1540      * cleanup in the reverse order of attachment - this addresses
1541      * any h/w dependencies between multiple instances, if any
1542      */
1543     iommu += (omap_domain->num_iommus - 1);
1544     arch_data += (omap_domain->num_iommus - 1);
1545     for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
1546         oiommu = iommu->iommu_dev;
1547         iopgtable_clear_entry_all(oiommu);
1548 
1549         omap_iommu_detach(oiommu);
1550         iommu->iommu_dev = NULL;
1551         oiommu->domain = NULL;
1552     }
1553 
1554     omap_iommu_detach_fini(omap_domain);
1555 
1556     omap_domain->dev = NULL;
1557 }
1558 
1559 static void omap_iommu_detach_dev(struct iommu_domain *domain,
1560                   struct device *dev)
1561 {
1562     struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1563 
1564     spin_lock(&omap_domain->lock);
1565     _omap_iommu_detach_dev(omap_domain, dev);
1566     spin_unlock(&omap_domain->lock);
1567 }
1568 
1569 static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
1570 {
1571     struct omap_iommu_domain *omap_domain;
1572 
1573     if (type != IOMMU_DOMAIN_UNMANAGED)
1574         return NULL;
1575 
1576     omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
1577     if (!omap_domain)
1578         return NULL;
1579 
1580     spin_lock_init(&omap_domain->lock);
1581 
1582     omap_domain->domain.geometry.aperture_start = 0;
1583     omap_domain->domain.geometry.aperture_end   = (1ULL << 32) - 1;
1584     omap_domain->domain.geometry.force_aperture = true;
1585 
1586     return &omap_domain->domain;
1587 }
1588 
1589 static void omap_iommu_domain_free(struct iommu_domain *domain)
1590 {
1591     struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1592 
1593     /*
1594      * Detach the iommu device if one is still attached
1595      * (currently, only one device can be attached).
1596      */
1597     if (omap_domain->dev)
1598         _omap_iommu_detach_dev(omap_domain, omap_domain->dev);
1599 
1600     kfree(omap_domain);
1601 }
1602 
1603 static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
1604                        dma_addr_t da)
1605 {
1606     struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
1607     struct omap_iommu_device *iommu = omap_domain->iommus;
1608     struct omap_iommu *oiommu = iommu->iommu_dev;
1609     struct device *dev = oiommu->dev;
1610     u32 *pgd, *pte;
1611     phys_addr_t ret = 0;
1612 
1613     /*
1614      * all the iommus within the domain will have identical programming,
1615      * so perform the lookup using just the first iommu
1616      */
1617     iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
1618 
1619     if (pte) {
1620         if (iopte_is_small(*pte))
1621             ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
1622         else if (iopte_is_large(*pte))
1623             ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
1624         else
1625             dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
1626                 (unsigned long long)da);
1627     } else {
1628         if (iopgd_is_section(*pgd))
1629             ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
1630         else if (iopgd_is_super(*pgd))
1631             ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
1632         else
1633             dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
1634                 (unsigned long long)da);
1635     }
1636 
1637     return ret;
1638 }
1639 
1640 static struct iommu_device *omap_iommu_probe_device(struct device *dev)
1641 {
1642     struct omap_iommu_arch_data *arch_data, *tmp;
1643     struct platform_device *pdev;
1644     struct omap_iommu *oiommu;
1645     struct device_node *np;
1646     int num_iommus, i;
1647 
1648     /*
1649      * Allocate the per-device iommu structure for DT-based devices.
1650      *
1651      * TODO: Simplify this when removing non-DT support completely from the
1652      * IOMMU users.
1653      */
1654     if (!dev->of_node)
1655         return ERR_PTR(-ENODEV);
1656 
1657     /*
1658      * retrieve the count of IOMMU nodes using phandle size as element size
1659      * since #iommu-cells = 0 for OMAP
1660      */
1661     num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
1662                              sizeof(phandle));
1663     if (num_iommus < 0)
1664         return ERR_PTR(-ENODEV);
1665 
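    /*
     * Allocate one extra zeroed element: the arch_data array is walked
     * until an entry with a NULL iommu_dev is found (see omap_iommu_count()
     * and the save/restore context helpers), so it must remain
     * sentinel-terminated.
     */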
1666     arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
1667     if (!arch_data)
1668         return ERR_PTR(-ENOMEM);
1669 
1670     for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
1671         np = of_parse_phandle(dev->of_node, "iommus", i);
1672         if (!np) {
1673             kfree(arch_data);
1674             return ERR_PTR(-EINVAL);
1675         }
1676 
1677         pdev = of_find_device_by_node(np);
1678         if (!pdev) {
1679             of_node_put(np);
1680             kfree(arch_data);
1681             return ERR_PTR(-ENODEV);
1682         }
1683 
1684         oiommu = platform_get_drvdata(pdev);
1685         if (!oiommu) {
1686             of_node_put(np);
1687             kfree(arch_data);
1688             return ERR_PTR(-EINVAL);
1689         }
1690 
1691         tmp->iommu_dev = oiommu;
1692         tmp->dev = &pdev->dev;
1693 
1694         of_node_put(np);
1695     }
1696 
1697     dev_iommu_priv_set(dev, arch_data);
1698 
1699     /*
1700      * use the first IOMMU alone for the sysfs device linking.
1701      * TODO: Evaluate if a single iommu_group needs to be
1702      * maintained for both IOMMUs
1703      */
1704     oiommu = arch_data->iommu_dev;
1705 
1706     return &oiommu->iommu;
1707 }
1708 
1709 static void omap_iommu_release_device(struct device *dev)
1710 {
1711     struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
1712 
1713     if (!dev->of_node || !arch_data)
1714         return;
1715 
1716     dev_iommu_priv_set(dev, NULL);
1717     kfree(arch_data);
1719 }
1720 
1721 static struct iommu_group *omap_iommu_device_group(struct device *dev)
1722 {
1723     struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
1724     struct iommu_group *group = ERR_PTR(-EINVAL);
1725 
1726     if (!arch_data)
1727         return ERR_PTR(-ENODEV);
1728 
1729     if (arch_data->iommu_dev)
1730         group = iommu_group_ref_get(arch_data->iommu_dev->group);
1731 
1732     return group;
1733 }
1734 
1735 static const struct iommu_ops omap_iommu_ops = {
1736     .domain_alloc   = omap_iommu_domain_alloc,
1737     .probe_device   = omap_iommu_probe_device,
1738     .release_device = omap_iommu_release_device,
1739     .device_group   = omap_iommu_device_group,
1740     .pgsize_bitmap  = OMAP_IOMMU_PGSIZES,
1741     .default_domain_ops = &(const struct iommu_domain_ops) {
1742         .attach_dev = omap_iommu_attach_dev,
1743         .detach_dev = omap_iommu_detach_dev,
1744         .map        = omap_iommu_map,
1745         .unmap      = omap_iommu_unmap,
1746         .iova_to_phys   = omap_iommu_iova_to_phys,
1747         .free       = omap_iommu_domain_free,
1748     }
1749 };
1750 
1751 static int __init omap_iommu_init(void)
1752 {
1753     struct kmem_cache *p;
1754     const slab_flags_t flags = SLAB_HWCACHE_ALIGN;
1755     size_t align = 1 << 10; /* L2 pagetable alignment */
1756     struct device_node *np;
1757     int ret;
1758 
1759     np = of_find_matching_node(NULL, omap_iommu_of_match);
1760     if (!np)
1761         return 0;
1762 
1763     of_node_put(np);
1764 
1765     p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
1766                   NULL);
1767     if (!p)
1768         return -ENOMEM;
1769     iopte_cachep = p;
1770 
1771     omap_iommu_debugfs_init();
1772 
1773     ret = platform_driver_register(&omap_iommu_driver);
1774     if (ret) {
1775         pr_err("%s: failed to register driver\n", __func__);
1776         goto fail_driver;
1777     }
1778 
1779     ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
1780     if (ret)
1781         goto fail_bus;
1782 
1783     return 0;
1784 
1785 fail_bus:
1786     platform_driver_unregister(&omap_iommu_driver);
1787 fail_driver:
1788     kmem_cache_destroy(iopte_cachep);
1789     return ret;
1790 }
1791 subsys_initcall(omap_iommu_init);
1792 /* must be ready before omap3isp is probed */