// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for MTK architected m4u v1 implementations
 *
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Honghui Zhang <honghui.zhang@mediatek.com>
 *
 * Based on drivers/iommu/mtk_iommu.c
 */
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
#include <dt-bindings/memory/mtk-memory-port.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>

#define REG_MMU_PT_BASE_ADDR			0x000

#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define F_MMU_FAULT_VA_MSK			0xfffff000
#define MTK_PROTECT_PA_ALIGN			128

#define REG_MMU_CTRL_REG			0x210
#define F_MMU_CTRL_COHERENT_EN			BIT(8)
#define REG_MMU_IVRP_PADDR			0x214
#define REG_MMU_INT_CONTROL			0x220
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TABLE_WALK_FAULT			BIT(4)
#define F_INT_TLB_MISS_FAULT			BIT(5)
#define F_INT_PFH_DMA_FIFO_OVERFLOW		BIT(6)
#define F_INT_MISS_DMA_FIFO_OVERFLOW		BIT(7)

#define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_FAULT_ST			0x224
#define REG_MMU_FAULT_VA			0x228
#define REG_MMU_INVLD_PA			0x22C
#define REG_MMU_INT_ID				0x388
#define REG_MMU_INVALIDATE			0x5c0
#define REG_MMU_INVLD_START_A			0x5c4
#define REG_MMU_INVLD_END_A			0x5c8

#define REG_MMU_INV_SEL				0x5d8
#define REG_MMU_STANDARD_AXI_MODE		0x5e8

#define REG_MMU_DCM				0x5f0
#define F_MMU_DCM_ON				BIT(1)
#define REG_MMU_CPE_DONE			0x60c
#define F_DESC_VALID				0x2
#define F_DESC_NONSEC				BIT(3)
#define MT2701_M4U_TF_LARB(TF)			(6 - (((TF) >> 13) & 0x7))
#define MT2701_M4U_TF_PORT(TF)			(((TF) >> 8) & 0xF)
/* The MTK generation-one IOMMU HW only supports 4K page mappings */
#define MT2701_IOMMU_PAGE_SHIFT			12
#define MT2701_IOMMU_PAGE_SIZE			(1UL << MT2701_IOMMU_PAGE_SHIFT)
#define MT2701_LARB_NR_MAX			3

/*
 * The MTK m4u supports a 4GB IOVA address space and only 4K page
 * mappings, so the page table size is exactly 4MB:
 * (4GB / 4KB) entries * 4 bytes per entry = 1M * 4B = 4MB.
 */
#define M2701_IOMMU_PGT_SIZE			SZ_4M

struct mtk_iommu_v1_suspend_reg {
	u32			standard_axi_mode;
	u32			dcm_dis;
	u32			ctrl_reg;
	u32			int_control0;
};

struct mtk_iommu_v1_data {
	void __iomem		*base;
	int			irq;
	struct device		*dev;
	struct clk		*bclk;
	phys_addr_t		protect_base; /* protect memory base */
	struct mtk_iommu_v1_domain	*m4u_dom;

	struct iommu_device	iommu;
	struct dma_iommu_mapping	*mapping;
	struct mtk_smi_larb_iommu	larb_imu[MTK_LARB_NR_MAX];

	struct mtk_iommu_v1_suspend_reg	reg;
};

struct mtk_iommu_v1_domain {
	spinlock_t		pgtlock; /* lock for page table */
	struct iommu_domain	domain;
	u32			*pgt_va;
	dma_addr_t		pgt_pa;
	struct mtk_iommu_v1_data	*data;
};

static int mtk_iommu_v1_bind(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);

	return component_bind_all(dev, &data->larb_imu);
}

static void mtk_iommu_v1_unbind(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);

	component_unbind_all(dev, &data->larb_imu);
}

static struct mtk_iommu_v1_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_v1_domain, domain);
}

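/*
 * Each larb's ports occupy a contiguous range of global master IDs
 * starting at that larb's LARBx_PORT_OFFSET. mt2701_m4u_to_larb() finds
 * which range a master ID falls into; mt2701_m4u_to_port() returns the
 * port's offset within that range.
 */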
static const int mt2701_m4u_in_larb[] = {
	LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
	LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
};

static inline int mt2701_m4u_to_larb(int id)
{
	int i;

	for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--)
		if (id >= mt2701_m4u_in_larb[i])
			return i;

	return 0;
}

static inline int mt2701_m4u_to_port(int id)
{
	int larb = mt2701_m4u_to_larb(id);

	return id - mt2701_m4u_in_larb[larb];
}

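/* Select both invalidation engines and invalidate the whole TLB. */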
static void mtk_iommu_v1_tlb_flush_all(struct mtk_iommu_v1_data *data)
{
	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			data->base + REG_MMU_INV_SEL);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the TLB flush is done */
}

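/*
 * Invalidate only the given IOVA range: program the start/end registers,
 * trigger the range invalidation and poll REG_MMU_CPE_DONE for
 * completion, falling back to a full flush on timeout.
 */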
static void mtk_iommu_v1_tlb_flush_range(struct mtk_iommu_v1_data *data,
					 unsigned long iova, size_t size)
{
	int ret;
	u32 tmp;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
		data->base + REG_MMU_INV_SEL);
	writel_relaxed(iova & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_START_A);
	writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);

	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
					tmp, tmp != 0, 10, 100000);
	if (ret) {
		dev_warn(data->dev,
			 "Partial TLB flush timed out, falling back to full flush\n");
		mtk_iommu_v1_tlb_flush_all(data);
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}

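/*
 * Translation-fault interrupt handler: read the fault status, IOVA, PA
 * and master ID from the registers, report the fault, then clear the
 * interrupt and flush the whole TLB.
 */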
static irqreturn_t mtk_iommu_v1_isr(int irq, void *dev_id)
{
	struct mtk_iommu_v1_data *data = dev_id;
	struct mtk_iommu_v1_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;

	/* Read error information from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);

	fault_iova &= F_MMU_FAULT_VA_MSK;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = MT2701_M4U_TF_LARB(regval);
	fault_port = MT2701_M4U_TF_PORT(regval);

	/*
	 * The MTK v1 IOMMU HW cannot determine whether a fault was caused
	 * by a read or a write, so report every fault as a read fault.
	 */
	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			IOMMU_FAULT_READ))
		dev_err_ratelimited(data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d\n",
			int_state, fault_iova, fault_pa,
			fault_larb, fault_port);

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	mtk_iommu_v1_tlb_flush_all(data);

	return IRQ_HANDLED;
}

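/*
 * Enable or disable translation for every master port of @dev by
 * updating the per-larb MMU enable bitmask, which is consumed by the
 * MediaTek SMI larb driver.
 */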
static void mtk_iommu_v1_config(struct mtk_iommu_v1_data *data,
				struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu    *larb_mmu;
	unsigned int                 larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
		portid = mt2701_m4u_to_port(fwspec->ids[i]);
		larb_mmu = &data->larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

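/*
 * Allocate the single 4MB flat page table and program its physical
 * address into the M4U. Called once, on the first attach.
 */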
static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data)
{
	struct mtk_iommu_v1_domain *dom = data->m4u_dom;

	spin_lock_init(&dom->pgtlock);

	dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
					 &dom->pgt_pa, GFP_KERNEL);
	if (!dom->pgt_va)
		return -ENOMEM;

	writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR);

	dom->data = data;

	return 0;
}

static struct iommu_domain *mtk_iommu_v1_domain_alloc(unsigned type)
{
	struct mtk_iommu_v1_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	return &dom->domain;
}

static void mtk_iommu_v1_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_v1_data *data = dom->data;

	dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
			dom->pgt_va, dom->pgt_pa);
	kfree(to_mtk_domain(domain));
}

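/*
 * The HW supports a single domain, so only an attach to the internally
 * created mapping's domain takes effect; the first such attach also
 * finalises the page table.
 */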
static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	struct dma_iommu_mapping *mtk_mapping;
	int ret;

	/* Only allow the domain created internally. */
	mtk_mapping = data->mapping;
	if (mtk_mapping->domain != domain)
		return 0;

	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		ret = mtk_iommu_v1_domain_finalise(data);
		if (ret) {
			data->m4u_dom = NULL;
			return ret;
		}
	}

	mtk_iommu_v1_config(data, dev, true);
	return 0;
}

static void mtk_iommu_v1_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);

	mtk_iommu_v1_config(data, dev, false);
}

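/*
 * Map @size bytes by writing one 32-bit descriptor per 4K page. If an
 * entry is already in use, zero the entries written so far and return
 * -EEXIST.
 */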
static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
	unsigned long flags;
	unsigned int i;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	u32 pabase = (u32)paddr;
	int map_size = 0;

	spin_lock_irqsave(&dom->pgtlock, flags);
	for (i = 0; i < page_num; i++) {
		if (pgt_base_iova[i]) {
			memset(pgt_base_iova, 0, i * sizeof(u32));
			break;
		}
		pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
		pabase += MT2701_IOMMU_PAGE_SIZE;
		map_size += MT2701_IOMMU_PAGE_SIZE;
	}

	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);

	return map_size == size ? 0 : -EEXIST;
}

static size_t mtk_iommu_v1_unmap(struct iommu_domain *domain, unsigned long iova,
				 size_t size, struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;

	spin_lock_irqsave(&dom->pgtlock, flags);
	memset(pgt_base_iova, 0, page_num * sizeof(u32));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);

	return size;
}

static phys_addr_t mtk_iommu_v1_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT));
	pa = pa & (~(MT2701_IOMMU_PAGE_SIZE - 1));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}

static const struct iommu_ops mtk_iommu_v1_ops;

/*
 * The MTK generation-one IOMMU HW only supports one IOMMU domain;
 * all of its clients share the same IOVA address space.
 */
static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_args *args)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_v1_data *data;
	struct platform_device *m4updev;
	struct dma_iommu_mapping *mtk_mapping;
	int ret;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!fwspec) {
		ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_v1_ops);
		if (ret)
			return ret;
		fwspec = dev_iommu_fwspec_get(dev);
	} else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_v1_ops) {
		return -EINVAL;
	}

	if (!dev_iommu_priv_get(dev)) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
	}

	ret = iommu_fwspec_add_ids(dev, args->args, 1);
	if (ret)
		return ret;

	data = dev_iommu_priv_get(dev);
	mtk_mapping = data->mapping;
	if (!mtk_mapping) {
		/* The MTK IOMMU supports a 4GB IOVA address space. */
		mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
						       0, 1ULL << 32);
		if (IS_ERR(mtk_mapping))
			return PTR_ERR(mtk_mapping);

		data->mapping = mtk_mapping;
	}

	return 0;
}

static int mtk_iommu_v1_def_domain_type(struct device *dev)
{
	return IOMMU_DOMAIN_UNMANAGED;
}

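/*
 * Parse the "iommus" phandles of @dev, create (or reuse) the shared ARM
 * DMA mapping, check that all of the device's master IDs belong to a
 * single larb, and link the device to that larb so it is runtime-resumed
 * together with it.
 */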
static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct of_phandle_args iommu_spec;
	struct mtk_iommu_v1_data *data;
	int err, idx = 0, larbid, larbidx;
	struct device_link *link;
	struct device *larbdev;

	/*
	 * In the deferred case, free the existing fwspec.
	 * Always initialize the fwspec internally.
	 */
	if (fwspec) {
		iommu_fwspec_free(dev);
		fwspec = dev_iommu_fwspec_get(dev);
	}

	while (!of_parse_phandle_with_args(dev->of_node, "iommus",
					   "#iommu-cells",
					   idx, &iommu_spec)) {

		err = mtk_iommu_v1_create_mapping(dev, &iommu_spec);
		of_node_put(iommu_spec.np);
		if (err)
			return ERR_PTR(err);

		/* dev->iommu_fwspec might have changed */
		fwspec = dev_iommu_fwspec_get(dev);
		idx++;
	}

	if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
		return ERR_PTR(-ENODEV); /* Not an IOMMU client device */

	data = dev_iommu_priv_get(dev);

	/* Link the consumer device with the smi-larb device (supplier) */
	larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
	if (larbid >= MT2701_LARB_NR_MAX)
		return ERR_PTR(-EINVAL);

	for (idx = 1; idx < fwspec->num_ids; idx++) {
		larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]);
		if (larbid != larbidx) {
			dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
				larbid, larbidx);
			return ERR_PTR(-EINVAL);
		}
	}

	larbdev = data->larb_imu[larbid].dev;
	if (!larbdev)
		return ERR_PTR(-EINVAL);

	link = device_link_add(dev, larbdev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
	if (!link)
		dev_err(dev, "Unable to link %s\n", dev_name(larbdev));

	return &data->iommu;
}

static void mtk_iommu_v1_probe_finalize(struct device *dev)
{
	struct dma_iommu_mapping *mtk_mapping;
	struct mtk_iommu_v1_data *data;
	int err;

	data        = dev_iommu_priv_get(dev);
	mtk_mapping = data->mapping;

	err = arm_iommu_attach_device(dev, mtk_mapping);
	if (err)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void mtk_iommu_v1_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_v1_data *data;
	struct device *larbdev;
	unsigned int larbid;

	data = dev_iommu_priv_get(dev);
	larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
	larbdev = data->larb_imu[larbid].dev;
	device_link_remove(dev, larbdev);
}

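/*
 * One-time hardware setup: enable the bus clock, configure translation
 * control and fault interrupts, program the protect-buffer address,
 * enable DCM and request the fault IRQ.
 */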
static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TABLE_WALK_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_PFH_DMA_FIFO_OVERFLOW |
		F_INT_MISS_DMA_FIFO_OVERFLOW;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	/* Protect memory: HW will write here on a translation fault. */
	writel_relaxed(data->protect_base,
			data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_v1_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static const struct iommu_ops mtk_iommu_v1_ops = {
	.domain_alloc	= mtk_iommu_v1_domain_alloc,
	.probe_device	= mtk_iommu_v1_probe_device,
	.probe_finalize	= mtk_iommu_v1_probe_finalize,
	.release_device	= mtk_iommu_v1_release_device,
	.def_domain_type = mtk_iommu_v1_def_domain_type,
	.device_group	= generic_device_group,
	.pgsize_bitmap	= ~0UL << MT2701_IOMMU_PAGE_SHIFT,
	.owner		= THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= mtk_iommu_v1_attach_device,
		.detach_dev	= mtk_iommu_v1_detach_device,
		.map		= mtk_iommu_v1_map,
		.unmap		= mtk_iommu_v1_unmap,
		.iova_to_phys	= mtk_iommu_v1_iova_to_phys,
		.free		= mtk_iommu_v1_domain_free,
	}
};

static const struct of_device_id mtk_iommu_v1_of_ids[] = {
	{ .compatible = "mediatek,mt2701-m4u", },
	{}
};

static const struct component_master_ops mtk_iommu_v1_com_ops = {
	.bind		= mtk_iommu_v1_bind,
	.unbind		= mtk_iommu_v1_unbind,
};

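/*
 * Probe: allocate the protect buffer the HW writes to on faults, map the
 * registers, resolve all "mediatek,larbs" devices (deferring until they
 * have probed), initialise the HW, register the IOMMU and bind the larbs
 * through the component framework.
 */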
static int mtk_iommu_v1_probe(struct platform_device *pdev)
{
	struct device			*dev = &pdev->dev;
	struct mtk_iommu_v1_data	*data;
	struct resource			*res;
	struct component_match		*match = NULL;
	void				*protect;
	int				larb_nr, ret, i;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->dev = dev;

	/* Protect memory. HW will access it on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
			GFP_KERNEL | GFP_DMA);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode)) {
			of_node_put(larbnode);
			continue;
		}

		plarbdev = of_find_device_by_node(larbnode);
		if (!plarbdev) {
			of_node_put(larbnode);
			return -ENODEV;
		}
		if (!plarbdev->dev.driver) {
			of_node_put(larbnode);
			return -EPROBE_DEFER;
		}
		data->larb_imu[i].dev = &plarbdev->dev;

		component_match_add_release(dev, &match, component_release_of,
					    component_compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_v1_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(&pdev->dev));
	if (ret)
		return ret;

	ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
	if (ret)
		goto out_sysfs_remove;

	if (!iommu_present(&platform_bus_type)) {
		ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_v1_ops);
		if (ret)
			goto out_dev_unreg;
	}

	ret = component_master_add_with_match(dev, &mtk_iommu_v1_com_ops, match);
	if (ret)
		goto out_bus_set_null;
	return ret;

out_bus_set_null:
	bus_set_iommu(&platform_bus_type, NULL);
out_dev_unreg:
	iommu_device_unregister(&data->iommu);
out_sysfs_remove:
	iommu_device_sysfs_remove(&data->iommu);
	return ret;
}

static int mtk_iommu_v1_remove(struct platform_device *pdev)
{
	struct mtk_iommu_v1_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);
	return 0;
}

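/*
 * System sleep support: save the registers that would otherwise be lost
 * across suspend and restore them on resume; the page-table base and
 * protect address are reprogrammed from driver state.
 */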
static int __maybe_unused mtk_iommu_v1_suspend(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL);
	return 0;
}

static int __maybe_unused mtk_iommu_v1_resume(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL);
	writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR);
	return 0;
}

static const struct dev_pm_ops mtk_iommu_v1_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_v1_suspend, mtk_iommu_v1_resume)
};

static struct platform_driver mtk_iommu_v1_driver = {
	.probe	= mtk_iommu_v1_probe,
	.remove	= mtk_iommu_v1_remove,
	.driver	= {
		.name = "mtk-iommu-v1",
		.of_match_table = mtk_iommu_v1_of_ids,
		.pm = &mtk_iommu_v1_pm_ops,
	}
};
module_platform_driver(mtk_iommu_v1_driver);

MODULE_DESCRIPTION("IOMMU API for MediaTek M4U v1 implementations");
MODULE_LICENSE("GPL v2");