// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *      http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
               ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
              ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
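/*
 * Summary of the entry type encodings implied by the helpers above (derived
 * from these macros and the mk_lv*ent_*() helpers further below):
 *   lv1 entry, bits [1:0]: 0 or 3 = fault, 1 = link to an lv2 table,
 *                          2 = 1MiB section mapping
 *   lv2 entry, bits [1:0]: 0 = fault, 1 = 64KiB large page,
 *                          2 = 4KiB small page (lv2ent_small() also accepts 3)
 */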

/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
 * v5.0 introduced support for 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support the address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to proper
 * value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
    ((0 << 15) | (0 << 10)), /* no access */
    ((1 << 15) | (1 << 10)), /* IOMMU_READ only */
    ((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
    ((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
    (0 << 4), /* no access */
    (1 << 4), /* IOMMU_READ only */
    (2 << 4), /* IOMMU_WRITE only */
    (3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
    ((0 << 9) | (0 << 4)), /* no access */
    ((1 << 9) | (1 << 4)), /* IOMMU_READ only */
    ((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
    ((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
    (0 << 2), /* no access */
    (1 << 2), /* IOMMU_READ only */
    (2 << 2), /* IOMMU_WRITE only */
    (3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
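/*
 * Note: the LV1_PROT/LV2_PROT tables above are indexed directly with the prot
 * bits passed to map(), masked by SYSMMU_SUPPORTED_PROT_BITS. E.g. index 0 is
 * "no access", 1 (IOMMU_READ) is read-only and 3 (IOMMU_READ | IOMMU_WRITE)
 * is read/write.
 */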

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
    return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
    return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
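/*
 * Illustrative example (not part of the driver): for IOVA 0x12345678,
 * lv1ent_offset() returns 0x123 (bits [31:20], the 1MiB section index),
 * lv2ent_offset() returns 0x45 (bits [19:12], the 4KiB page index within
 * the section) and the in-page offset is 0x678 (bits [11:0]).
 */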

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
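/*
 * Illustrative example (not part of the driver), assuming a v5+ SYSMMU where
 * PG_ENT_SHIFT == 4: a 36-bit section address such as 0x8_4010_0000 is stored
 * in a 32-bit lv1 entry by mk_lv1ent_sect() as (0x840100000 >> 4) | prot | 2,
 * and section_phys() recovers it by shifting the entry back left by 4 bits
 * and masking with SECT_MASK.
 */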

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK  0x7
#define CTRL_DISABLE    0x0

#define CFG_LRU     0x1
#define CFG_EAP     (1 << 2)
#define CFG_QOS(n)  ((n & 0xF) << 7)
#define CFG_ACGEN   (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL  (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE   (1 << 20) /* System MMU 3.2+ only */

#define CTRL_VM_ENABLE          BIT(0)
#define CTRL_VM_FAULT_MODE_STALL    BIT(3)
#define CAPA0_CAPA1_EXIST       BIT(11)
#define CAPA1_VCR_ENABLED       BIT(14)

/* common registers */
#define REG_MMU_CTRL        0x000
#define REG_MMU_CFG     0x004
#define REG_MMU_STATUS      0x008
#define REG_MMU_VERSION     0x034

#define MMU_MAJ_VER(val)    ((val) >> 7)
#define MMU_MIN_VER(val)    ((val) & 0x7F)
#define MMU_RAW_VER(reg)    (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)  ((((maj) & 0xF) << 7) | ((min) & 0x7F))
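/*
 * Illustrative example (not part of the driver): REG_MMU_VERSION keeps the
 * version in bits [31:21], so MMU_RAW_VER() extracts an 11-bit value whose
 * upper 4 bits are the major and lower 7 bits the minor version, e.g.
 * MAKE_MMU_VER(3, 3) == 0x183, MMU_MAJ_VER(0x183) == 3 and
 * MMU_MIN_VER(0x183) == 3.
 */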

/* v1.x - v3.x registers */
#define REG_PAGE_FAULT_ADDR 0x024
#define REG_AW_FAULT_ADDR   0x028
#define REG_AR_FAULT_ADDR   0x02C
#define REG_DEFAULT_SLAVE_ADDR  0x030

/* v5.x registers */
#define REG_V5_FAULT_AR_VA  0x070
#define REG_V5_FAULT_AW_VA  0x080

/* v7.x registers */
#define REG_V7_CAPA0        0x870
#define REG_V7_CAPA1        0x874
#define REG_V7_CTRL_VM      0x8000

#define has_sysmmu(dev)     (dev_iommu_priv_get(dev) != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
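/*
 * ZERO_LV2LINK is an lv1 entry of "lv2 table link" type that points at the
 * shared all-zero lv2 table. Unused lv1 slots are set to it instead of 0 so
 * that the FLPD cache of System MMU v3.3 never caches a plain fault entry
 * (see the workaround comments near exynos_iommu_map() below).
 */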

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
    return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
    return (sysmmu_pte_t *)phys_to_virt(
                lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
    unsigned int bit;   /* bit number in STATUS register */
    unsigned short addr_reg; /* register to read VA fault address */
    const char *name;   /* human readable fault name */
    unsigned int type;  /* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
    { 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
    { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
    { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
    { 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
    { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
    { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
    { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
    { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
    { 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
    { 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
    { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
    { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
    { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
    { 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
    { 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
    { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
    { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
    { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev->iommu->priv of the master device
 * on device add. It contains a list of the SYSMMU controllers defined by the
 * device tree which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
    struct list_head controllers;   /* list of sysmmu_drvdata.owner_node */
    struct iommu_domain *domain;    /* domain this device is attached to */
    struct mutex rpm_lock;      /* for runtime pm of all sysmmus */
};

/*
 * This structure is the Exynos-specific generalization of struct iommu_domain.
 * It contains the list of SYSMMU controllers from all master devices which
 * have been attached to this domain, as well as the page tables of the I/O
 * address space defined by it. It is usually referenced by the 'domain'
 * pointer.
 */
struct exynos_iommu_domain {
    struct list_head clients; /* list of sysmmu_drvdata.domain_node */
    sysmmu_pte_t *pgtable;  /* lv1 page table, 16KB */
    short *lv2entcnt;   /* free lv2 entry counter for each section */
    spinlock_t lock;    /* lock for modifying the list of clients */
    spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
    struct iommu_domain domain; /* generic domain data structure */
};

/*
 * SysMMU version specific data. Contains offsets for the registers which can
 * be found in different SysMMU variants, but have different offset values.
 */
struct sysmmu_variant {
    u32 pt_base;        /* page table base address (physical) */
    u32 flush_all;      /* invalidate all TLB entries */
    u32 flush_entry;    /* invalidate specific TLB entry */
    u32 flush_range;    /* invalidate TLB entries in specified range */
    u32 flush_start;    /* start address of range invalidation */
    u32 flush_end;      /* end address of range invalidation */
    u32 int_status;     /* interrupt status information */
    u32 int_clear;      /* clear the interrupt */
};

/*
 * This structure holds all data of a single SYSMMU controller. This includes
 * HW resources like registers and clocks, pointers and list nodes connecting
 * it to all the other structures, internal state and parameters read from the
 * device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
    struct device *sysmmu;      /* SYSMMU controller device */
    struct device *master;      /* master device (owner) */
    struct device_link *link;   /* runtime PM link to master */
    void __iomem *sfrbase;      /* our registers */
    struct clk *clk;        /* SYSMMU's clock */
    struct clk *aclk;       /* SYSMMU's aclk clock */
    struct clk *pclk;       /* SYSMMU's pclk clock */
    struct clk *clk_master;     /* master's device clock */
    spinlock_t lock;        /* lock for modifying state */
    bool active;            /* current status */
    struct exynos_iommu_domain *domain; /* domain we belong to */
    struct list_head domain_node;   /* node for domain clients list */
    struct list_head owner_node;    /* node for owner controllers list */
    phys_addr_t pgtable;        /* assigned page table structure */
    unsigned int version;       /* our version */

    struct iommu_device iommu;  /* IOMMU core handle */
    const struct sysmmu_variant *variant; /* version specific data */

    /* v7 fields */
    bool has_vcr;           /* virtual machine control register */
};

#define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)

/* SysMMU v1..v3 */
static const struct sysmmu_variant sysmmu_v1_variant = {
    .flush_all  = 0x0c,
    .flush_entry    = 0x10,
    .pt_base    = 0x14,
    .int_status = 0x18,
    .int_clear  = 0x1c,
};

/* SysMMU v5 and v7 (non-VM capable) */
static const struct sysmmu_variant sysmmu_v5_variant = {
    .pt_base    = 0x0c,
    .flush_all  = 0x10,
    .flush_entry    = 0x14,
    .flush_range    = 0x18,
    .flush_start    = 0x20,
    .flush_end  = 0x24,
    .int_status = 0x60,
    .int_clear  = 0x64,
};

/* SysMMU v7: VM capable register set */
static const struct sysmmu_variant sysmmu_v7_vm_variant = {
    .pt_base    = 0x800c,
    .flush_all  = 0x8010,
    .flush_entry    = 0x8014,
    .flush_range    = 0x8018,
    .flush_start    = 0x8020,
    .flush_end  = 0x8024,
    .int_status = 0x60,
    .int_clear  = 0x64,
};
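/*
 * Illustrative example (not part of the driver): with the tables above,
 * SYSMMU_REG(data, flush_all) resolves to data->sfrbase + 0x10 for the v5
 * variant, while the same accessor picks offset 0x8010 for the v7 VM-capable
 * register layout.
 */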

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
    return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
    writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
    int i = 120;

    writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
    while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
        --i;

    if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
        sysmmu_unblock(data);
        return false;
    }

    return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
    writel(0x1, SYSMMU_REG(data, flush_all));
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
                sysmmu_iova_t iova, unsigned int num_inv)
{
    unsigned int i;

    if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) {
        for (i = 0; i < num_inv; i++) {
            writel((iova & SPAGE_MASK) | 1,
                   SYSMMU_REG(data, flush_entry));
            iova += SPAGE_SIZE;
        }
    } else {
        writel(iova & SPAGE_MASK, SYSMMU_REG(data, flush_start));
        writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
               SYSMMU_REG(data, flush_end));
        writel(0x1, SYSMMU_REG(data, flush_range));
    }
}

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
    u32 pt_base;

    if (MMU_MAJ_VER(data->version) < 5)
        pt_base = pgd;
    else
        pt_base = pgd >> SPAGE_ORDER;

    writel(pt_base, SYSMMU_REG(data, pt_base));
    __sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
    BUG_ON(clk_prepare_enable(data->clk_master));
    BUG_ON(clk_prepare_enable(data->clk));
    BUG_ON(clk_prepare_enable(data->pclk));
    BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
    clk_disable_unprepare(data->aclk);
    clk_disable_unprepare(data->pclk);
    clk_disable_unprepare(data->clk);
    clk_disable_unprepare(data->clk_master);
}

static bool __sysmmu_has_capa1(struct sysmmu_drvdata *data)
{
    u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0);

    return capa0 & CAPA0_CAPA1_EXIST;
}

static void __sysmmu_get_vcr(struct sysmmu_drvdata *data)
{
    u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1);

    data->has_vcr = capa1 & CAPA1_VCR_ENABLED;
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
    u32 ver;

    __sysmmu_enable_clocks(data);

    ver = readl(data->sfrbase + REG_MMU_VERSION);

    /* controllers on some SoCs don't report proper version */
    if (ver == 0x80000001u)
        data->version = MAKE_MMU_VER(1, 0);
    else
        data->version = MMU_RAW_VER(ver);

    dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
        MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

    if (MMU_MAJ_VER(data->version) < 5) {
        data->variant = &sysmmu_v1_variant;
    } else if (MMU_MAJ_VER(data->version) < 7) {
        data->variant = &sysmmu_v5_variant;
    } else {
        if (__sysmmu_has_capa1(data))
            __sysmmu_get_vcr(data);
        if (data->has_vcr)
            data->variant = &sysmmu_v7_vm_variant;
        else
            data->variant = &sysmmu_v5_variant;
    }

    __sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
                   const struct sysmmu_fault_info *finfo,
                   sysmmu_iova_t fault_addr)
{
    sysmmu_pte_t *ent;

    dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
        dev_name(data->master), finfo->name, fault_addr);
    dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
    ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
    dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
    if (lv1ent_page(ent)) {
        ent = page_entry(ent, fault_addr);
        dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
    }
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
    /* SYSMMU is in blocked state when an interrupt has occurred. */
    struct sysmmu_drvdata *data = dev_id;
    const struct sysmmu_fault_info *finfo;
    unsigned int i, n, itype;
    sysmmu_iova_t fault_addr;
    int ret = -ENOSYS;

    WARN_ON(!data->active);

    if (MMU_MAJ_VER(data->version) < 5) {
        finfo = sysmmu_faults;
        n = ARRAY_SIZE(sysmmu_faults);
    } else {
        finfo = sysmmu_v5_faults;
        n = ARRAY_SIZE(sysmmu_v5_faults);
    }

    spin_lock(&data->lock);

    clk_enable(data->clk_master);

    itype = __ffs(readl(SYSMMU_REG(data, int_status)));
    for (i = 0; i < n; i++, finfo++)
        if (finfo->bit == itype)
            break;
    /* unknown/unsupported fault */
    BUG_ON(i == n);

    /* print debug message */
    fault_addr = readl(data->sfrbase + finfo->addr_reg);
    show_fault_information(data, finfo, fault_addr);

    if (data->domain)
        ret = report_iommu_fault(&data->domain->domain,
                    data->master, fault_addr, finfo->type);
    /* fault is not recovered by fault handler */
    BUG_ON(ret != 0);

    writel(1 << itype, SYSMMU_REG(data, int_clear));

    sysmmu_unblock(data);

    clk_disable(data->clk_master);

    spin_unlock(&data->lock);

    return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
    unsigned long flags;

    clk_enable(data->clk_master);

    spin_lock_irqsave(&data->lock, flags);
    writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
    writel(0, data->sfrbase + REG_MMU_CFG);
    data->active = false;
    spin_unlock_irqrestore(&data->lock, flags);

    __sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
    unsigned int cfg;

    if (data->version <= MAKE_MMU_VER(3, 1))
        cfg = CFG_LRU | CFG_QOS(15);
    else if (data->version <= MAKE_MMU_VER(3, 2))
        cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
    else
        cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

    cfg |= CFG_EAP; /* enable access protection bits check */

    writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable_vid(struct sysmmu_drvdata *data)
{
    u32 ctrl;

    if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr)
        return;

    ctrl = readl(data->sfrbase + REG_V7_CTRL_VM);
    ctrl |= CTRL_VM_ENABLE | CTRL_VM_FAULT_MODE_STALL;
    writel(ctrl, data->sfrbase + REG_V7_CTRL_VM);
}

static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
    unsigned long flags;

    __sysmmu_enable_clocks(data);

    spin_lock_irqsave(&data->lock, flags);
    writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
    __sysmmu_init_config(data);
    __sysmmu_set_ptbase(data, data->pgtable);
    __sysmmu_enable_vid(data);
    writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
    data->active = true;
    spin_unlock_irqrestore(&data->lock, flags);

    /*
     * The SYSMMU driver keeps the master's clock enabled only for a short
     * time while accessing the registers. For performing address
     * translation during DMA transactions it relies on the client
     * driver to enable it.
     */
    clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
                        sysmmu_iova_t iova)
{
    unsigned long flags;

    spin_lock_irqsave(&data->lock, flags);
    if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
        clk_enable(data->clk_master);
        if (sysmmu_block(data)) {
            if (data->version >= MAKE_MMU_VER(5, 0))
                __sysmmu_tlb_invalidate(data);
            else
                __sysmmu_tlb_invalidate_entry(data, iova, 1);
            sysmmu_unblock(data);
        }
        clk_disable(data->clk_master);
    }
    spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
                    sysmmu_iova_t iova, size_t size)
{
    unsigned long flags;

    spin_lock_irqsave(&data->lock, flags);
    if (data->active) {
        unsigned int num_inv = 1;

        clk_enable(data->clk_master);

        /*
         * L2TLB invalidations required:
         * 4KB page: 1 invalidation
         * 64KB page: 16 invalidations
         * 1MB page: 64 invalidations
         * because the TLB is 8-way set-associative with 64 sets.
         * A 1MB page can be cached in any of the sets, while a 64KB
         * page can only be in one of 16 consecutive sets.
         */
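        /*
         * E.g. on v2 hardware, unmapping a 64KiB large page below yields
         * size / SPAGE_SIZE == 16 invalidation requests.
         */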
        if (MMU_MAJ_VER(data->version) == 2)
            num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);

        if (sysmmu_block(data)) {
            __sysmmu_tlb_invalidate_entry(data, iova, num_inv);
            sysmmu_unblock(data);
        }
        clk_disable(data->clk_master);
    }
    spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
    int irq, ret;
    struct device *dev = &pdev->dev;
    struct sysmmu_drvdata *data;
    struct resource *res;

    data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    data->sfrbase = devm_ioremap_resource(dev, res);
    if (IS_ERR(data->sfrbase))
        return PTR_ERR(data->sfrbase);

    irq = platform_get_irq(pdev, 0);
    if (irq <= 0)
        return irq;

    ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
                dev_name(dev), data);
    if (ret) {
        dev_err(dev, "Unable to register handler of irq %d\n", irq);
        return ret;
    }

    data->clk = devm_clk_get(dev, "sysmmu");
    if (PTR_ERR(data->clk) == -ENOENT)
        data->clk = NULL;
    else if (IS_ERR(data->clk))
        return PTR_ERR(data->clk);

    data->aclk = devm_clk_get(dev, "aclk");
    if (PTR_ERR(data->aclk) == -ENOENT)
        data->aclk = NULL;
    else if (IS_ERR(data->aclk))
        return PTR_ERR(data->aclk);

    data->pclk = devm_clk_get(dev, "pclk");
    if (PTR_ERR(data->pclk) == -ENOENT)
        data->pclk = NULL;
    else if (IS_ERR(data->pclk))
        return PTR_ERR(data->pclk);

    if (!data->clk && (!data->aclk || !data->pclk)) {
        dev_err(dev, "Failed to get device clock(s)!\n");
        return -ENOSYS;
    }

    data->clk_master = devm_clk_get(dev, "master");
    if (PTR_ERR(data->clk_master) == -ENOENT)
        data->clk_master = NULL;
    else if (IS_ERR(data->clk_master))
        return PTR_ERR(data->clk_master);

    data->sysmmu = dev;
    spin_lock_init(&data->lock);

    __sysmmu_get_version(data);

    ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
                     dev_name(data->sysmmu));
    if (ret)
        return ret;

    ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
    if (ret)
        goto err_iommu_register;

    platform_set_drvdata(pdev, data);

    if (PG_ENT_SHIFT < 0) {
        if (MMU_MAJ_VER(data->version) < 5) {
            PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
            LV1_PROT = SYSMMU_LV1_PROT;
            LV2_PROT = SYSMMU_LV2_PROT;
        } else {
            PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
            LV1_PROT = SYSMMU_V5_LV1_PROT;
            LV2_PROT = SYSMMU_V5_LV2_PROT;
        }
    }

    if (MMU_MAJ_VER(data->version) >= 5) {
        ret = dma_set_mask(dev, DMA_BIT_MASK(36));
        if (ret) {
            dev_err(dev, "Unable to set DMA mask: %d\n", ret);
            goto err_dma_set_mask;
        }
    }

    /*
     * use the first registered sysmmu device for performing
     * dma mapping operations on iommu page tables (cpu cache flush)
     */
    if (!dma_dev)
        dma_dev = &pdev->dev;

    pm_runtime_enable(dev);

    return 0;

err_dma_set_mask:
    iommu_device_unregister(&data->iommu);
err_iommu_register:
    iommu_device_sysfs_remove(&data->iommu);
    return ret;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
    struct sysmmu_drvdata *data = dev_get_drvdata(dev);
    struct device *master = data->master;

    if (master) {
        struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

        mutex_lock(&owner->rpm_lock);
        if (data->domain) {
            dev_dbg(data->sysmmu, "saving state\n");
            __sysmmu_disable(data);
        }
        mutex_unlock(&owner->rpm_lock);
    }
    return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
    struct sysmmu_drvdata *data = dev_get_drvdata(dev);
    struct device *master = data->master;

    if (master) {
        struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

        mutex_lock(&owner->rpm_lock);
        if (data->domain) {
            dev_dbg(data->sysmmu, "restoring state\n");
            __sysmmu_enable(data);
        }
        mutex_unlock(&owner->rpm_lock);
    }
    return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
    SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
    SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
    { .compatible   = "samsung,exynos-sysmmu", },
    { },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
    .probe  = exynos_sysmmu_probe,
    .driver = {
        .name       = "exynos-sysmmu",
        .of_match_table = sysmmu_of_match,
        .pm     = &sysmmu_pm_ops,
        .suppress_bind_attrs = true,
    }
};

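/*
 * The page tables are mapped into dma_dev's address space as streaming
 * DMA_TO_DEVICE buffers, so every CPU-side update has to be wrapped in
 * dma_sync_single_for_cpu()/_for_device() calls to stay visible to the
 * (possibly non-coherent) SYSMMU, as the helper below does for one entry.
 */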
static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
    dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
                DMA_TO_DEVICE);
    *ent = cpu_to_le32(val);
    dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
                   DMA_TO_DEVICE);
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
    struct exynos_iommu_domain *domain;
    dma_addr_t handle;
    int i;

    /* Check if correct PTE offsets are initialized */
    BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

    if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
        return NULL;

    domain = kzalloc(sizeof(*domain), GFP_KERNEL);
    if (!domain)
        return NULL;

    domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
    if (!domain->pgtable)
        goto err_pgtable;

    domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
    if (!domain->lv2entcnt)
        goto err_counter;

    /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
    for (i = 0; i < NUM_LV1ENTRIES; i++)
        domain->pgtable[i] = ZERO_LV2LINK;

    handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
                DMA_TO_DEVICE);
    /* For mapping page table entries we rely on dma == phys */
    BUG_ON(handle != virt_to_phys(domain->pgtable));
    if (dma_mapping_error(dma_dev, handle))
        goto err_lv2ent;

    spin_lock_init(&domain->lock);
    spin_lock_init(&domain->pgtablelock);
    INIT_LIST_HEAD(&domain->clients);

    domain->domain.geometry.aperture_start = 0;
    domain->domain.geometry.aperture_end   = ~0UL;
    domain->domain.geometry.force_aperture = true;

    return &domain->domain;

err_lv2ent:
    free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
    free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
    kfree(domain);
    return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
    struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
    struct sysmmu_drvdata *data, *next;
    unsigned long flags;
    int i;

    WARN_ON(!list_empty(&domain->clients));

    spin_lock_irqsave(&domain->lock, flags);

    list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
        spin_lock(&data->lock);
        __sysmmu_disable(data);
        data->pgtable = 0;
        data->domain = NULL;
        list_del_init(&data->domain_node);
        spin_unlock(&data->lock);
    }

    spin_unlock_irqrestore(&domain->lock, flags);

    dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
             DMA_TO_DEVICE);

    for (i = 0; i < NUM_LV1ENTRIES; i++)
        if (lv1ent_page(domain->pgtable + i)) {
            phys_addr_t base = lv2table_base(domain->pgtable + i);

            dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
                     DMA_TO_DEVICE);
            kmem_cache_free(lv2table_kmem_cache,
                    phys_to_virt(base));
        }

    free_pages((unsigned long)domain->pgtable, 2);
    free_pages((unsigned long)domain->lv2entcnt, 1);
    kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
                    struct device *dev)
{
    struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
    struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
    phys_addr_t pagetable = virt_to_phys(domain->pgtable);
    struct sysmmu_drvdata *data, *next;
    unsigned long flags;

    if (!has_sysmmu(dev) || owner->domain != iommu_domain)
        return;

    mutex_lock(&owner->rpm_lock);

    list_for_each_entry(data, &owner->controllers, owner_node) {
        pm_runtime_get_noresume(data->sysmmu);
        if (pm_runtime_active(data->sysmmu))
            __sysmmu_disable(data);
        pm_runtime_put(data->sysmmu);
    }

    spin_lock_irqsave(&domain->lock, flags);
    list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
        spin_lock(&data->lock);
        data->pgtable = 0;
        data->domain = NULL;
        list_del_init(&data->domain_node);
        spin_unlock(&data->lock);
    }
    owner->domain = NULL;
    spin_unlock_irqrestore(&domain->lock, flags);

    mutex_unlock(&owner->rpm_lock);

    dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
        &pagetable);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
                   struct device *dev)
{
    struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
    struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
    struct sysmmu_drvdata *data;
    phys_addr_t pagetable = virt_to_phys(domain->pgtable);
    unsigned long flags;

    if (!has_sysmmu(dev))
        return -ENODEV;

    if (owner->domain)
        exynos_iommu_detach_device(owner->domain, dev);

    mutex_lock(&owner->rpm_lock);

    spin_lock_irqsave(&domain->lock, flags);
    list_for_each_entry(data, &owner->controllers, owner_node) {
        spin_lock(&data->lock);
        data->pgtable = pagetable;
        data->domain = domain;
        list_add_tail(&data->domain_node, &domain->clients);
        spin_unlock(&data->lock);
    }
    owner->domain = iommu_domain;
    spin_unlock_irqrestore(&domain->lock, flags);

    list_for_each_entry(data, &owner->controllers, owner_node) {
        pm_runtime_get_noresume(data->sysmmu);
        if (pm_runtime_active(data->sysmmu))
            __sysmmu_enable(data);
        pm_runtime_put(data->sysmmu);
    }

    mutex_unlock(&owner->rpm_lock);

    dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
        &pagetable);

    return 0;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
        sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
    if (lv1ent_section(sent)) {
        WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
        return ERR_PTR(-EADDRINUSE);
    }

    if (lv1ent_fault(sent)) {
        dma_addr_t handle;
        sysmmu_pte_t *pent;
        bool need_flush_flpd_cache = lv1ent_zero(sent);

        pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
        BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
        if (!pent)
            return ERR_PTR(-ENOMEM);

        exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
        kmemleak_ignore(pent);
        *pgcounter = NUM_LV2ENTRIES;
        handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
                    DMA_TO_DEVICE);
        if (dma_mapping_error(dma_dev, handle)) {
            kmem_cache_free(lv2table_kmem_cache, pent);
            return ERR_PTR(-EADDRINUSE);
        }

        /*
         * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
         * FLPD cache may cache the address of zero_l2_table. This
         * function replaces the zero_l2_table with new L2 page table
         * to write valid mappings.
         * Accessing the valid area may cause page fault since FLPD
         * cache may still cache zero_l2_table for the valid area
         * instead of new L2 page table that has the mapping
         * information of the valid area.
         * Thus any replacement of zero_l2_table with other valid L2
         * page table must involve FLPD cache invalidation for System
         * MMU v3.3.
         * FLPD cache invalidation is performed with TLB invalidation
         * by VPN without blocking. It is safe to invalidate TLB without
         * blocking because the target address of TLB invalidation is
         * not currently mapped.
         */
        if (need_flush_flpd_cache) {
            struct sysmmu_drvdata *data;

            spin_lock(&domain->lock);
            list_for_each_entry(data, &domain->clients, domain_node)
                sysmmu_tlb_invalidate_flpdcache(data, iova);
            spin_unlock(&domain->lock);
        }
    }

    return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
              sysmmu_pte_t *sent, sysmmu_iova_t iova,
              phys_addr_t paddr, int prot, short *pgcnt)
{
    if (lv1ent_section(sent)) {
        WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
            iova);
        return -EADDRINUSE;
    }

    if (lv1ent_page(sent)) {
        if (*pgcnt != NUM_LV2ENTRIES) {
            WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
                iova);
            return -EADDRINUSE;
        }

        kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
        *pgcnt = 0;
    }

    exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));

    spin_lock(&domain->lock);
    if (lv1ent_page_zero(sent)) {
        struct sysmmu_drvdata *data;
        /*
         * Flush the FLPD cache in System MMU v3.3, which may have
         * cached an FLPD entry by a speculative prefetch of an SLPD
         * that has no mapping.
         */
        list_for_each_entry(data, &domain->clients, domain_node)
            sysmmu_tlb_invalidate_flpdcache(data, iova);
    }
    spin_unlock(&domain->lock);

    return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
               int prot, short *pgcnt)
{
    if (size == SPAGE_SIZE) {
        if (WARN_ON(!lv2ent_fault(pent)))
            return -EADDRINUSE;

        exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
        *pgcnt -= 1;
    } else { /* size == LPAGE_SIZE */
        int i;
        dma_addr_t pent_base = virt_to_phys(pent);

        dma_sync_single_for_cpu(dma_dev, pent_base,
                    sizeof(*pent) * SPAGES_PER_LPAGE,
                    DMA_TO_DEVICE);
        for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
            if (WARN_ON(!lv2ent_fault(pent))) {
                if (i > 0)
                    memset(pent - i, 0, sizeof(*pent) * i);
                return -EADDRINUSE;
            }

            *pent = mk_lv2ent_lpage(paddr, prot);
        }
        dma_sync_single_for_device(dma_dev, pent_base,
                       sizeof(*pent) * SPAGES_PER_LPAGE,
                       DMA_TO_DEVICE);
        *pgcnt -= SPAGES_PER_LPAGE;
    }

    return 0;
}

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance with caching more page table entries by a page table walk.
 * However, the logic has a bug that while caching faulty page table entries,
 * System MMU reports page fault if the cached fault entry is hit even though
 * the fault entry is updated to a valid entry after the entry is cached.
 * To prevent caching faulty page table entries which may be updated to valid
 * entries later, the virtual memory manager should care about the workaround
 * for the problem. The following describes the workaround.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it needs
 * more workarounds.
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
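/*
 * Illustrative example of the v3.3 rules above (not part of the driver): if
 * one I/O virtual region ends at 0x1000_0000, the next region should start no
 * lower than 0x1002_0000 (leaving a 128KiB hole) and its start address should
 * itself be 128KiB aligned.
 */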
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
                unsigned long l_iova, phys_addr_t paddr, size_t size,
                int prot, gfp_t gfp)
{
    struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
    sysmmu_pte_t *entry;
    sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
    unsigned long flags;
    int ret = -ENOMEM;

    BUG_ON(domain->pgtable == NULL);
    prot &= SYSMMU_SUPPORTED_PROT_BITS;

    spin_lock_irqsave(&domain->pgtablelock, flags);

    entry = section_entry(domain->pgtable, iova);

    if (size == SECT_SIZE) {
        ret = lv1set_section(domain, entry, iova, paddr, prot,
                     &domain->lv2entcnt[lv1ent_offset(iova)]);
    } else {
        sysmmu_pte_t *pent;

        pent = alloc_lv2entry(domain, entry, iova,
                      &domain->lv2entcnt[lv1ent_offset(iova)]);

        if (IS_ERR(pent))
            ret = PTR_ERR(pent);
        else
            ret = lv2set_page(pent, paddr, size, prot,
                       &domain->lv2entcnt[lv1ent_offset(iova)]);
    }

    if (ret)
        pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
            __func__, ret, size, iova);

    spin_unlock_irqrestore(&domain->pgtablelock, flags);

    return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
                          sysmmu_iova_t iova, size_t size)
{
    struct sysmmu_drvdata *data;
    unsigned long flags;

    spin_lock_irqsave(&domain->lock, flags);

    list_for_each_entry(data, &domain->clients, domain_node)
        sysmmu_tlb_invalidate_entry(data, iova, size);

    spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
                 unsigned long l_iova, size_t size,
                 struct iommu_iotlb_gather *gather)
{
    struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
    sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
    sysmmu_pte_t *ent;
    size_t err_pgsize;
    unsigned long flags;

    BUG_ON(domain->pgtable == NULL);

    spin_lock_irqsave(&domain->pgtablelock, flags);

    ent = section_entry(domain->pgtable, iova);

    if (lv1ent_section(ent)) {
        if (WARN_ON(size < SECT_SIZE)) {
            err_pgsize = SECT_SIZE;
            goto err;
        }

        /* workaround for h/w bug in System MMU v3.3 */
        exynos_iommu_set_pte(ent, ZERO_LV2LINK);
        size = SECT_SIZE;
        goto done;
    }

    if (unlikely(lv1ent_fault(ent))) {
        if (size > SECT_SIZE)
            size = SECT_SIZE;
        goto done;
    }

    /* lv1ent_page(sent) == true here */

    ent = page_entry(ent, iova);

    if (unlikely(lv2ent_fault(ent))) {
        size = SPAGE_SIZE;
        goto done;
    }

    if (lv2ent_small(ent)) {
        exynos_iommu_set_pte(ent, 0);
        size = SPAGE_SIZE;
        domain->lv2entcnt[lv1ent_offset(iova)] += 1;
        goto done;
    }

    /* lv2ent_large(ent) == true here */
    if (WARN_ON(size < LPAGE_SIZE)) {
        err_pgsize = LPAGE_SIZE;
        goto err;
    }

    dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
                sizeof(*ent) * SPAGES_PER_LPAGE,
                DMA_TO_DEVICE);
    memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
    dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
                   sizeof(*ent) * SPAGES_PER_LPAGE,
                   DMA_TO_DEVICE);
    size = LPAGE_SIZE;
    domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
    spin_unlock_irqrestore(&domain->pgtablelock, flags);

    exynos_iommu_tlb_invalidate_entry(domain, iova, size);

    return size;
err:
    spin_unlock_irqrestore(&domain->pgtablelock, flags);

    pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
        __func__, size, iova, err_pgsize);

    return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
                      dma_addr_t iova)
{
    struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
    sysmmu_pte_t *entry;
    unsigned long flags;
    phys_addr_t phys = 0;

    spin_lock_irqsave(&domain->pgtablelock, flags);

    entry = section_entry(domain->pgtable, iova);

    if (lv1ent_section(entry)) {
        phys = section_phys(entry) + section_offs(iova);
    } else if (lv1ent_page(entry)) {
        entry = page_entry(entry, iova);

        if (lv2ent_large(entry))
            phys = lpage_phys(entry) + lpage_offs(iova);
        else if (lv2ent_small(entry))
            phys = spage_phys(entry) + spage_offs(iova);
    }

    spin_unlock_irqrestore(&domain->pgtablelock, flags);

    return phys;
}

static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
{
    struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
    struct sysmmu_drvdata *data;

    if (!has_sysmmu(dev))
        return ERR_PTR(-ENODEV);

    list_for_each_entry(data, &owner->controllers, owner_node) {
        /*
         * SYSMMU will be runtime activated via device link
         * (dependency) to its master device, so there are no
         * direct calls to pm_runtime_get/put in this driver.
         */
        data->link = device_link_add(dev, data->sysmmu,
                         DL_FLAG_STATELESS |
                         DL_FLAG_PM_RUNTIME);
    }

    /* There is always at least one entry, see exynos_iommu_of_xlate() */
    data = list_first_entry(&owner->controllers,
                struct sysmmu_drvdata, owner_node);

    return &data->iommu;
}

static void exynos_iommu_release_device(struct device *dev)
{
    struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
    struct sysmmu_drvdata *data;

    if (owner->domain) {
        struct iommu_group *group = iommu_group_get(dev);

        if (group) {
            WARN_ON(owner->domain !=
                iommu_group_default_domain(group));
            exynos_iommu_detach_device(owner->domain, dev);
            iommu_group_put(group);
        }
    }

    list_for_each_entry(data, &owner->controllers, owner_node)
        device_link_del(data->link);
}

static int exynos_iommu_of_xlate(struct device *dev,
                 struct of_phandle_args *spec)
{
    struct platform_device *sysmmu = of_find_device_by_node(spec->np);
    struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
    struct sysmmu_drvdata *data, *entry;

    if (!sysmmu)
        return -ENODEV;

    data = platform_get_drvdata(sysmmu);
    if (!data) {
        put_device(&sysmmu->dev);
        return -ENODEV;
    }

    if (!owner) {
        owner = kzalloc(sizeof(*owner), GFP_KERNEL);
        if (!owner) {
            put_device(&sysmmu->dev);
            return -ENOMEM;
        }

        INIT_LIST_HEAD(&owner->controllers);
        mutex_init(&owner->rpm_lock);
        dev_iommu_priv_set(dev, owner);
    }

    list_for_each_entry(entry, &owner->controllers, owner_node)
        if (entry == data)
            return 0;

    list_add_tail(&data->owner_node, &owner->controllers);
    data->master = dev;

    return 0;
}

static const struct iommu_ops exynos_iommu_ops = {
    .domain_alloc = exynos_iommu_domain_alloc,
    .device_group = generic_device_group,
    .probe_device = exynos_iommu_probe_device,
    .release_device = exynos_iommu_release_device,
    .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
    .of_xlate = exynos_iommu_of_xlate,
    .default_domain_ops = &(const struct iommu_domain_ops) {
        .attach_dev = exynos_iommu_attach_device,
        .detach_dev = exynos_iommu_detach_device,
        .map        = exynos_iommu_map,
        .unmap      = exynos_iommu_unmap,
        .iova_to_phys   = exynos_iommu_iova_to_phys,
        .free       = exynos_iommu_domain_free,
    }
};

static int __init exynos_iommu_init(void)
{
    struct device_node *np;
    int ret;

    np = of_find_matching_node(NULL, sysmmu_of_match);
    if (!np)
        return 0;

    of_node_put(np);

    lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
                LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
    if (!lv2table_kmem_cache) {
        pr_err("%s: Failed to create kmem cache\n", __func__);
        return -ENOMEM;
    }

    ret = platform_driver_register(&exynos_sysmmu_driver);
    if (ret) {
        pr_err("%s: Failed to register driver\n", __func__);
        goto err_reg_driver;
    }

    zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
    if (zero_lv2_table == NULL) {
        pr_err("%s: Failed to allocate zero level2 page table\n",
            __func__);
        ret = -ENOMEM;
        goto err_zero_lv2;
    }

    ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
    if (ret) {
        pr_err("%s: Failed to register exynos-iommu driver.\n",
                                __func__);
        goto err_set_iommu;
    }

    return 0;
err_set_iommu:
    kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
    platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
    kmem_cache_destroy(lv2table_kmem_cache);
    return ret;
}
core_initcall(exynos_iommu_init);