// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * Copyright (C) 2014-2020 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#else
#define arm_iommu_create_mapping(...)   NULL
#define arm_iommu_attach_device(...)    -ENODEV
#define arm_iommu_release_mapping(...)  do {} while (0)
#define arm_iommu_detach_device(...)    do {} while (0)
#endif
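
/*
 * These fallback macros exist only so the driver builds when the ARM DMA
 * IOMMU glue is unavailable (non-ARM kernels, or ARM with CONFIG_IOMMU_DMA).
 * ipmmu_probe_finalize() below only enters the mapping-creation path under
 * IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA), so the NULL and
 * -ENODEV stubs are never reached at runtime; release/detach become no-ops.
 */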

#define IPMMU_CTX_MAX       16U
#define IPMMU_CTX_INVALID   -1

#define IPMMU_UTLB_MAX      64U

struct ipmmu_features {
    bool use_ns_alias_offset;
    bool has_cache_leaf_nodes;
    unsigned int number_of_contexts;
    unsigned int num_utlbs;
    bool setup_imbuscr;
    bool twobit_imttbcr_sl0;
    bool reserved_context;
    bool cache_snoop;
    unsigned int ctx_offset_base;
    unsigned int ctx_offset_stride;
    unsigned int utlb_offset_base;
};

struct ipmmu_vmsa_device {
    struct device *dev;
    void __iomem *base;
    struct iommu_device iommu;
    struct ipmmu_vmsa_device *root;
    const struct ipmmu_features *features;
    unsigned int num_ctx;
    spinlock_t lock;            /* Protects ctx and domains[] */
    DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
    struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
    s8 utlb_ctx[IPMMU_UTLB_MAX];

    struct iommu_group *group;
    struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
    struct ipmmu_vmsa_device *mmu;
    struct iommu_domain io_domain;

    struct io_pgtable_cfg cfg;
    struct io_pgtable_ops *iop;

    unsigned int context_id;
    struct mutex mutex;         /* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
    return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
    return dev_iommu_priv_get(dev);
}
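
/*
 * The per-device pointer returned by to_ipmmu() is installed with
 * dev_iommu_priv_set() in ipmmu_init_platform_device(), which runs from
 * ipmmu_of_xlate(); a NULL return therefore means the device was never
 * associated with an IPMMU instance in the device tree.
 */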

#define TLB_LOOP_TIMEOUT        100 /* 100us */

/* -----------------------------------------------------------------------------
 * Register Definitions
 */

#define IM_NS_ALIAS_OFFSET      0x800

/* MMU "context" registers */
#define IMCTR               0x0000      /* R-Car Gen2/3 */
#define IMCTR_INTEN         (1 << 2)    /* R-Car Gen2/3 */
#define IMCTR_FLUSH         (1 << 1)    /* R-Car Gen2/3 */
#define IMCTR_MMUEN         (1 << 0)    /* R-Car Gen2/3 */

#define IMTTBCR             0x0008      /* R-Car Gen2/3 */
#define IMTTBCR_EAE         (1 << 31)   /* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12)   /* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA     (1 << 10)   /* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA     (1 << 8)    /* R-Car Gen2 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1    (2 << 6)    /* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_1       (1 << 4)    /* R-Car Gen2 only */

#define IMBUSCR             0x000c      /* R-Car Gen2 only */
#define IMBUSCR_DVM         (1 << 2)    /* R-Car Gen2 only */
#define IMBUSCR_BUSSEL_MASK     (3 << 0)    /* R-Car Gen2 only */

#define IMTTLBR0            0x0010      /* R-Car Gen2/3 */
#define IMTTUBR0            0x0014      /* R-Car Gen2/3 */

#define IMSTR               0x0020      /* R-Car Gen2/3 */
#define IMSTR_MHIT          (1 << 4)    /* R-Car Gen2/3 */
#define IMSTR_ABORT         (1 << 2)    /* R-Car Gen2/3 */
#define IMSTR_PF            (1 << 1)    /* R-Car Gen2/3 */
#define IMSTR_TF            (1 << 0)    /* R-Car Gen2/3 */

#define IMMAIR0             0x0028      /* R-Car Gen2/3 */

#define IMELAR              0x0030      /* R-Car Gen2/3, IMEAR on R-Car Gen2 */
#define IMEUAR              0x0034      /* R-Car Gen3 only */

/* uTLB registers */
#define IMUCTR(n)           ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)          (0x0300 + ((n) * 16))       /* R-Car Gen2/3 */
#define IMUCTR32(n)         (0x0600 + (((n) - 32) * 16))    /* R-Car Gen3 only */
#define IMUCTR_TTSEL_MMU(n)     ((n) << 4)  /* R-Car Gen2/3 */
#define IMUCTR_FLUSH            (1 << 1)    /* R-Car Gen2/3 */
#define IMUCTR_MMUEN            (1 << 0)    /* R-Car Gen2/3 */

#define IMUASID(n)          ((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)         (0x0308 + ((n) * 16))       /* R-Car Gen2/3 */
#define IMUASID32(n)            (0x0608 + (((n) - 32) * 16))    /* R-Car Gen3 only */
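
/*
 * Worked examples for the uTLB register arithmetic above: uTLBs 0-31 live
 * in the bank at 0x0300 and uTLBs 32-63 in the bank at 0x0600, with 16
 * bytes per uTLB. IMUCTR(2) = 0x0300 + 2 * 16 = 0x0320, while IMUCTR(40) =
 * 0x0600 + (40 - 32) * 16 = 0x0680; the matching IMUASID register always
 * sits 8 bytes after its IMUCTR.
 */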

/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
    return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
    struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
    struct ipmmu_vmsa_device **rootp = data;

    if (ipmmu_is_root(mmu))
        *rootp = mmu;

    return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
    struct ipmmu_vmsa_device *root = NULL;

    return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
                      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
    return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
            u32 data)
{
    iowrite32(data, mmu->base + offset);
}

static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
                  unsigned int context_id, unsigned int reg)
{
    unsigned int base = mmu->features->ctx_offset_base;

    if (context_id > 7)
        base += 0x800 - 8 * 0x40;

    return base + context_id * mmu->features->ctx_offset_stride + reg;
}
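
/*
 * Example offsets from the formula above: with the default features
 * (ctx_offset_base 0, stride 0x40), IMTTBCR (0x08) of context 3 is at
 * 0 + 3 * 0x40 + 0x08 = 0xc8. Contexts above 7 only exist on R-Car Gen4,
 * where the extra 0x800 - 8 * 0x40 = 0x600 presumably skips a hole in the
 * register map before the bank holding the upper eight contexts.
 */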

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
              unsigned int context_id, unsigned int reg)
{
    return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
                unsigned int context_id, unsigned int reg, u32 data)
{
    ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
                   unsigned int reg)
{
    return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
                 unsigned int reg, u32 data)
{
    ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
                unsigned int reg, u32 data)
{
    if (domain->mmu != domain->mmu->root)
        ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

    ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
{
    return mmu->features->utlb_offset_base + reg;
}

static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
                unsigned int utlb, u32 data)
{
    ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
}

static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
                   unsigned int utlb, u32 data)
{
    ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
    unsigned int count = 0;

    while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
        cpu_relax();
        if (++count == TLB_LOOP_TIMEOUT) {
            dev_err_ratelimited(domain->mmu->dev,
                "TLB sync timed out -- MMU may be deadlocked\n");
            return;
        }
        udelay(1);
    }
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
    u32 reg;

    reg = ipmmu_ctx_read_root(domain, IMCTR);
    reg |= IMCTR_FLUSH;
    ipmmu_ctx_write_all(domain, IMCTR, reg);

    ipmmu_tlb_sync(domain);
}
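
/*
 * Invalidation protocol: setting IMCTR_FLUSH triggers a whole-context TLB
 * flush. ipmmu_ctx_write_all() sets the bit on both the leaf and the root
 * instance, and ipmmu_tlb_sync() then polls the root IMCTR until the
 * hardware clears the bit again (or ~100us elapse), signalling completion.
 */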

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
                  unsigned int utlb)
{
    struct ipmmu_vmsa_device *mmu = domain->mmu;

    /*
     * TODO: Reference-count the microTLB as several bus masters can be
     * connected to the same microTLB.
     */

    /* TODO: What should we set the ASID to? */
    ipmmu_imuasid_write(mmu, utlb, 0);
    /* TODO: Do we need to flush the microTLB? */
    ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
                      IMUCTR_FLUSH | IMUCTR_MMUEN);
    mmu->utlb_ctx[utlb] = domain->context_id;
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
                   unsigned int utlb)
{
    struct ipmmu_vmsa_device *mmu = domain->mmu;

    ipmmu_imuctr_write(mmu, utlb, 0);
    mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}

static void ipmmu_tlb_flush_all(void *cookie)
{
    struct ipmmu_vmsa_domain *domain = cookie;

    ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_flush(unsigned long iova, size_t size,
                size_t granule, void *cookie)
{
    ipmmu_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops ipmmu_flush_ops = {
    .tlb_flush_all = ipmmu_tlb_flush_all,
    .tlb_flush_walk = ipmmu_tlb_flush,
};
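
/*
 * These callbacks are invoked by the io-pgtable code whenever it needs TLB
 * maintenance. The IPMMU only offers a whole-context flush via IMCTR_FLUSH,
 * which is why the ranged tlb_flush_walk callback ignores iova, size and
 * granule and simply flushes everything.
 */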

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
                     struct ipmmu_vmsa_domain *domain)
{
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&mmu->lock, flags);

    ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
    if (ret != mmu->num_ctx) {
        mmu->domains[ret] = domain;
        set_bit(ret, mmu->ctx);
    } else {
        ret = -EBUSY;
    }

    spin_unlock_irqrestore(&mmu->lock, flags);

    return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
                      unsigned int context_id)
{
    unsigned long flags;

    spin_lock_irqsave(&mmu->lock, flags);

    clear_bit(context_id, mmu->ctx);
    mmu->domains[context_id] = NULL;

    spin_unlock_irqrestore(&mmu->lock, flags);
}

static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
    u64 ttbr;
    u32 tmp;

    /* TTBR0 */
    ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
    ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
    ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

    /*
     * TTBCR
     * We use long descriptors and allocate the whole 32-bit VA space to
     * TTBR0.
     */
    if (domain->mmu->features->twobit_imttbcr_sl0)
        tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
    else
        tmp = IMTTBCR_SL0_LVL_1;

    if (domain->mmu->features->cache_snoop)
        tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
               IMTTBCR_IRGN0_WB_WA;

    ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

    /* MAIR0 */
    ipmmu_ctx_write_root(domain, IMMAIR0,
                 domain->cfg.arm_lpae_s1_cfg.mair);

    /* IMBUSCR */
    if (domain->mmu->features->setup_imbuscr)
        ipmmu_ctx_write_root(domain, IMBUSCR,
                     ipmmu_ctx_read_root(domain, IMBUSCR) &
                     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

    /*
     * IMSTR
     * Clear all interrupt flags.
     */
    ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

    /*
     * IMCTR
     * Enable the MMU and interrupt generation. The long-descriptor
     * translation table format doesn't use TEX remapping. Don't enable AF
     * software management as we have no use for it. Flush the TLB as
     * required when modifying the context registers.
     */
    ipmmu_ctx_write_all(domain, IMCTR,
                IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
    int ret;

    /*
     * Allocate the page table operations.
     *
     * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
     * access, Long-descriptor format" that the NStable bit being set in a
     * table descriptor will result in the NStable and NS bits of all child
     * entries being ignored and considered as being set. The IPMMU seems
     * not to comply with this, as it generates a secure access page fault
     * if any of the NStable and NS bits isn't set when running in
     * non-secure mode.
     */
    domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
    domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
    domain->cfg.ias = 32;
    domain->cfg.oas = 40;
    domain->cfg.tlb = &ipmmu_flush_ops;
    domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
    domain->io_domain.geometry.force_aperture = true;
    /*
     * TODO: Add support for coherent walk through CCI with DVM and remove
     * cache handling. For now, delegate it to the io-pgtable code.
     */
    domain->cfg.coherent_walk = false;
    domain->cfg.iommu_dev = domain->mmu->root->dev;

    /*
     * Find an unused context.
     */
    ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
    if (ret < 0)
        return ret;

    domain->context_id = ret;

    domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
                       domain);
    if (!domain->iop) {
        ipmmu_domain_free_context(domain->mmu->root,
                      domain->context_id);
        return -EINVAL;
    }

    ipmmu_domain_setup_context(domain);
    return 0;
}
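
/*
 * The pgsize_bitmap of SZ_1G | SZ_2M | SZ_4K corresponds to the three
 * mapping granules of the ARM 32-bit LPAE stage-1 format selected above:
 * level-1 blocks (1GiB), level-2 blocks (2MiB) and level-3 pages (4KiB),
 * within a 32-bit input and 40-bit output address space.
 */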

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
    if (!domain->mmu)
        return;

    /*
     * Disable the context. Flush the TLB as required when modifying the
     * context registers.
     *
     * TODO: Is TLB flush really needed?
     */
    ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
    ipmmu_tlb_sync(domain);
    ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
    const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
    struct ipmmu_vmsa_device *mmu = domain->mmu;
    unsigned long iova;
    u32 status;

    status = ipmmu_ctx_read_root(domain, IMSTR);
    if (!(status & err_mask))
        return IRQ_NONE;

    iova = ipmmu_ctx_read_root(domain, IMELAR);
    if (IS_ENABLED(CONFIG_64BIT))
        iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

    /*
     * Clear the error status flags. Unlike traditional interrupt flag
     * registers that must be cleared by writing 1, this status register
     * seems to require 0. The error address register must be read before,
     * otherwise its value will be 0.
     */
    ipmmu_ctx_write_root(domain, IMSTR, 0);

    /* Log fatal errors. */
    if (status & IMSTR_MHIT)
        dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
                    iova);
    if (status & IMSTR_ABORT)
        dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
                    iova);

    if (!(status & (IMSTR_PF | IMSTR_TF)))
        return IRQ_NONE;

    /*
     * Try to handle page faults and translation faults.
     *
     * TODO: We need to look up the faulty device based on the I/O VA. Use
     * the IOMMU device for now.
     */
    if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
        return IRQ_HANDLED;

    dev_err_ratelimited(mmu->dev,
                "Unhandled fault: status 0x%08x iova 0x%lx\n",
                status, iova);

    return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
    struct ipmmu_vmsa_device *mmu = dev;
    irqreturn_t status = IRQ_NONE;
    unsigned int i;
    unsigned long flags;

    spin_lock_irqsave(&mmu->lock, flags);

    /*
     * Check interrupts for all active contexts.
     */
    for (i = 0; i < mmu->num_ctx; i++) {
        if (!mmu->domains[i])
            continue;
        if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
            status = IRQ_HANDLED;
    }

    spin_unlock_irqrestore(&mmu->lock, flags);

    return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
    struct ipmmu_vmsa_domain *domain;

    if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
        return NULL;

    domain = kzalloc(sizeof(*domain), GFP_KERNEL);
    if (!domain)
        return NULL;

    mutex_init(&domain->mutex);

    return &domain->io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
    struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

    /*
     * Free the domain resources. We assume that all devices have already
     * been detached.
     */
    ipmmu_domain_destroy_context(domain);
    free_io_pgtable_ops(domain->iop);
    kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
                   struct device *dev)
{
    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
    struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
    struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
    unsigned int i;
    int ret = 0;

    if (!mmu) {
        dev_err(dev, "Cannot attach to IPMMU\n");
        return -ENXIO;
    }

    mutex_lock(&domain->mutex);

    if (!domain->mmu) {
        /* The domain hasn't been used yet, initialize it. */
        domain->mmu = mmu;
        ret = ipmmu_domain_init_context(domain);
        if (ret < 0) {
            dev_err(dev, "Unable to initialize IPMMU context\n");
            domain->mmu = NULL;
        } else {
            dev_info(dev, "Using IPMMU context %u\n",
                 domain->context_id);
        }
    } else if (domain->mmu != mmu) {
        /*
         * Something is wrong, we can't attach two devices using
         * different IOMMUs to the same domain.
         */
        dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
            dev_name(mmu->dev), dev_name(domain->mmu->dev));
        ret = -EINVAL;
    } else {
        dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
    }

    mutex_unlock(&domain->mutex);

    if (ret < 0)
        return ret;

    for (i = 0; i < fwspec->num_ids; ++i)
        ipmmu_utlb_enable(domain, fwspec->ids[i]);

    return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
                struct device *dev)
{
    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
    struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
    unsigned int i;

    for (i = 0; i < fwspec->num_ids; ++i)
        ipmmu_utlb_disable(domain, fwspec->ids[i]);

    /*
     * TODO: Optimize by disabling the context when no device is attached.
     */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
             phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
    struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

    if (!domain)
        return -ENODEV;

    return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
              size_t size, struct iommu_iotlb_gather *gather)
{
    struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

    return domain->iop->unmap(domain->iop, iova, size, gather);
}
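
/*
 * map/unmap delegate directly to the io-pgtable ops allocated in
 * ipmmu_domain_init_context(). TLB maintenance for unmaps is deferred to
 * the iotlb_sync/flush_iotlb_all callbacks below, both of which perform a
 * full-context flush.
 */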

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
    struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

    if (domain->mmu)
        ipmmu_tlb_flush_all(domain);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
                 struct iommu_iotlb_gather *gather)
{
    ipmmu_flush_iotlb_all(io_domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
                      dma_addr_t iova)
{
    struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

    /* TODO: Is locking needed? */

    return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
                      struct of_phandle_args *args)
{
    struct platform_device *ipmmu_pdev;

    ipmmu_pdev = of_find_device_by_node(args->np);
    if (!ipmmu_pdev)
        return -ENODEV;

    dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));

    return 0;
}

static const struct soc_device_attribute soc_needs_opt_in[] = {
    { .family = "R-Car Gen3", },
    { .family = "R-Car Gen4", },
    { .family = "RZ/G2", },
    { /* sentinel */ }
};

static const struct soc_device_attribute soc_denylist[] = {
    { .soc_id = "r8a774a1", },
    { .soc_id = "r8a7795", .revision = "ES1.*" },
    { .soc_id = "r8a7795", .revision = "ES2.*" },
    { .soc_id = "r8a7796", },
    { /* sentinel */ }
};

static const char * const devices_allowlist[] = {
    "ee100000.mmc",
    "ee120000.mmc",
    "ee140000.mmc",
    "ee160000.mmc"
};
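
/*
 * Allow list entries are matched against dev_name(), which for DT-based
 * platform devices is "<unit-address>.<node-name>". The four entries here
 * are MMC controller nodes, apparently the only bus masters validated for
 * IPMMU use on the opt-in SoCs.
 */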

static bool ipmmu_device_is_allowed(struct device *dev)
{
    unsigned int i;

    /*
     * R-Car Gen3/4 and RZ/G2 use the allow list to opt in devices.
     * For other SoCs, allow the device unconditionally.
     */
    if (!soc_device_match(soc_needs_opt_in))
        return true;

    /* Check whether this SoC can use the IPMMU correctly or not */
    if (soc_device_match(soc_denylist))
        return false;

    /* Check whether this device can work with the IPMMU */
    for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) {
        if (!strcmp(dev_name(dev), devices_allowlist[i]))
            return true;
    }

    /* Otherwise, do not allow use of IPMMU */
    return false;
}

static int ipmmu_of_xlate(struct device *dev,
              struct of_phandle_args *spec)
{
    if (!ipmmu_device_is_allowed(dev))
        return -ENODEV;

    iommu_fwspec_add_ids(dev, spec->args, 1);

    /* Initialize once - xlate() may be called multiple times. */
    if (to_ipmmu(dev))
        return 0;

    return ipmmu_init_platform_device(dev, spec);
}
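
/*
 * Illustrative device tree usage (labels and addresses are examples only):
 * a bus master references the IPMMU with one cell per connection, the cell
 * being the micro-TLB index that arrives here as spec->args[0]:
 *
 *     mmc@ee100000 {
 *         ...
 *         iommus = <&ipmmu_ds1 32>;
 *     };
 */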

static int ipmmu_init_arm_mapping(struct device *dev)
{
    struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
    int ret;

    /*
     * Create the ARM mapping, used by the ARM DMA mapping core to allocate
     * VAs. This will allocate a corresponding IOMMU domain.
     *
     * TODO:
     * - Create one mapping per context (TLB).
     * - Make the mapping size configurable? We currently use a 2GB mapping
     *   at a 1GB offset to ensure that NULL VAs will fault.
     */
    if (!mmu->mapping) {
        struct dma_iommu_mapping *mapping;

        mapping = arm_iommu_create_mapping(&platform_bus_type,
                           SZ_1G, SZ_2G);
        if (IS_ERR(mapping)) {
            dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
            ret = PTR_ERR(mapping);
            goto error;
        }

        mmu->mapping = mapping;
    }

    /* Attach the ARM VA mapping to the device. */
    ret = arm_iommu_attach_device(dev, mmu->mapping);
    if (ret < 0) {
        dev_err(dev, "Failed to attach device to VA mapping\n");
        goto error;
    }

    return 0;

error:
    if (mmu->mapping)
        arm_iommu_release_mapping(mmu->mapping);

    return ret;
}

static struct iommu_device *ipmmu_probe_device(struct device *dev)
{
    struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

    /*
     * Only let through devices that have been verified in xlate()
     */
    if (!mmu)
        return ERR_PTR(-ENODEV);

    return &mmu->iommu;
}

static void ipmmu_probe_finalize(struct device *dev)
{
    int ret = 0;

    if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
        ret = ipmmu_init_arm_mapping(dev);

    if (ret)
        dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void ipmmu_release_device(struct device *dev)
{
    arm_iommu_detach_device(dev);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
    struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
    struct iommu_group *group;

    if (mmu->group)
        return iommu_group_ref_get(mmu->group);

    group = iommu_group_alloc();
    if (!IS_ERR(group))
        mmu->group = group;

    return group;
}

static const struct iommu_ops ipmmu_ops = {
    .domain_alloc = ipmmu_domain_alloc,
    .probe_device = ipmmu_probe_device,
    .release_device = ipmmu_release_device,
    .probe_finalize = ipmmu_probe_finalize,
    .device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
            ? generic_device_group : ipmmu_find_group,
    .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
    .of_xlate = ipmmu_of_xlate,
    .default_domain_ops = &(const struct iommu_domain_ops) {
        .attach_dev = ipmmu_attach_device,
        .detach_dev = ipmmu_detach_device,
        .map        = ipmmu_map,
        .unmap      = ipmmu_unmap,
        .flush_iotlb_all = ipmmu_flush_iotlb_all,
        .iotlb_sync = ipmmu_iotlb_sync,
        .iova_to_phys   = ipmmu_iova_to_phys,
        .free       = ipmmu_domain_free,
    }
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
    unsigned int i;

    /* Disable all contexts. */
    for (i = 0; i < mmu->num_ctx; ++i)
        ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
    .use_ns_alias_offset = true,
    .has_cache_leaf_nodes = false,
    .number_of_contexts = 1, /* software only tested with one context */
    .num_utlbs = 32,
    .setup_imbuscr = true,
    .twobit_imttbcr_sl0 = false,
    .reserved_context = false,
    .cache_snoop = true,
    .ctx_offset_base = 0,
    .ctx_offset_stride = 0x40,
    .utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
    .use_ns_alias_offset = false,
    .has_cache_leaf_nodes = true,
    .number_of_contexts = 8,
    .num_utlbs = 48,
    .setup_imbuscr = false,
    .twobit_imttbcr_sl0 = true,
    .reserved_context = true,
    .cache_snoop = false,
    .ctx_offset_base = 0,
    .ctx_offset_stride = 0x40,
    .utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen4 = {
    .use_ns_alias_offset = false,
    .has_cache_leaf_nodes = true,
    .number_of_contexts = 16,
    .num_utlbs = 64,
    .setup_imbuscr = false,
    .twobit_imttbcr_sl0 = true,
    .reserved_context = true,
    .cache_snoop = false,
    .ctx_offset_base = 0x10000,
    .ctx_offset_stride = 0x1040,
    .utlb_offset_base = 0x3000,
};

static const struct of_device_id ipmmu_of_ids[] = {
    {
        .compatible = "renesas,ipmmu-vmsa",
        .data = &ipmmu_features_default,
    }, {
        .compatible = "renesas,ipmmu-r8a774a1",
        .data = &ipmmu_features_rcar_gen3,
    }, {
        .compatible = "renesas,ipmmu-r8a774b1",
        .data = &ipmmu_features_rcar_gen3,
    }, {
        .compatible = "renesas,ipmmu-r8a774c0",
        .data = &ipmmu_features_rcar_gen3,
    }, {
        .compatible = "renesas,ipmmu-r8a774e1",
        .data = &ipmmu_features_rcar_gen3,
    }, {
        .compatible = "renesas,ipmmu-r8a7795",
        .data = &ipmmu_features_rcar_gen3,
    }, {
        .compatible = "renesas,ipmmu-r8a7796",
        .data = &ipmmu_features_rcar_gen3,
    }, {
        .compatible = "renesas,ipmmu-r8a77961",
        .data = &ipmmu_features_rcar_gen3,
    }, {
        .compatible = "renesas,ipmmu-r8a77965",
        .data = &ipmmu_features_rcar_gen3,
    }, {
        .compatible = "renesas,ipmmu-r8a77970",
        .data = &ipmmu_features_rcar_gen3,
    }, {
        .compatible = "renesas,ipmmu-r8a77980",
        .data = &ipmmu_features_rcar_gen3,
    }, {
        .compatible = "renesas,ipmmu-r8a77990",
        .data = &ipmmu_features_rcar_gen3,
    }, {
        .compatible = "renesas,ipmmu-r8a77995",
        .data = &ipmmu_features_rcar_gen3,
    }, {
        .compatible = "renesas,ipmmu-r8a779a0",
        .data = &ipmmu_features_rcar_gen4,
    }, {
        .compatible = "renesas,rcar-gen4-ipmmu-vmsa",
        .data = &ipmmu_features_rcar_gen4,
    }, {
        /* Terminator */
    },
};

static int ipmmu_probe(struct platform_device *pdev)
{
    struct ipmmu_vmsa_device *mmu;
    struct resource *res;
    int irq;
    int ret;

    mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
    if (!mmu) {
        dev_err(&pdev->dev, "cannot allocate device data\n");
        return -ENOMEM;
    }

    mmu->dev = &pdev->dev;
    spin_lock_init(&mmu->lock);
    bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
    mmu->features = of_device_get_match_data(&pdev->dev);
    memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
    ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
    if (ret)
        return ret;

    /* Map I/O memory and request IRQ. */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    mmu->base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(mmu->base))
        return PTR_ERR(mmu->base);

    /*
     * The IPMMU has two register banks, for secure and non-secure modes.
     * The bank mapped at the beginning of the IPMMU address space
     * corresponds to the running mode of the CPU. When running in secure
     * mode the non-secure register bank is also available at an offset.
     *
     * Secure mode operation isn't clearly documented and is thus currently
     * not implemented in the driver. Furthermore, preliminary tests of
     * non-secure operation with the main register bank were not successful.
     * Offset the registers base unconditionally to point to the non-secure
     * alias space for now.
     */
    if (mmu->features->use_ns_alias_offset)
        mmu->base += IM_NS_ALIAS_OFFSET;

    mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

    /*
     * Determine whether this IPMMU instance is a root device, i.e. whether
     * it lacks the has_cache_leaf_nodes feature flag or the
     * renesas,ipmmu-main property.
     */
    if (!mmu->features->has_cache_leaf_nodes ||
        !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
        mmu->root = mmu;
    else
        mmu->root = ipmmu_find_root();

    /*
     * Wait until the root device has been registered for sure.
     */
    if (!mmu->root)
        return -EPROBE_DEFER;

    /* Root devices have mandatory IRQs */
    if (ipmmu_is_root(mmu)) {
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
            return irq;

        ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
                       dev_name(&pdev->dev), mmu);
        if (ret < 0) {
            dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
            return ret;
        }

        ipmmu_device_reset(mmu);

        if (mmu->features->reserved_context) {
            dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
            set_bit(0, mmu->ctx);
        }
    }

    /*
     * Register the IPMMU to the IOMMU subsystem in the following cases:
     * - R-Car Gen2 IPMMU (all devices registered)
     * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
     */
    if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
        ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
                         dev_name(&pdev->dev));
        if (ret)
            return ret;

        ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
        if (ret)
            return ret;

#if defined(CONFIG_IOMMU_DMA)
        if (!iommu_present(&platform_bus_type))
            bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
    }

    /*
     * We can't create the ARM mapping here as it requires the bus to have
     * an IOMMU, which only happens when bus_set_iommu() is called in
     * ipmmu_init() after the probe function returns.
     */

    platform_set_drvdata(pdev, mmu);

    return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
    struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

    iommu_device_sysfs_remove(&mmu->iommu);
    iommu_device_unregister(&mmu->iommu);

    arm_iommu_release_mapping(mmu->mapping);

    ipmmu_device_reset(mmu);

    return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipmmu_resume_noirq(struct device *dev)
{
    struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
    unsigned int i;

    /* Reset root MMU and restore contexts */
    if (ipmmu_is_root(mmu)) {
        ipmmu_device_reset(mmu);

        for (i = 0; i < mmu->num_ctx; i++) {
            if (!mmu->domains[i])
                continue;

            ipmmu_domain_setup_context(mmu->domains[i]);
        }
    }

    /* Re-enable active micro-TLBs */
    for (i = 0; i < mmu->features->num_utlbs; i++) {
        if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
            continue;

        ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
    }

    return 0;
}
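
/*
 * Only a resume_noirq handler is registered; no suspend hook is needed. The
 * assumption is that IPMMU register state may be lost across system
 * suspend, so resume re-resets the root instance, rewrites the context
 * registers of every live domain and re-enables the micro-TLBs recorded in
 * utlb_ctx[]. The page tables themselves live in RAM and are unaffected.
 */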

static const struct dev_pm_ops ipmmu_pm = {
    SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS  &ipmmu_pm
#else
#define DEV_PM_OPS  NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
    .driver = {
        .name = "ipmmu-vmsa",
        .of_match_table = of_match_ptr(ipmmu_of_ids),
        .pm = DEV_PM_OPS,
    },
    .probe = ipmmu_probe,
    .remove = ipmmu_remove,
};

static int __init ipmmu_init(void)
{
    struct device_node *np;
    static bool setup_done;
    int ret;

    if (setup_done)
        return 0;

    np = of_find_matching_node(NULL, ipmmu_of_ids);
    if (!np)
        return 0;

    of_node_put(np);

    ret = platform_driver_register(&ipmmu_driver);
    if (ret < 0)
        return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
    if (!iommu_present(&platform_bus_type))
        bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

    setup_done = true;
    return 0;
}
subsys_initcall(ipmmu_init);