
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for QCOM secure IOMMUs.  Somewhat based on arm-smmu.c
 *
 * Copyright (C) 2013 ARM Limited
 * Copyright (C) 2017 Red Hat
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kconfig.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "arm-smmu.h"

#define SMMU_INTR_SEL_NS     0x2000

enum qcom_iommu_clk {
    CLK_IFACE,
    CLK_BUS,
    CLK_TBU,
    CLK_NUM,
};

struct qcom_iommu_ctx;

struct qcom_iommu_dev {
    /* IOMMU core code handle */
    struct iommu_device      iommu;
    struct device           *dev;
    struct clk_bulk_data     clks[CLK_NUM];
    void __iomem            *local_base;
    u32                      sec_id;
    u8                       num_ctxs;
    struct qcom_iommu_ctx   *ctxs[];   /* indexed by asid-1 */
};

struct qcom_iommu_ctx {
    struct device       *dev;
    void __iomem        *base;
    bool                 secure_init;
    u8                   asid;      /* asid and ctx bank # are 1:1 */
    struct iommu_domain *domain;
};

struct qcom_iommu_domain {
    struct io_pgtable_ops   *pgtbl_ops;
    spinlock_t               pgtbl_lock;
    struct mutex             init_mutex; /* Protects iommu pointer */
    struct iommu_domain      domain;
    struct qcom_iommu_dev   *iommu;
    struct iommu_fwspec     *fwspec;
};

static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
{
    return container_of(dom, struct qcom_iommu_domain, domain);
}

static const struct iommu_ops qcom_iommu_ops;

static struct qcom_iommu_dev * to_iommu(struct device *dev)
{
    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

    if (!fwspec || fwspec->ops != &qcom_iommu_ops)
        return NULL;

    return dev_iommu_priv_get(dev);
}

static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
{
    struct qcom_iommu_dev *qcom_iommu = d->iommu;
    if (!qcom_iommu)
        return NULL;
    return qcom_iommu->ctxs[asid - 1];
}

static inline void
iommu_writel(struct qcom_iommu_ctx *ctx, unsigned reg, u32 val)
{
    writel_relaxed(val, ctx->base + reg);
}

static inline void
iommu_writeq(struct qcom_iommu_ctx *ctx, unsigned reg, u64 val)
{
    writeq_relaxed(val, ctx->base + reg);
}

static inline u32
iommu_readl(struct qcom_iommu_ctx *ctx, unsigned reg)
{
    return readl_relaxed(ctx->base + reg);
}

static inline u64
iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
{
    return readq_relaxed(ctx->base + reg);
}

static void qcom_iommu_tlb_sync(void *cookie)
{
    struct qcom_iommu_domain *qcom_domain = cookie;
    struct iommu_fwspec *fwspec = qcom_domain->fwspec;
    unsigned i;

    for (i = 0; i < fwspec->num_ids; i++) {
        struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
        unsigned int val, ret;

        iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);

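        /* poll until the TLBSTATUS sACTIVE bit (bit 0) clears,
         * indicating the invalidation has completed (5s timeout):
         */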
        ret = readl_poll_timeout(ctx->base + ARM_SMMU_CB_TLBSTATUS, val,
                     (val & 0x1) == 0, 0, 5000000);
        if (ret)
            dev_err(ctx->dev, "timeout waiting for TLB SYNC\n");
    }
}

static void qcom_iommu_tlb_inv_context(void *cookie)
{
    struct qcom_iommu_domain *qcom_domain = cookie;
    struct iommu_fwspec *fwspec = qcom_domain->fwspec;
    unsigned i;

    for (i = 0; i < fwspec->num_ids; i++) {
        struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
        iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
    }

    qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
                        size_t granule, bool leaf, void *cookie)
{
    struct qcom_iommu_domain *qcom_domain = cookie;
    struct iommu_fwspec *fwspec = qcom_domain->fwspec;
    unsigned i, reg;

    reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

    for (i = 0; i < fwspec->num_ids; i++) {
        struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
        size_t s = size;

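        /* the TLBIVA(L) register takes VA[31:12] in its upper bits
         * and the asid in its low bits, so mask the iova down to a
         * page boundary and OR in this context's asid:
         */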
        iova = (iova >> 12) << 12;
        iova |= ctx->asid;
        do {
            iommu_writel(ctx, reg, iova);
            iova += granule;
        } while (s -= granule);
    }
}

static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
                      size_t granule, void *cookie)
{
    qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
    qcom_iommu_tlb_sync(cookie);
}

static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
                    unsigned long iova, size_t granule,
                    void *cookie)
{
    qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops qcom_flush_ops = {
    .tlb_flush_all  = qcom_iommu_tlb_inv_context,
    .tlb_flush_walk = qcom_iommu_tlb_flush_walk,
    .tlb_add_page   = qcom_iommu_tlb_add_page,
};

static irqreturn_t qcom_iommu_fault(int irq, void *dev)
{
    struct qcom_iommu_ctx *ctx = dev;
    u32 fsr, fsynr;
    u64 iova;

    fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);

    if (!(fsr & ARM_SMMU_FSR_FAULT))
        return IRQ_NONE;

    fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
    iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

    if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
        dev_err_ratelimited(ctx->dev,
                    "Unhandled context fault: fsr=0x%x, "
                    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
                    fsr, iova, fsynr, ctx->asid);
    }

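    /* FSR is write-one-to-clear; SCTLR.CFCFG stalls faulting
     * transactions, so also terminate the stalled access:
     */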
    iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
    iommu_writel(ctx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);

    return IRQ_HANDLED;
}

static int qcom_iommu_init_domain(struct iommu_domain *domain,
                  struct qcom_iommu_dev *qcom_iommu,
                  struct device *dev)
{
    struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
    struct io_pgtable_ops *pgtbl_ops;
    struct io_pgtable_cfg pgtbl_cfg;
    int i, ret = 0;
    u32 reg;

    mutex_lock(&qcom_domain->init_mutex);
    if (qcom_domain->iommu)
        goto out_unlock;

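    /* 32-bit input and 40-bit output addresses, matching the ARMv7
     * long-descriptor (LPAE) stage-1 format allocated below:
     */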
    pgtbl_cfg = (struct io_pgtable_cfg) {
        .pgsize_bitmap  = qcom_iommu_ops.pgsize_bitmap,
        .ias        = 32,
        .oas        = 40,
        .tlb        = &qcom_flush_ops,
        .iommu_dev  = qcom_iommu->dev,
    };

    qcom_domain->iommu = qcom_iommu;
    qcom_domain->fwspec = fwspec;

    pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
    if (!pgtbl_ops) {
        dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
        ret = -ENOMEM;
        goto out_clear_iommu;
    }

    /* Update the domain's page sizes to reflect the page table format */
    domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
    domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1;
    domain->geometry.force_aperture = true;

    for (i = 0; i < fwspec->num_ids; i++) {
        struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);

        if (!ctx->secure_init) {
            ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
            if (ret) {
                dev_err(qcom_iommu->dev, "secure init failed: %d\n", ret);
                goto out_clear_iommu;
            }
            ctx->secure_init = true;
        }

        /* TTBRs */
        iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
                pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
                FIELD_PREP(ARM_SMMU_TTBRn_ASID, ctx->asid));
        iommu_writeq(ctx, ARM_SMMU_CB_TTBR1, 0);

        /* TCR */
        iommu_writel(ctx, ARM_SMMU_CB_TCR2,
                arm_smmu_lpae_tcr2(&pgtbl_cfg));
        iommu_writel(ctx, ARM_SMMU_CB_TCR,
                 arm_smmu_lpae_tcr(&pgtbl_cfg) | ARM_SMMU_TCR_EAE);

        /* MAIRs (stage-1 only) */
        iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR0,
                pgtbl_cfg.arm_lpae_s1_cfg.mair);
        iommu_writel(ctx, ARM_SMMU_CB_S1_MAIR1,
                pgtbl_cfg.arm_lpae_s1_cfg.mair >> 32);

        /* SCTLR */
        reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE |
              ARM_SMMU_SCTLR_AFE | ARM_SMMU_SCTLR_TRE |
              ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE |
              ARM_SMMU_SCTLR_CFCFG;

        if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
            reg |= ARM_SMMU_SCTLR_E;

        iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);

        ctx->domain = domain;
    }

    mutex_unlock(&qcom_domain->init_mutex);

    /* Publish page table ops for map/unmap */
    qcom_domain->pgtbl_ops = pgtbl_ops;

    return 0;

out_clear_iommu:
    qcom_domain->iommu = NULL;
out_unlock:
    mutex_unlock(&qcom_domain->init_mutex);
    return ret;
}

static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
{
    struct qcom_iommu_domain *qcom_domain;

    if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
        return NULL;
    /*
     * Allocate the domain and initialise some of its data structures.
     * We can't really do anything meaningful until we've added a
     * master.
     */
    qcom_domain = kzalloc(sizeof(*qcom_domain), GFP_KERNEL);
    if (!qcom_domain)
        return NULL;

    mutex_init(&qcom_domain->init_mutex);
    spin_lock_init(&qcom_domain->pgtbl_lock);

    return &qcom_domain->domain;
}

static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
    struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);

    if (qcom_domain->iommu) {
        /*
         * NOTE: unmap can be called after client device is powered
         * off, for example, with GPUs or anything involving dma-buf.
         * So we cannot rely on the device_link.  Make sure the IOMMU
         * is on to avoid unclocked accesses in the TLB inv path:
         */
        pm_runtime_get_sync(qcom_domain->iommu->dev);
        free_io_pgtable_ops(qcom_domain->pgtbl_ops);
        pm_runtime_put_sync(qcom_domain->iommu->dev);
    }

    kfree(qcom_domain);
}

static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
    struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
    struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    int ret;

    if (!qcom_iommu) {
        dev_err(dev, "cannot attach to IOMMU, is it on the same bus?\n");
        return -ENXIO;
    }

    /* Ensure that the domain is finalized */
    pm_runtime_get_sync(qcom_iommu->dev);
    ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
    pm_runtime_put_sync(qcom_iommu->dev);
    if (ret < 0)
        return ret;

    /*
     * Sanity check the domain. We don't support domains across
     * different IOMMUs.
     */
    if (qcom_domain->iommu != qcom_iommu) {
        dev_err(dev, "cannot attach to IOMMU %s while already "
            "attached to domain on IOMMU %s\n",
            dev_name(qcom_domain->iommu->dev),
            dev_name(qcom_iommu->dev));
        return -EINVAL;
    }

    return 0;
}

static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
    struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
    struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
    unsigned i;

    if (WARN_ON(!qcom_domain->iommu))
        return;

    pm_runtime_get_sync(qcom_iommu->dev);
    for (i = 0; i < fwspec->num_ids; i++) {
        struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);

        /* Disable the context bank: */
        iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);

        ctx->domain = NULL;
    }
    pm_runtime_put_sync(qcom_iommu->dev);
}

static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
    int ret;
    unsigned long flags;
    struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

    if (!ops)
        return -ENODEV;

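    /* the page-table code runs under pgtbl_lock with IRQs disabled,
     * so allocate with GFP_ATOMIC regardless of the gfp passed in:
     */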
    spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
    ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC);
    spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
    return ret;
}

static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                   size_t size, struct iommu_iotlb_gather *gather)
{
    size_t ret;
    unsigned long flags;
    struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

    if (!ops)
        return 0;

    /* NOTE: unmap can be called after client device is powered off,
     * for example, with GPUs or anything involving dma-buf.  So we
     * cannot rely on the device_link.  Make sure the IOMMU is on to
     * avoid unclocked accesses in the TLB inv path:
     */
    pm_runtime_get_sync(qcom_domain->iommu->dev);
    spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
    ret = ops->unmap(ops, iova, size, gather);
    spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
    pm_runtime_put_sync(qcom_domain->iommu->dev);

    return ret;
}

static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
    struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
                          struct io_pgtable, ops);
    if (!qcom_domain->pgtbl_ops)
        return;

    pm_runtime_get_sync(qcom_domain->iommu->dev);
    qcom_iommu_tlb_sync(pgtable->cookie);
    pm_runtime_put_sync(qcom_domain->iommu->dev);
}

static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
                  struct iommu_iotlb_gather *gather)
{
    qcom_iommu_flush_iotlb_all(domain);
}

static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
                       dma_addr_t iova)
{
    phys_addr_t ret;
    unsigned long flags;
    struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
    struct io_pgtable_ops *ops = qcom_domain->pgtbl_ops;

    if (!ops)
        return 0;

    spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
    ret = ops->iova_to_phys(ops, iova);
    spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);

    return ret;
}

static bool qcom_iommu_capable(enum iommu_cap cap)
{
    switch (cap) {
    case IOMMU_CAP_CACHE_COHERENCY:
        /*
         * Return true here as the SMMU can always send out coherent
         * requests.
         */
        return true;
    case IOMMU_CAP_NOEXEC:
        return true;
    default:
        return false;
    }
}

static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
{
    struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
    struct device_link *link;

    if (!qcom_iommu)
        return ERR_PTR(-ENODEV);

    /*
     * Establish the link between iommu and master, so that the
     * iommu gets runtime enabled/disabled as per the master's
     * needs.
     */
    link = device_link_add(dev, qcom_iommu->dev, DL_FLAG_PM_RUNTIME);
    if (!link) {
        dev_err(qcom_iommu->dev, "Unable to create device link between %s and %s\n",
            dev_name(qcom_iommu->dev), dev_name(dev));
        return ERR_PTR(-ENODEV);
    }

    return &qcom_iommu->iommu;
}

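/* called while parsing a master's "iommus" property; the single
 * specifier cell (args->args[0]) is the context bank's asid:
 */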
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
    struct qcom_iommu_dev *qcom_iommu;
    struct platform_device *iommu_pdev;
    unsigned asid = args->args[0];

    if (args->args_count != 1) {
        dev_err(dev, "incorrect number of iommu params found for %s "
            "(found %d, expected 1)\n",
            args->np->full_name, args->args_count);
        return -EINVAL;
    }

    iommu_pdev = of_find_device_by_node(args->np);
    if (WARN_ON(!iommu_pdev))
        return -EINVAL;

    qcom_iommu = platform_get_drvdata(iommu_pdev);

    /* make sure the asid specified in dt is valid, so we don't have
     * to sanity check this elsewhere, since 'asid - 1' is used to
     * index into qcom_iommu->ctxs:
     */
    if (WARN_ON(asid < 1) ||
        WARN_ON(asid > qcom_iommu->num_ctxs)) {
        put_device(&iommu_pdev->dev);
        return -EINVAL;
    }

    if (!dev_iommu_priv_get(dev)) {
        dev_iommu_priv_set(dev, qcom_iommu);
    } else {
        /* make sure the device's iommus dt node isn't referring to
         * multiple different iommu devices.  Multiple context
         * banks are ok, but multiple devices are not:
         */
        if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) {
            put_device(&iommu_pdev->dev);
            return -EINVAL;
        }
    }

    return iommu_fwspec_add_ids(dev, &asid, 1);
}

static const struct iommu_ops qcom_iommu_ops = {
    .capable    = qcom_iommu_capable,
    .domain_alloc   = qcom_iommu_domain_alloc,
    .probe_device   = qcom_iommu_probe_device,
    .device_group   = generic_device_group,
    .of_xlate   = qcom_iommu_of_xlate,
    .pgsize_bitmap  = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
    .default_domain_ops = &(const struct iommu_domain_ops) {
        .attach_dev = qcom_iommu_attach_dev,
        .detach_dev = qcom_iommu_detach_dev,
        .map        = qcom_iommu_map,
        .unmap      = qcom_iommu_unmap,
        .flush_iotlb_all = qcom_iommu_flush_iotlb_all,
        .iotlb_sync = qcom_iommu_iotlb_sync,
        .iova_to_phys   = qcom_iommu_iova_to_phys,
        .free       = qcom_iommu_domain_free,
    }
};

static int qcom_iommu_sec_ptbl_init(struct device *dev)
{
    size_t psize = 0;
    unsigned int spare = 0;
    void *cpu_addr;
    dma_addr_t paddr;
    unsigned long attrs;
    static bool allocated = false;
    int ret;

    if (allocated)
        return 0;

    ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize);
    if (ret) {
        dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
            ret);
        return ret;
    }

    dev_info(dev, "iommu sec: pgtable size: %zu\n", psize);

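    /* the table is populated and used only by the secure world, so
     * the kernel never needs a virtual mapping of the buffer:
     */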
    attrs = DMA_ATTR_NO_KERNEL_MAPPING;

    cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs);
    if (!cpu_addr) {
        dev_err(dev, "failed to allocate %zu bytes for pgtable\n",
            psize);
        return -ENOMEM;
    }

    ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare);
    if (ret) {
        dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
        goto free_mem;
    }

    allocated = true;
    return 0;

free_mem:
    dma_free_attrs(dev, psize, cpu_addr, paddr, attrs);
    return ret;
}

static int get_asid(const struct device_node *np)
{
    u32 reg;

    /* read the "reg" property directly to get the relative address
     * of the context bank, and calculate the asid from that:
     */
    if (of_property_read_u32_index(np, "reg", 0, &reg))
        return -ENODEV;

    return reg / 0x1000;      /* context banks are 0x1000 apart */
}
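
/*
 * For reference, an illustrative device-tree fragment for this driver
 * (loosely based on the msm8916 apps_iommu node; the addresses, clocks
 * and secure-id shown here are hypothetical) might look like:
 *
 *    apps_iommu: iommu@1ef0000 {
 *        compatible = "qcom,msm-iommu-v1";
 *        reg = <0x1ef0000 0x3000>;
 *        #address-cells = <1>;
 *        #size-cells = <1>;
 *        #iommu-cells = <1>;
 *        ranges = <0 0x1ef0000 0x3000>;
 *        clocks = <&gcc GCC_SMMU_CFG_CLK>, <&gcc GCC_APSS_TCU_CLK>;
 *        clock-names = "iface", "bus";
 *        qcom,iommu-secure-id = <17>;
 *
 *        iommu-ctx@1000 {
 *            compatible = "qcom,msm-iommu-v1-ns";
 *            reg = <0x1000 0x1000>;  // get_asid(): 0x1000/0x1000 = 1
 *            interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
 *        };
 *    };
 *
 * A master then selects a context bank by asid via its "iommus"
 * property, e.g. "iommus = <&apps_iommu 1>;".
 */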

static int qcom_iommu_ctx_probe(struct platform_device *pdev)
{
    struct qcom_iommu_ctx *ctx;
    struct device *dev = &pdev->dev;
    struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
    struct resource *res;
    int ret, irq;

    ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
    if (!ctx)
        return -ENOMEM;

    ctx->dev = dev;
    platform_set_drvdata(pdev, ctx);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    ctx->base = devm_ioremap_resource(dev, res);
    if (IS_ERR(ctx->base))
        return PTR_ERR(ctx->base);

    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
        return -ENODEV;

    /* clear IRQs before registering fault handler, just in case the
     * boot-loader left us a surprise:
     */
    iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));

    ret = devm_request_irq(dev, irq,
                   qcom_iommu_fault,
                   IRQF_SHARED,
                   "qcom-iommu-fault",
                   ctx);
    if (ret) {
        dev_err(dev, "failed to request IRQ %u\n", irq);
        return ret;
    }

    ret = get_asid(dev->of_node);
    if (ret < 0) {
        dev_err(dev, "missing reg property\n");
        return ret;
    }

    ctx->asid = ret;

    dev_dbg(dev, "found asid %u\n", ctx->asid);

    qcom_iommu->ctxs[ctx->asid - 1] = ctx;

    return 0;
}

static int qcom_iommu_ctx_remove(struct platform_device *pdev)
{
    struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(pdev->dev.parent);
    struct qcom_iommu_ctx *ctx = platform_get_drvdata(pdev);

    platform_set_drvdata(pdev, NULL);

    qcom_iommu->ctxs[ctx->asid - 1] = NULL;

    return 0;
}

static const struct of_device_id ctx_of_match[] = {
    { .compatible = "qcom,msm-iommu-v1-ns" },
    { .compatible = "qcom,msm-iommu-v1-sec" },
    { /* sentinel */ }
};

static struct platform_driver qcom_iommu_ctx_driver = {
    .driver = {
        .name       = "qcom-iommu-ctx",
        .of_match_table = ctx_of_match,
    },
    .probe  = qcom_iommu_ctx_probe,
    .remove = qcom_iommu_ctx_remove,
};

static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
{
    struct device_node *child;

    for_each_child_of_node(qcom_iommu->dev->of_node, child) {
        if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) {
            of_node_put(child);
            return true;
        }
    }

    return false;
}

static int qcom_iommu_device_probe(struct platform_device *pdev)
{
    struct device_node *child;
    struct qcom_iommu_dev *qcom_iommu;
    struct device *dev = &pdev->dev;
    struct resource *res;
    struct clk *clk;
    int ret, max_asid = 0;

    /* find the max asid (which is 1:1 to ctx bank idx), so we know how
     * many child ctx devices we have:
     */
    for_each_child_of_node(dev->of_node, child)
        max_asid = max(max_asid, get_asid(child));

    qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
                  GFP_KERNEL);
    if (!qcom_iommu)
        return -ENOMEM;
    qcom_iommu->num_ctxs = max_asid;
    qcom_iommu->dev = dev;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (res) {
        qcom_iommu->local_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(qcom_iommu->local_base))
            return PTR_ERR(qcom_iommu->local_base);
    }

    clk = devm_clk_get(dev, "iface");
    if (IS_ERR(clk)) {
        dev_err(dev, "failed to get iface clock\n");
        return PTR_ERR(clk);
    }
    qcom_iommu->clks[CLK_IFACE].clk = clk;

    clk = devm_clk_get(dev, "bus");
    if (IS_ERR(clk)) {
        dev_err(dev, "failed to get bus clock\n");
        return PTR_ERR(clk);
    }
    qcom_iommu->clks[CLK_BUS].clk = clk;

    clk = devm_clk_get_optional(dev, "tbu");
    if (IS_ERR(clk)) {
        dev_err(dev, "failed to get tbu clock\n");
        return PTR_ERR(clk);
    }
    qcom_iommu->clks[CLK_TBU].clk = clk;

    if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id",
                 &qcom_iommu->sec_id)) {
        dev_err(dev, "missing qcom,iommu-secure-id property\n");
        return -ENODEV;
    }

    if (qcom_iommu_has_secure_context(qcom_iommu)) {
        ret = qcom_iommu_sec_ptbl_init(dev);
        if (ret) {
            dev_err(dev, "cannot init secure pg table (%d)\n", ret);
            return ret;
        }
    }

    platform_set_drvdata(pdev, qcom_iommu);

    pm_runtime_enable(dev);

    /* register context bank devices, which are child nodes: */
    ret = devm_of_platform_populate(dev);
    if (ret) {
        dev_err(dev, "Failed to populate iommu contexts\n");
        goto err_pm_disable;
    }

    ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
                     dev_name(dev));
    if (ret) {
        dev_err(dev, "Failed to register iommu in sysfs\n");
        goto err_pm_disable;
    }

    ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev);
    if (ret) {
        dev_err(dev, "Failed to register iommu\n");
        goto err_pm_disable;
    }

    bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);

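    /* if the global register space is mapped, route all context-bank
     * interrupts to the non-secure interrupt line:
     */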
    if (qcom_iommu->local_base) {
        pm_runtime_get_sync(dev);
        writel_relaxed(0xffffffff, qcom_iommu->local_base + SMMU_INTR_SEL_NS);
        pm_runtime_put_sync(dev);
    }

    return 0;

err_pm_disable:
    pm_runtime_disable(dev);
    return ret;
}

static int qcom_iommu_device_remove(struct platform_device *pdev)
{
    struct qcom_iommu_dev *qcom_iommu = platform_get_drvdata(pdev);

    bus_set_iommu(&platform_bus_type, NULL);

    pm_runtime_force_suspend(&pdev->dev);
    platform_set_drvdata(pdev, NULL);
    iommu_device_sysfs_remove(&qcom_iommu->iommu);
    iommu_device_unregister(&qcom_iommu->iommu);

    return 0;
}

static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
    struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

    return clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
}

static int __maybe_unused qcom_iommu_suspend(struct device *dev)
{
    struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);

    clk_bulk_disable_unprepare(CLK_NUM, qcom_iommu->clks);

    return 0;
}

static const struct dev_pm_ops qcom_iommu_pm_ops = {
    SET_RUNTIME_PM_OPS(qcom_iommu_suspend, qcom_iommu_resume, NULL)
    SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                pm_runtime_force_resume)
};

static const struct of_device_id qcom_iommu_of_match[] = {
    { .compatible = "qcom,msm-iommu-v1" },
    { /* sentinel */ }
};

static struct platform_driver qcom_iommu_driver = {
    .driver = {
        .name       = "qcom-iommu",
        .of_match_table = qcom_iommu_of_match,
        .pm     = &qcom_iommu_pm_ops,
    },
    .probe  = qcom_iommu_device_probe,
    .remove = qcom_iommu_device_remove,
};

static int __init qcom_iommu_init(void)
{
    int ret;

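    /* register the context-bank driver first, so the ctx devices are
     * ready to probe when the parent device populates its children:
     */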
    ret = platform_driver_register(&qcom_iommu_ctx_driver);
    if (ret)
        return ret;

    ret = platform_driver_register(&qcom_iommu_driver);
    if (ret)
        platform_driver_unregister(&qcom_iommu_ctx_driver);

    return ret;
}
device_initcall(qcom_iommu_init);