// SPDX-License-Identifier: GPL-2.0-only
// Miscellaneous Arm SMMU implementation and integration quirks
// Copyright (C) 2019 Arm Limited

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/bitfield.h>
#include <linux/of.h>

#include "arm-smmu.h"
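
/*
 * Implementations with the "calxeda,smmu-secure-config-access" DT property
 * must program the SMMU through the non-secure aliases of the secure-banked
 * GR0 registers, which the SMMUv1/v2 architecture places 0x400 bytes above
 * their secure counterparts.
 */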
static int arm_smmu_gr0_ns(int offset)
{
	switch (offset) {
	case ARM_SMMU_GR0_sCR0:
	case ARM_SMMU_GR0_sACR:
	case ARM_SMMU_GR0_sGFSR:
	case ARM_SMMU_GR0_sGFSYNR0:
	case ARM_SMMU_GR0_sGFSYNR1:
	case ARM_SMMU_GR0_sGFSYNR2:
		return offset + 0x400;
	default:
		return offset;
	}
}

static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page,
			    int offset)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	return readl_relaxed(arm_smmu_page(smmu, page) + offset);
}

static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page,
			      int offset, u32 val)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
}

/* Since we don't care for sGFAR, we can do without 64-bit accessors */
static const struct arm_smmu_impl calxeda_impl = {
	.read_reg = arm_smmu_read_ns,
	.write_reg = arm_smmu_write_ns,
};
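
/*
 * Cavium ThunderX (CN88xx) wrapper: the generic arm_smmu_device is embedded
 * at offset 0 so the callbacks below can recover the wrapper with
 * container_of(). id_base is this SMMU's slice of the system-wide ASID/VMID
 * space (see the erratum 27704 comment below).
 */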
struct cavium_smmu {
	struct arm_smmu_device smmu;
	u32 id_base;
};

static int cavium_cfg_probe(struct arm_smmu_device *smmu)
{
	static atomic_t context_count = ATOMIC_INIT(0);
	struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
	dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");

	return 0;
}

static int cavium_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	struct cavium_smmu *cs = container_of(smmu_domain->smmu,
					      struct cavium_smmu, smmu);

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		smmu_domain->cfg.vmid += cs->id_base;
	else
		smmu_domain->cfg.asid += cs->id_base;

	return 0;
}

static const struct arm_smmu_impl cavium_impl = {
	.cfg_probe = cavium_cfg_probe,
	.init_context = cavium_init_context,
};

static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct cavium_smmu *cs;
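
	/*
	 * Grow the original devres allocation to the wrapper size; since the
	 * generic struct sits at offset 0 of struct cavium_smmu, &cs->smmu is
	 * simply the (possibly moved) reallocation, handed back to the caller.
	 */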
	cs = devm_krealloc(smmu->dev, smmu, sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	cs->smmu.impl = &cavium_impl;

	return &cs->smmu;
}
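
/*
 * Implementation-defined MMU-500 control bits: ACTLR.CPRE enables the
 * next-page prefetcher (cleared below for errata #841119/#826419), while
 * the (s)ACR bits control TLB entry allocation and the cache lock
 * described in arm_mmu500_reset().
 */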
#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
	u32 reg, major;
	int i;
	/*
	 * On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before
	 * writes to the context bank ACTLRs will stick. And we just hope that
	 * Secure has also cleared SACR.CACHE_LOCK for this to take effect...
	 */
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
	major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg);
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
	if (major >= 2)
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
	/*
	 * Allow unmatched Stream IDs to allocate bypass
	 * TLB entries for reduced latency.
	 */
	reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);

	/*
	 * Disable MMU-500's not-particularly-beneficial next-page
	 * prefetcher for the sake of errata #841119 and #826419.
	 */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
		reg &= ~ARM_MMU500_ACTLR_CPRE;
		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
	}

	return 0;
}

static const struct arm_smmu_impl arm_mmu500_impl = {
	.reset = arm_mmu500_reset,
};
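
/*
 * Marvell Armada-AP806 erratum #582743: 64-bit accesses to the SMMU do not
 * survive the interconnect, so every register access must be 32 bits wide.
 * The hi_lo_*q_relaxed() helpers split each 64-bit access into two 32-bit
 * ones (high word first).
 */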
static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off)
{
	/*
	 * Marvell Armada-AP806 erratum #582743.
	 * Split each readq into two readl accesses.
	 */
	return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off);
}

static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, int off,
			       u64 val)
{
	/*
	 * Marvell Armada-AP806 erratum #582743.
	 * Split each writeq into two writel accesses.
	 */
	hi_lo_writeq_relaxed(val, arm_smmu_page(smmu, page) + off);
}

static int mrvl_mmu500_cfg_probe(struct arm_smmu_device *smmu)
{
	/*
	 * Armada-AP806 erratum #582743.
	 * Hide the SMMU_IDR2.PTFSv8 fields to sidestep the AArch64
	 * formats altogether and allow using 32-bit accesses on the
	 * interconnect.
	 */
	smmu->features &= ~(ARM_SMMU_FEAT_FMT_AARCH64_4K |
			    ARM_SMMU_FEAT_FMT_AARCH64_16K |
			    ARM_SMMU_FEAT_FMT_AARCH64_64K);

	return 0;
}

static const struct arm_smmu_impl mrvl_mmu500_impl = {
	.read_reg64 = mrvl_mmu500_readq,
	.write_reg64 = mrvl_mmu500_writeq,
	.cfg_probe = mrvl_mmu500_cfg_probe,
	.reset = arm_mmu500_reset,
};
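
/*
 * Called from the driver probe path: pick model-level quirks first, then
 * let DT-based integration quirks override or wrap them. Implementations
 * may hand back a replacement (wrapped) device or an ERR_PTR.
 */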
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	/*
	 * Set the impl for model-specific implementation quirks first,
	 * such that platform integration quirks can pick it up and
	 * inherit from it if necessary.
	 */
	switch (smmu->model) {
	case ARM_MMU500:
		smmu->impl = &arm_mmu500_impl;
		break;
	case CAVIUM_SMMUV2:
		return cavium_smmu_impl_init(smmu);
	default:
		break;
	}

	/* This is implicitly MMU-400 */
	if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
		smmu->impl = &calxeda_impl;

	if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
	    of_device_is_compatible(np, "nvidia,tegra194-smmu") ||
	    of_device_is_compatible(np, "nvidia,tegra186-smmu"))
		return nvidia_smmu_impl_init(smmu);

	if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM))
		smmu = qcom_smmu_impl_init(smmu);

	if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
		smmu->impl = &mrvl_mmu500_impl;

	return smmu;
}