// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2018 Etnaviv Project
 */

#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "state.xml.h"
#include "state_hi.xml.h"

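/*
 * The MMUv2 pagetable is a two-level structure: the master TLB (MTLB)
 * holds 1024 entries, each covering a 4MB slice of the 32-bit GPU
 * address space and pointing to a slave TLB (STLB) page of 1024 4KB
 * page entries, per the mask/shift definitions below.
 */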
#define MMUv2_PTE_PRESENT		BIT(0)
#define MMUv2_PTE_EXCEPTION		BIT(1)
#define MMUv2_PTE_WRITEABLE		BIT(2)

#define MMUv2_MTLB_MASK			0xffc00000
#define MMUv2_MTLB_SHIFT		22
#define MMUv2_STLB_MASK			0x003ff000
#define MMUv2_STLB_SHIFT		12

#define MMUv2_MAX_STLB_ENTRIES		1024

struct etnaviv_iommuv2_context {
	struct etnaviv_iommu_context base;
	unsigned short id;
	/* M(aster) TLB aka first level pagetable */
	u32 *mtlb_cpu;
	dma_addr_t mtlb_dma;
	/* S(lave) TLB aka second level pagetable */
	u32 *stlb_cpu[MMUv2_MAX_STLB_ENTRIES];
	dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES];
};

static struct etnaviv_iommuv2_context *
to_v2_context(struct etnaviv_iommu_context *context)
{
	return container_of(context, struct etnaviv_iommuv2_context, base);
}

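/* Free all STLB pages and the MTLB page, then release the PTA slot. */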
static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	int i;

	drm_mm_takedown(&context->mm);

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) {
		if (v2_context->stlb_cpu[i])
			dma_free_wc(context->global->dev, SZ_4K,
				    v2_context->stlb_cpu[i],
				    v2_context->stlb_dma[i]);
	}

	dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu,
		    v2_context->mtlb_dma);

	clear_bit(v2_context->id, context->global->v2.pta_alloc);

	vfree(v2_context);
}

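/*
 * STLB pages are allocated lazily: the first mapping that hits a 4MB
 * region allocates the second level pagetable with all entries set to
 * the exception value and hooks it into the MTLB entry.
 */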
static int
etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_context *v2_context,
			    int stlb)
{
	if (v2_context->stlb_cpu[stlb])
		return 0;

	v2_context->stlb_cpu[stlb] =
			dma_alloc_wc(v2_context->base.global->dev, SZ_4K,
				     &v2_context->stlb_dma[stlb],
				     GFP_KERNEL);

	if (!v2_context->stlb_cpu[stlb])
		return -ENOMEM;

	memset32(v2_context->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION,
		 SZ_4K / sizeof(u32));

	v2_context->mtlb_cpu[stlb] =
			v2_context->stlb_dma[stlb] | MMUv2_PTE_PRESENT;

	return 0;
}

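/*
 * Mappings are established one 4KB page at a time. On hardware with
 * 40-bit physical addressing, bits 39:32 of the physical address are
 * stored in PTE bits 11:4.
 */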
static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	int mtlb_entry, stlb_entry, ret;
	u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT;

	if (size != SZ_4K)
		return -EINVAL;

	if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
		entry |= (upper_32_bits(paddr) & 0xff) << 4;

	if (prot & ETNAVIV_PROT_WRITE)
		entry |= MMUv2_PTE_WRITEABLE;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	ret = etnaviv_iommuv2_ensure_stlb(v2_context, mtlb_entry);
	if (ret)
		return ret;

	v2_context->stlb_cpu[mtlb_entry][stlb_entry] = entry;

	return 0;
}

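/*
 * Unmapping only rewrites the PTE to the exception value; the STLB
 * page itself stays allocated until the context is freed.
 */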
static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_context *context,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	int mtlb_entry, stlb_entry;

	if (size != SZ_4K)
		return -EINVAL;

	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;

	v2_context->stlb_cpu[mtlb_entry][stlb_entry] = MMUv2_PTE_EXCEPTION;

	return SZ_4K;
}

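/*
 * A pagetable dump covers the MTLB page plus one 4KB page for every
 * STLB that has been populated.
 */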
static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	size_t dump_size = SZ_4K;
	int i;

	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
			dump_size += SZ_4K;

	return dump_size;
}

static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *buf)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	int i;

	memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
	buf += SZ_4K;
	for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
		if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
			memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
			buf += SZ_4K;
		}
}

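/*
 * Non-secure restore: the MTLB address is programmed through a command
 * stream fragment executed by the FE before the MMU is enabled.
 */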
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
					   struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE)
		return;

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = etnaviv_iommu_context_get(context);

	prefetch = etnaviv_buffer_config_mmuv2(gpu,
				(u32)v2_context->mtlb_dma,
				(u32)context->global->bad_page_dma);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}

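/*
 * Secure restore: the MTLB address is not written directly; instead
 * the PTA entry for this context is filled and the FE executes a
 * command stream fragment that loads it.
 */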
static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu,
					struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);
	u16 prefetch;

	/* If the MMU is already enabled the state is still there. */
	if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
		return;

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = etnaviv_iommu_context_get(context);

	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
		  lower_32_bits(context->global->v2.pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
		  upper_32_bits(context->global->v2.pta_dma));
	gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);

	gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
		  lower_32_bits(context->global->bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
		  lower_32_bits(context->global->bad_page_dma));
	gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(context->global->bad_page_dma)) |
		  VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
		  upper_32_bits(context->global->bad_page_dma)));

	context->global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma |
				VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;

	/* trigger a PTA load through the FE */
	prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id);
	etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
			     prefetch);
	etnaviv_gpu_wait_idle(gpu, 100);

	gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}

u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);

	return v2_context->mtlb_dma;
}

unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv2_context *v2_context = to_v2_context(context);

	return v2_context->id;
}

static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu,
				    struct etnaviv_iommu_context *context)
{
	switch (gpu->sec_mode) {
	case ETNA_SEC_NONE:
		etnaviv_iommuv2_restore_nonsec(gpu, context);
		break;
	case ETNA_SEC_KERNEL:
		etnaviv_iommuv2_restore_sec(gpu, context);
		break;
	default:
		WARN(1, "unhandled GPU security mode\n");
		break;
	}
}

const struct etnaviv_iommu_ops etnaviv_iommuv2_ops = {
	.free = etnaviv_iommuv2_free,
	.map = etnaviv_iommuv2_map,
	.unmap = etnaviv_iommuv2_unmap,
	.dump_size = etnaviv_iommuv2_dump_size,
	.dump = etnaviv_iommuv2_dump,
	.restore = etnaviv_iommuv2_restore,
};

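/*
 * Allocate a new MMUv2 context: reserve a PTA slot under the global
 * lock, allocate the MTLB page with all entries marked as exception
 * and set up the GPU virtual address range, starting at 4K to keep
 * address zero unmapped.
 */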
struct etnaviv_iommu_context *
etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global)
{
	struct etnaviv_iommuv2_context *v2_context;
	struct etnaviv_iommu_context *context;

	v2_context = vzalloc(sizeof(*v2_context));
	if (!v2_context)
		return NULL;

	mutex_lock(&global->lock);
	v2_context->id = find_first_zero_bit(global->v2.pta_alloc,
					     ETNAVIV_PTA_ENTRIES);
	if (v2_context->id < ETNAVIV_PTA_ENTRIES) {
		set_bit(v2_context->id, global->v2.pta_alloc);
	} else {
		mutex_unlock(&global->lock);
		goto out_free;
	}
	mutex_unlock(&global->lock);

	v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K,
					    &v2_context->mtlb_dma, GFP_KERNEL);
	if (!v2_context->mtlb_cpu)
		goto out_free_id;

	memset32(v2_context->mtlb_cpu, MMUv2_PTE_EXCEPTION,
		 MMUv2_MAX_STLB_ENTRIES);

	global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma;

	context = &v2_context->base;
	context->global = global;
	kref_init(&context->refcount);
	mutex_init(&context->lock);
	INIT_LIST_HEAD(&context->mappings);
	drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K);

	return context;

out_free_id:
	clear_bit(v2_context->id, global->v2.pta_alloc);
out_free:
	vfree(v2_context);
	return NULL;
}