// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */

#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "state_hi.xml.h"

#define PT_SIZE		SZ_2M
#define PT_ENTRIES	(PT_SIZE / sizeof(u32))

#define GPU_MEM_START	0x80000000
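
/*
 * Layout implied by the defines above: the page table is one flat array of
 * PT_ENTRIES = SZ_2M / sizeof(u32) = 524288 entries. Each 32-bit entry
 * translates a single 4K page, so the table covers 524288 * SZ_4K = 2 GiB
 * of GPU virtual address space: the window from GPU_MEM_START (0x80000000)
 * to the top of the 32-bit address space.
 */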

struct etnaviv_iommuv1_context {
	struct etnaviv_iommu_context base;
	u32 *pgtable_cpu;	/* kernel mapping of the page table */
	dma_addr_t pgtable_dma;	/* bus address programmed into the MC */
};

/* downcast from the generic context embedded as "base" */
static struct etnaviv_iommuv1_context *
to_v1_context(struct etnaviv_iommu_context *context)
{
	return container_of(context, struct etnaviv_iommuv1_context, base);
}

static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

	drm_mm_takedown(&context->mm);

	dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
		    v1_context->pgtable_dma);

	context->global->v1.shared_context = NULL;

	kfree(v1_context);
}

static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	if (size != SZ_4K)
		return -EINVAL;

	v1_context->pgtable_cpu[index] = paddr;

	return 0;
}

static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
				    unsigned long iova, size_t size)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	if (size != SZ_4K)
		return -EINVAL;

	/* point the entry back at the bad (scratch) page rather than
	 * leaving a stale translation in place */
	v1_context->pgtable_cpu[index] = context->global->bad_page_dma;

	return SZ_4K;
}
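
/*
 * Illustrative sketch, not part of the driver: one way a caller could drive
 * the strictly 4K-at-a-time map/unmap interface above for a physically
 * contiguous range. etnaviv_example_map_range() is a hypothetical helper
 * invented for illustration; the real driver maps scatterlists through the
 * generic etnaviv MMU layer instead. prot is passed as 0 since the v1
 * implementation above ignores it.
 */
static int __maybe_unused
etnaviv_example_map_range(struct etnaviv_iommu_context *context,
			  unsigned long iova, phys_addr_t paddr, size_t len)
{
	size_t mapped;
	int ret;

	for (mapped = 0; mapped < len; mapped += SZ_4K) {
		ret = etnaviv_iommuv1_map(context, iova + mapped,
					  paddr + mapped, SZ_4K, 0);
		if (ret) {
			/* unwind the pages mapped so far */
			while (mapped) {
				mapped -= SZ_4K;
				etnaviv_iommuv1_unmap(context, iova + mapped,
						      SZ_4K);
			}
			return ret;
		}
	}

	return 0;
}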

static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context)
{
	return PT_SIZE;
}

static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
				 void *buf)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);

	memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
}
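
/*
 * Illustrative sketch, not part of the driver: the calling pattern implied
 * by the two dump callbacks above, as a coredump-style consumer might use
 * them. etnaviv_example_dump() is a hypothetical helper, and the
 * kvmalloc/kvfree pairing is an assumption made for the example.
 */
static void __maybe_unused
etnaviv_example_dump(struct etnaviv_iommu_context *context)
{
	size_t size = etnaviv_iommuv1_dump_size(context);
	void *buf = kvmalloc(size, GFP_KERNEL);

	if (!buf)
		return;

	etnaviv_iommuv1_dump(context, buf);
	/* ... hand "buf" to the dump consumer here ... */
	kvfree(buf);
}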

static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
				    struct etnaviv_iommu_context *context)
{
	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
	u32 pgtable;

	if (gpu->mmu_context)
		etnaviv_iommu_context_put(gpu->mmu_context);
	gpu->mmu_context = etnaviv_iommu_context_get(context);

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, context->global->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, context->global->memory_base);

	/* set page table address in MC */
	pgtable = (u32)v1_context->pgtable_dma;

	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}

const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
	.free = etnaviv_iommuv1_free,
	.map = etnaviv_iommuv1_map,
	.unmap = etnaviv_iommuv1_unmap,
	.dump_size = etnaviv_iommuv1_dump_size,
	.dump = etnaviv_iommuv1_dump,
	.restore = etnaviv_iommuv1_restore,
};
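
/*
 * These ops are typically invoked indirectly: the generic etnaviv MMU layer
 * dispatches map/unmap/dump/restore through context->global->ops, selecting
 * this table when the hardware implements MMUv1.
 */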

struct etnaviv_iommu_context *
etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
{
	struct etnaviv_iommuv1_context *v1_context;
	struct etnaviv_iommu_context *context;

	mutex_lock(&global->lock);

	/*
	 * MMUv1 does not support switching between different contexts without
	 * a stop/restart of the GPU, so all buffers need to be mapped at the
	 * same addresses in all contexts, which means a single shared context
	 * is the only option. If it already exists, return a reference to it;
	 * otherwise create the shared context below.
	 */
	if (global->v1.shared_context) {
		context = global->v1.shared_context;
		etnaviv_iommu_context_get(context);
		mutex_unlock(&global->lock);
		return context;
	}

	v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
	if (!v1_context) {
		mutex_unlock(&global->lock);
		return NULL;
	}

	v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
					       &v1_context->pgtable_dma,
					       GFP_KERNEL);
	if (!v1_context->pgtable_cpu)
		goto out_free;

	/* start with every entry pointing at the bad (scratch) page */
	memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);

	context = &v1_context->base;
	context->global = global;
	kref_init(&context->refcount);
	mutex_init(&context->lock);
	INIT_LIST_HEAD(&context->mappings);
	drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
	context->global->v1.shared_context = context;

	mutex_unlock(&global->lock);

	return context;

out_free:
	mutex_unlock(&global->lock);
	kfree(v1_context);
	return NULL;
}
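
/*
 * Illustrative sketch, not part of the driver: pairing context allocation
 * with a restore so the GPU starts fetching through the shared page table.
 * etnaviv_example_attach() is a hypothetical helper invented for
 * illustration; the caller keeps the reference returned by
 * etnaviv_iommuv1_context_alloc() and would drop it on teardown with
 * etnaviv_iommu_context_put().
 */
static int __maybe_unused
etnaviv_example_attach(struct etnaviv_gpu *gpu,
		       struct etnaviv_iommu_global *global)
{
	struct etnaviv_iommu_context *context;

	context = etnaviv_iommuv1_context_alloc(global);
	if (!context)
		return -ENOMEM;

	/* program the MC base addresses and per-engine page table registers */
	etnaviv_iommuv1_restore(gpu, context);

	return 0;
}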