// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
#include <linux/dma-mapping.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"
#include "adreno/a2xx.xml.h"

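/*
 * GPU-side MMU ("gpummu") for older adreno (a2xx) parts without an IOMMU:
 * the GPU's MH block walks a single flat, physically contiguous table with
 * one 32-bit entry per 4K GPU page, covering a VA window starting at 16M.
 */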
struct msm_gpummu {
	struct msm_mmu base;
	struct msm_gpu *gpu;
	dma_addr_t pt_base;	/* DMA address of the page table */
	uint32_t *table;	/* kernel mapping of the page table */
};
#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)

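/*
 * One uint32_t page-table entry per 4K page across the whole VA range:
 * TABLE_SIZE = 4 * (0xfff * SZ_64K) / SZ_4K = 262080 bytes, i.e. 0xfff
 * 64K chunks of VA with 16 entries per chunk.
 */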
#define GPUMMU_VA_START SZ_16M
#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
#define GPUMMU_PAGE_SIZE SZ_4K
#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)

static void msm_gpummu_detach(struct msm_mmu *mmu)
{
}

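/* Map @len bytes of @sgt at @iova, then flush the MMU TLB. */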
static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
	struct sg_dma_page_iter dma_iter;
	unsigned prot_bits = 0;

	/* in each PTE, bit 0 enables write access and bit 1 read access */
	if (prot & IOMMU_WRITE)
		prot_bits |= 1;
	if (prot & IOMMU_READ)
		prot_bits |= 2;

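	/*
	 * A DMA page is PAGE_SIZE bytes, which may span several 4K GPU
	 * pages, so one iteration can fill multiple consecutive entries.
	 */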
	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
		int i;

		for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
			gpummu->table[idx++] = (addr + i) | prot_bits;
	}

	/* we can improve by deferring flush for multiple map() */
	gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
	return 0;
}

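/* Zero the PTEs covering [iova, iova + len), then flush the MMU TLB. */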
static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
	unsigned i;

	for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
		gpummu->table[idx] = 0;

	gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
		A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
	return 0;
}

static void msm_gpummu_resume_translation(struct msm_mmu *mmu)
{
}

static void msm_gpummu_destroy(struct msm_mmu *mmu)
{
	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);

	/* size must match the TABLE_SIZE + 32 allocation in msm_gpummu_new() */
	dma_free_attrs(mmu->dev, TABLE_SIZE + 32, gpummu->table, gpummu->pt_base,
		DMA_ATTR_FORCE_CONTIGUOUS);

	kfree(gpummu);
}

static const struct msm_mmu_funcs funcs = {
	.detach = msm_gpummu_detach,
	.map = msm_gpummu_map,
	.unmap = msm_gpummu_unmap,
	.destroy = msm_gpummu_destroy,
	.resume_translation = msm_gpummu_resume_translation,
};

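/*
 * Note: 32 spare bytes are allocated past the end of the table; that tail
 * is reported as the translation-error address by msm_gpummu_params().
 */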
struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
{
	struct msm_gpummu *gpummu;

	gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
	if (!gpummu)
		return ERR_PTR(-ENOMEM);

	gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
		GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
	if (!gpummu->table) {
		kfree(gpummu);
		return ERR_PTR(-ENOMEM);
	}

	gpummu->gpu = gpu;
	msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU);

	return &gpummu->base;
}

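/*
 * Report what the GPU needs to program its MMU: the DMA address of the
 * page table, and a translation-error address (the spare tail just past
 * the table) for the hardware to use on faulting accesses.
 */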
void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
		dma_addr_t *tran_error)
{
	dma_addr_t base = to_msm_gpummu(mmu)->pt_base;

	*pt_base = base;
	*tran_error = base + TABLE_SIZE;
}