// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2017-2018 Broadcom */

#include "v3d_drv.h"
#include "v3d_regs.h"

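/* The MMU maps in fixed 4KB pages, independent of the CPU's
 * PAGE_SIZE; v3d_mmu_insert_ptes() expands each CPU page into the
 * corresponding number of 4KB PTEs.
 */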
#define V3D_MMU_PAGE_SHIFT 12

/* Note: All PTEs for the 1MB superpage must be filled with the
 * superpage bit set.
 */
#define V3D_PTE_SUPERPAGE BIT(31)
#define V3D_PTE_WRITEABLE BIT(29)
#define V3D_PTE_VALID BIT(28)

static int v3d_mmu_flush_all(struct v3d_dev *v3d)
{
	int ret;

	/* Make sure that another flush isn't already running when we
	 * start this one.
	 */
	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
			 V3D_MMU_CTL_TLB_CLEARING), 100);
	if (ret)
		dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n");

	V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
		  V3D_MMU_CTL_TLB_CLEAR);
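
	/* Also flush the MMU cache (MMUC); both this and the TLB clear
	 * above must drain before the mapping update is visible.
	 */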
	V3D_WRITE(V3D_MMUC_CONTROL,
		  V3D_MMUC_CONTROL_FLUSH |
		  V3D_MMUC_CONTROL_ENABLE);

	ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
			 V3D_MMU_CTL_TLB_CLEARING), 100);
	if (ret) {
		dev_err(v3d->drm.dev, "TLB clear wait idle failed\n");
		return ret;
	}

	ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
			 V3D_MMUC_CONTROL_FLUSHING), 100);
	if (ret)
		dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");

	return ret;
}

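/* Point the MMU at the single, global page table, enable interrupts
 * and aborts on invalid and out-of-bounds accesses, and flush so the
 * hardware picks the table up.
 */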
int v3d_mmu_set_page_table(struct v3d_dev *v3d)
{
	V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT);
	V3D_WRITE(V3D_MMU_CTL,
		  V3D_MMU_CTL_ENABLE |
		  V3D_MMU_CTL_PT_INVALID_ENABLE |
		  V3D_MMU_CTL_PT_INVALID_ABORT |
		  V3D_MMU_CTL_PT_INVALID_INT |
		  V3D_MMU_CTL_WRITE_VIOLATION_ABORT |
		  V3D_MMU_CTL_WRITE_VIOLATION_INT |
		  V3D_MMU_CTL_CAP_EXCEEDED_ABORT |
		  V3D_MMU_CTL_CAP_EXCEEDED_INT);
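	/* Aim transactions that hit an invalid PTE at a dedicated
	 * scratch page, so faulting clients land somewhere harmless.
	 */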
	V3D_WRITE(V3D_MMU_ILLEGAL_ADDR,
		  (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) |
		  V3D_MMU_ILLEGAL_ADDR_ENABLE);
	V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE);

	return v3d_mmu_flush_all(v3d);
}

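/* Write PTEs for every page backing @bo into its reserved range of
 * the page table, then flush so the GPU observes the new mapping.
 */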
void v3d_mmu_insert_ptes(struct v3d_bo *bo)
{
	struct drm_gem_shmem_object *shmem_obj = &bo->base;
	struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
	u32 page = bo->node.start;
	u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
	struct sg_dma_page_iter dma_iter;

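	/* Each CPU page expands to PAGE_SIZE >> V3D_MMU_PAGE_SHIFT
	 * consecutive 4KB PTEs; the BUG_ON guards against overflowing
	 * the 24-bit page-address field of the PTE.
	 */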
	for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) {
		dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter);
		u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT;
		u32 pte = page_prot | page_address;
		u32 i;

		BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >=
		       BIT(24));
		for (i = 0; i < PAGE_SIZE >> V3D_MMU_PAGE_SHIFT; i++)
			v3d->pt[page++] = pte + i;
	}

	WARN_ON_ONCE(page - bo->node.start !=
		     shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);

	if (v3d_mmu_flush_all(v3d))
		dev_err(v3d->drm.dev, "MMU flush timeout\n");
}

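/* Zero out the PTEs covering @bo's address range and flush, leaving
 * the range unmapped.
 */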
void v3d_mmu_remove_ptes(struct v3d_bo *bo)
{
	struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev);
	u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
	u32 page;

	for (page = bo->node.start; page < bo->node.start + npages; page++)
		v3d->pt[page] = 0;

	if (v3d_mmu_flush_all(v3d))
		dev_err(v3d->drm.dev, "MMU flush timeout\n");
}