// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "lima_device.h"
#include "lima_vm.h"
#include "lima_gem.h"
#include "lima_regs.h"

struct lima_bo_va {
	struct list_head list;
	unsigned int ref_count;

	struct drm_mm_node node;

	struct lima_vm *vm;
};

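/*
 * Address-space layout: a single 4K page directory (pd) covers the
 * whole 32 bit GPU virtual address space, each of its entries pointing
 * at a 4K page table that maps 4M in 4K pages.  Page tables are
 * allocated in blocks of LIMA_VM_NUM_PT_PER_BT contiguous tables
 * (tracked in vm->bts[]), so LIMA_PBE()/LIMA_BTE() select a block and
 * a page table entry within it, while LIMA_PDE()/LIMA_PTE() select a
 * directory entry and an entry of a single table.
 *
 * Worked example with the shifts defined below: for va = 0x12345678,
 * LIMA_PDE(va) = va >> 22 = 0x48 and
 * LIMA_PTE(va) = (va & 0x3fffff) >> 12 = 0x345.
 */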
#define LIMA_VM_PD_SHIFT 22
#define LIMA_VM_PT_SHIFT 12
#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT

#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)

#define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
#define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
#define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)

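/*
 * Clear the page table entries covering [start, end] (end inclusive).
 * The range must already be mapped: the covering page table blocks are
 * dereferenced unconditionally.  Backing blocks are not freed here,
 * only in lima_vm_release().  Callers hold vm->lock.
 */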
static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
{
	u32 addr;

	for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
		u32 pbe = LIMA_PBE(addr);
		u32 bte = LIMA_BTE(addr);

		vm->bts[pbe].cpu[bte] = 0;
	}
}

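/*
 * Enter a single page mapping for va.  The page table block covering
 * va is allocated on demand; when a new block is allocated, all page
 * directory entries it serves are pointed at its successive 4K tables
 * and marked present.  Callers either hold vm->lock or, as in
 * lima_vm_create(), still own the only reference to the vm.
 */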
static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
{
	u32 pbe = LIMA_PBE(va);
	u32 bte = LIMA_BTE(va);

	if (!vm->bts[pbe].cpu) {
		dma_addr_t pts;
		u32 *pd;
		int j;

		vm->bts[pbe].cpu = dma_alloc_wc(
			vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
			&vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
		if (!vm->bts[pbe].cpu)
			return -ENOMEM;

		pts = vm->bts[pbe].dma;
		pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
			pd[j] = pts | LIMA_VM_FLAG_PRESENT;
			pts += LIMA_PAGE_SIZE;
		}
	}

	vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE;

	return 0;
}

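/* Find bo's mapping in vm, if any.  Caller must hold bo->lock. */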
static struct lima_bo_va *
lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va, *ret = NULL;

	list_for_each_entry(bo_va, &bo->va, list) {
		if (bo_va->vm == vm) {
			ret = bo_va;
			break;
		}
	}

	return ret;
}

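/*
 * Map bo into vm, or just take another reference if a mapping already
 * exists.  With create == false no new mapping is made and -ENOENT is
 * returned instead.
 */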
int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
{
	struct lima_bo_va *bo_va;
	struct sg_dma_page_iter sg_iter;
	int offset = 0, err;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (bo_va) {
		bo_va->ref_count++;
		mutex_unlock(&bo->lock);
		return 0;
	}

	/* should not create new bo_va if not asked by caller */
	if (!create) {
		mutex_unlock(&bo->lock);
		return -ENOENT;
	}

	bo_va = kzalloc(sizeof(*bo_va), GFP_KERNEL);
	if (!bo_va) {
		err = -ENOMEM;
		goto err_out0;
	}

	bo_va->vm = vm;
	bo_va->ref_count = 1;

	mutex_lock(&vm->lock);

	err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo));
	if (err)
		goto err_out1;

	for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, 0) {
		err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
				       bo_va->node.start + offset);
		if (err)
			goto err_out2;

		offset += PAGE_SIZE;
	}

	mutex_unlock(&vm->lock);

	list_add_tail(&bo_va->list, &bo->va);

	mutex_unlock(&bo->lock);
	return 0;

err_out2:
	if (offset)
		lima_vm_unmap_range(vm, bo_va->node.start,
				    bo_va->node.start + offset - 1);
	drm_mm_remove_node(&bo_va->node);
err_out1:
	mutex_unlock(&vm->lock);
	kfree(bo_va);
err_out0:
	mutex_unlock(&bo->lock);
	return err;
}

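/*
 * Drop one reference on bo's mapping in vm; the last reference tears
 * the mapping down.  For heap buffers only the first bo->heap_size
 * bytes have been mapped, so that, rather than the full node size, is
 * used as the unmap length.
 */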
void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va;
	u32 size;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (--bo_va->ref_count > 0) {
		mutex_unlock(&bo->lock);
		return;
	}

	mutex_lock(&vm->lock);

	size = bo->heap_size ? bo->heap_size : bo_va->node.size;
	lima_vm_unmap_range(vm, bo_va->node.start,
			    bo_va->node.start + size - 1);

	drm_mm_remove_node(&bo_va->node);

	mutex_unlock(&vm->lock);

	list_del(&bo_va->list);

	mutex_unlock(&bo->lock);

	kfree(bo_va);
}

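/*
 * Return the GPU virtual address bo is mapped at in vm.  The mapping
 * must exist; the lookup result is dereferenced without a NULL check.
 */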
u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va;
	u32 ret;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	ret = bo_va->node.start;

	mutex_unlock(&bo->lock);

	return ret;
}

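/*
 * Create a vm: allocate a zeroed page directory and initialize the
 * drm_mm range allocator over [va_start, va_end).  When the device
 * has a DLBU (dev->dlbu_cpu is set), its page is mapped up front at
 * the reserved address LIMA_VA_RESERVE_DLBU so every vm can see it.
 */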
struct lima_vm *lima_vm_create(struct lima_device *dev)
{
	struct lima_vm *vm;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return NULL;

	vm->dev = dev;
	mutex_init(&vm->lock);
	kref_init(&vm->refcount);

	vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
				  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!vm->pd.cpu)
		goto err_out0;

	if (dev->dlbu_cpu) {
		int err = lima_vm_map_page(
			vm, dev->dlbu_dma, LIMA_VA_RESERVE_DLBU);
		if (err)
			goto err_out1;
	}

	drm_mm_init(&vm->mm, dev->va_start, dev->va_end - dev->va_start);

	return vm;

err_out1:
	dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
err_out0:
	kfree(vm);
	return NULL;
}

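/*
 * kref release callback: free all page table blocks and the page
 * directory.  All BO mappings must be gone by now; drm_mm_takedown()
 * warns if any nodes are still allocated.
 */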
void lima_vm_release(struct kref *kref)
{
	struct lima_vm *vm = container_of(kref, struct lima_vm, refcount);
	int i;

	drm_mm_takedown(&vm->mm);

	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		if (vm->bts[i].cpu)
			dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
				    vm->bts[i].cpu, vm->bts[i].dma);
	}

	if (vm->pd.cpu)
		dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);

	kfree(vm);
}

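/* Debug aid: dump all allocated page tables and their present entries. */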
void lima_vm_print(struct lima_vm *vm)
{
	int i, j, k;
	u32 *pd, *pt;

	if (!vm->pd.cpu)
		return;

	pd = vm->pd.cpu;
	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		if (!vm->bts[i].cpu)
			continue;

		pt = vm->bts[i].cpu;
		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
			int idx = (i << LIMA_VM_NUM_PT_PER_BT_SHIFT) + j;

			printk(KERN_INFO "lima vm pd %03x:%08x\n", idx, pd[idx]);

			for (k = 0; k < LIMA_PAGE_ENT_NUM; k++) {
				u32 pte = *pt++;

				if (pte)
					printk(KERN_INFO "  pt %03x:%08x\n", k, pte);
			}
		}
	}
}

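/*
 * Map the pages of an already-added bo starting at page pageoff,
 * e.g. when a heap buffer has grown and its new backing pages must
 * become visible to the GPU within the range reserved by
 * lima_vm_bo_add().
 */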
int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
{
	struct lima_bo_va *bo_va;
	struct sg_dma_page_iter sg_iter;
	int offset = 0, err;
	u32 base;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (!bo_va) {
		err = -ENOENT;
		goto err_out0;
	}

	mutex_lock(&vm->lock);

	base = bo_va->node.start + (pageoff << PAGE_SHIFT);
	for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, pageoff) {
		err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
				       base + offset);
		if (err)
			goto err_out1;

		offset += PAGE_SIZE;
	}

	mutex_unlock(&vm->lock);

	mutex_unlock(&bo->lock);
	return 0;

err_out1:
	if (offset)
		lima_vm_unmap_range(vm, base, base + offset - 1);
	mutex_unlock(&vm->lock);
err_out0:
	mutex_unlock(&bo->lock);
	return err;
}