0001
0002
0003
0004 #include <linux/mm.h>
0005 #include <linux/iosys-map.h>
0006 #include <linux/sync_file.h>
0007 #include <linux/pagemap.h>
0008 #include <linux/shmem_fs.h>
0009 #include <linux/dma-mapping.h>
0010
0011 #include <drm/drm_file.h>
0012 #include <drm/drm_syncobj.h>
0013 #include <drm/drm_utils.h>
0014
0015 #include <drm/lima_drm.h>
0016
0017 #include "lima_drv.h"
0018 #include "lima_gem.h"
0019 #include "lima_vm.h"
0020
/*
 * Grow the backing storage of a heap BO.
 *
 * Heap BOs are populated lazily: the first call allocates
 * lima_heap_init_nr_pages pages, every further call doubles the current
 * size, capped at the GEM object size.  The newly faulted-in tail pages
 * are appended to the shared page array, a fresh sg table covering the
 * whole populated range is built and DMA-mapped, and — when @vm is
 * non-NULL — the new tail is also mapped into the GPU VM.
 *
 * Returns 0 on success, -ENOSPC once the BO has reached its full size,
 * or a negative errno on allocation/mapping failure.
 */
int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
{
	struct page **pages;
	struct address_space *mapping = bo->base.base.filp->f_mapping;
	struct device *dev = bo->base.base.dev->dev;
	size_t old_size = bo->heap_size;
	/* Double the current size; the very first call starts from the init size. */
	size_t new_size = bo->heap_size ? bo->heap_size * 2 :
		(lima_heap_init_nr_pages << PAGE_SHIFT);
	struct sg_table sgt;
	int i, ret;

	/* Fully grown already — no more room inside the GEM object. */
	if (bo->heap_size >= bo->base.base.size)
		return -ENOSPC;

	new_size = min(new_size, bo->base.base.size);

	mutex_lock(&bo->base.pages_lock);

	if (bo->base.pages) {
		pages = bo->base.pages;
	} else {
		/* First growth: allocate a page array sized for the full BO. */
		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			mutex_unlock(&bo->base.pages_lock);
			return -ENOMEM;
		}

		bo->base.pages = pages;
		bo->base.pages_use_count = 1;

		/* GPU-visible pages must not be reclaimed behind our back. */
		mapping_set_unevictable(mapping);
	}

	/* Fault in only the newly added tail [old_size, new_size). */
	for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);

		if (IS_ERR(page)) {
			mutex_unlock(&bo->base.pages_lock);
			return PTR_ERR(page);
		}
		pages[i] = page;
	}

	mutex_unlock(&bo->base.pages_lock);

	/* Build a new sg table over every page populated so far. */
	ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
					new_size, GFP_KERNEL);
	if (ret)
		return ret;

	if (bo->base.sgt) {
		/* Drop the previous, smaller DMA mapping before replacing it. */
		dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
		sg_free_table(bo->base.sgt);
	} else {
		bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
		if (!bo->base.sgt) {
			sg_free_table(&sgt);
			return -ENOMEM;
		}
	}

	ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(&sgt);
		kfree(bo->base.sgt);
		bo->base.sgt = NULL;
		return ret;
	}

	/* Publish the new table; the struct is copied by value. */
	*bo->base.sgt = sgt;

	if (vm) {
		/* Map only the appended pages, starting at the old page count. */
		ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
		if (ret)
			return ret;
	}

	bo->heap_size = new_size;
	return 0;
}
0102
/*
 * Create a GEM shmem object of @size bytes and return a handle for it
 * in @handle.  With LIMA_BO_FLAG_HEAP the object gets an initial lazy
 * heap allocation; otherwise its pages are fully populated up front.
 *
 * Returns 0 on success or a negative errno.
 */
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
			   u32 size, u32 flags, u32 *handle)
{
	int err;
	gfp_t mask;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	bool is_heap = flags & LIMA_BO_FLAG_HEAP;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	obj = &shmem->base;

	/*
	 * Force backing pages into the 32-bit DMA zone and out of highmem;
	 * presumably the GPU cannot address memory above 4 GiB — confirm
	 * against the hardware docs.
	 */
	mask = mapping_gfp_mask(obj->filp->f_mapping);
	mask &= ~__GFP_HIGHMEM;
	mask |= __GFP_DMA32;
	mapping_set_gfp_mask(obj->filp->f_mapping, mask);

	if (is_heap) {
		bo = to_lima_bo(obj);
		err = lima_heap_alloc(bo, NULL);
		if (err)
			goto out;
	} else {
		/* Populate and map all pages now for non-heap BOs. */
		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);

		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto out;
		}
	}

	err = drm_gem_handle_create(file, obj, handle);

out:
	/*
	 * Drop the creation reference; on success the handle holds its own
	 * reference, on failure this destroys the object.
	 */
	drm_gem_object_put(obj);

	return err;
}
0147
0148 static void lima_gem_free_object(struct drm_gem_object *obj)
0149 {
0150 struct lima_bo *bo = to_lima_bo(obj);
0151
0152 if (!list_empty(&bo->va))
0153 dev_err(obj->dev->dev, "lima gem free bo still has va\n");
0154
0155 drm_gem_shmem_free(&bo->base);
0156 }
0157
0158 static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
0159 {
0160 struct lima_bo *bo = to_lima_bo(obj);
0161 struct lima_drm_priv *priv = to_lima_drm_priv(file);
0162 struct lima_vm *vm = priv->vm;
0163
0164 return lima_vm_bo_add(vm, bo, true);
0165 }
0166
0167 static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
0168 {
0169 struct lima_bo *bo = to_lima_bo(obj);
0170 struct lima_drm_priv *priv = to_lima_drm_priv(file);
0171 struct lima_vm *vm = priv->vm;
0172
0173 lima_vm_bo_del(vm, bo);
0174 }
0175
0176 static int lima_gem_pin(struct drm_gem_object *obj)
0177 {
0178 struct lima_bo *bo = to_lima_bo(obj);
0179
0180 if (bo->heap_size)
0181 return -EINVAL;
0182
0183 return drm_gem_shmem_pin(&bo->base);
0184 }
0185
0186 static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
0187 {
0188 struct lima_bo *bo = to_lima_bo(obj);
0189
0190 if (bo->heap_size)
0191 return -EINVAL;
0192
0193 return drm_gem_shmem_vmap(&bo->base, map);
0194 }
0195
0196 static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
0197 {
0198 struct lima_bo *bo = to_lima_bo(obj);
0199
0200 if (bo->heap_size)
0201 return -EINVAL;
0202
0203 return drm_gem_shmem_mmap(&bo->base, vma);
0204 }
0205
/*
 * GEM object vtable.  pin/vmap/mmap go through lima wrappers that reject
 * heap BOs; the remaining operations delegate directly to the shmem
 * helper implementations.
 */
static const struct drm_gem_object_funcs lima_gem_funcs = {
	.free = lima_gem_free_object,
	.open = lima_gem_object_open,
	.close = lima_gem_object_close,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = lima_gem_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = lima_gem_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = lima_gem_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};
0219
0220 struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
0221 {
0222 struct lima_bo *bo;
0223
0224 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
0225 if (!bo)
0226 return ERR_PTR(-ENOMEM);
0227
0228 mutex_init(&bo->lock);
0229 INIT_LIST_HEAD(&bo->va);
0230 bo->base.map_wc = true;
0231 bo->base.base.funcs = &lima_gem_funcs;
0232
0233 return &bo->base.base;
0234 }
0235
0236 int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
0237 {
0238 struct drm_gem_object *obj;
0239 struct lima_bo *bo;
0240 struct lima_drm_priv *priv = to_lima_drm_priv(file);
0241 struct lima_vm *vm = priv->vm;
0242
0243 obj = drm_gem_object_lookup(file, handle);
0244 if (!obj)
0245 return -ENOENT;
0246
0247 bo = to_lima_bo(obj);
0248
0249 *va = lima_vm_get_va(vm, bo);
0250
0251 *offset = drm_vma_node_offset_addr(&obj->vma_node);
0252
0253 drm_gem_object_put(obj);
0254 return 0;
0255 }
0256
0257 static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
0258 bool write, bool explicit)
0259 {
0260 int err;
0261
0262 err = dma_resv_reserve_fences(lima_bo_resv(bo), 1);
0263 if (err)
0264 return err;
0265
0266
0267 if (explicit)
0268 return 0;
0269
0270 return drm_sched_job_add_implicit_dependencies(&task->base,
0271 &bo->base.base,
0272 write);
0273 }
0274
0275 static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
0276 {
0277 int i, err;
0278
0279 for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
0280 struct dma_fence *fence = NULL;
0281
0282 if (!submit->in_sync[i])
0283 continue;
0284
0285 err = drm_syncobj_find_fence(file, submit->in_sync[i],
0286 0, 0, &fence);
0287 if (err)
0288 return err;
0289
0290 err = drm_sched_job_add_dependency(&submit->task->base, fence);
0291 if (err) {
0292 dma_fence_put(fence);
0293 return err;
0294 }
0295 }
0296
0297 return 0;
0298 }
0299
/*
 * Submit a job: look up and lock all BOs, initialize the scheduler
 * task, wire up dependencies and fences, queue the task, then signal
 * the optional output syncobj.
 *
 * Error unwinding is staged: err_out2 undoes task init, err_out1 the
 * reservation locks, err_out0 the VM registration and BO references
 * taken so far (bos[] entries past the failure point are NULL).
 *
 * Returns 0 on success or a negative errno.
 */
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
{
	int i, err = 0;
	struct ww_acquire_ctx ctx;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;
	struct drm_syncobj *out_sync = NULL;
	struct dma_fence *fence;
	struct lima_bo **bos = submit->lbos;

	if (submit->out_sync) {
		out_sync = drm_syncobj_find(file, submit->out_sync);
		if (!out_sync)
			return -ENOENT;
	}

	/* Resolve handles; each lookup takes a reference we must drop later. */
	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct lima_bo *bo;

		obj = drm_gem_object_lookup(file, submit->bos[i].handle);
		if (!obj) {
			err = -ENOENT;
			goto err_out0;
		}

		bo = to_lima_bo(obj);

		/*
		 * Register the BO with this context's VM for the duration of
		 * the job (create == false).
		 */
		err = lima_vm_bo_add(vm, bo, false);
		if (err) {
			drm_gem_object_put(obj);
			goto err_out0;
		}

		bos[i] = bo;
	}

	/* ww-mutex lock of all reservations to avoid ABBA deadlocks. */
	err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
					submit->nr_bos, &ctx);
	if (err)
		goto err_out0;

	err = lima_sched_task_init(
		submit->task, submit->ctx->context + submit->pipe,
		bos, submit->nr_bos, vm);
	if (err)
		goto err_out1;

	err = lima_gem_add_deps(file, submit);
	if (err)
		goto err_out2;

	/* Reserve fence slots and collect implicit dependencies per BO. */
	for (i = 0; i < submit->nr_bos; i++) {
		err = lima_gem_sync_bo(
			submit->task, bos[i],
			submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
			submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
		if (err)
			goto err_out2;
	}

	/* Point of no return: the task is queued and owned by the scheduler. */
	fence = lima_sched_context_queue_task(submit->task);

	/* Attach the job fence to every BO's reservation object. */
	for (i = 0; i < submit->nr_bos; i++) {
		dma_resv_add_fence(lima_bo_resv(bos[i]), fence,
				   submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE ?
				   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
	}

	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);

	/* Drop the lookup references; the task holds its own BO refs now. */
	for (i = 0; i < submit->nr_bos; i++)
		drm_gem_object_put(&bos[i]->base.base);

	if (out_sync) {
		drm_syncobj_replace_fence(out_sync, fence);
		drm_syncobj_put(out_sync);
	}

	dma_fence_put(fence);

	return 0;

err_out2:
	lima_sched_task_fini(submit->task);
err_out1:
	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);
err_out0:
	/* bos[] is NULL past the last successfully added entry. */
	for (i = 0; i < submit->nr_bos; i++) {
		if (!bos[i])
			break;
		lima_vm_bo_del(vm, bos[i]);
		drm_gem_object_put(&bos[i]->base.base);
	}
	if (out_sync)
		drm_syncobj_put(out_sync);
	return err;
}
0403
0404 int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
0405 {
0406 bool write = op & LIMA_GEM_WAIT_WRITE;
0407 long ret, timeout;
0408
0409 if (!op)
0410 return 0;
0411
0412 timeout = drm_timeout_abs_to_jiffies(timeout_ns);
0413
0414 ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
0415 if (ret == -ETIME)
0416 ret = timeout ? -ETIMEDOUT : -EBUSY;
0417
0418 return ret;
0419 }