0001
0002
0003
0004
0005
0006 #include <drm/drm_file.h>
0007 #include <linux/dma-fence-array.h>
0008 #include <linux/file.h>
0009 #include <linux/pm_runtime.h>
0010 #include <linux/dma-resv.h>
0011 #include <linux/sync_file.h>
0012 #include <linux/uaccess.h>
0013 #include <linux/vmalloc.h>
0014
0015 #include "etnaviv_cmdbuf.h"
0016 #include "etnaviv_drv.h"
0017 #include "etnaviv_gpu.h"
0018 #include "etnaviv_gem.h"
0019 #include "etnaviv_perfmon.h"
0020 #include "etnaviv_sched.h"
0021
0022
0023
0024
0025
0026 #define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
0027
0028 #define BO_LOCKED 0x4000
0029 #define BO_PINNED 0x2000
0030
0031 static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
0032 struct etnaviv_gpu *gpu, size_t nr_bos, size_t nr_pmrs)
0033 {
0034 struct etnaviv_gem_submit *submit;
0035 size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));
0036
0037 submit = kzalloc(sz, GFP_KERNEL);
0038 if (!submit)
0039 return NULL;
0040
0041 submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request),
0042 GFP_KERNEL);
0043 if (!submit->pmrs) {
0044 kfree(submit);
0045 return NULL;
0046 }
0047 submit->nr_pmrs = nr_pmrs;
0048
0049 submit->gpu = gpu;
0050 kref_init(&submit->refcount);
0051
0052 return submit;
0053 }
0054
/*
 * Resolve the userspace GEM handles of all submit BOs to object pointers
 * and take a reference on each. Runs entirely under file->table_lock so
 * the handles cannot be destroyed while we look them up. On error,
 * submit->nr_bos records how many objects were already referenced, so
 * submit_cleanup() drops exactly those references.
 */
static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
	struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
	unsigned nr_bos)
{
	struct drm_etnaviv_gem_submit_bo *bo;
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);

	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
		struct drm_gem_object *obj;

		if (bo->flags & BO_INVALID_FLAGS) {
			DRM_ERROR("invalid flags: %x\n", bo->flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = bo->flags;
		if (submit->flags & ETNA_SUBMIT_SOFTPIN) {
			/* softpin addresses must lie in the softpin range */
			if (bo->presumed < ETNAVIV_SOFTPIN_START_ADDRESS) {
				DRM_ERROR("invalid softpin address\n");
				ret = -EINVAL;
				goto out_unlock;
			}
			submit->bos[i].va = bo->presumed;
		}

		/*
		 * Normally one would use drm_gem_object_lookup(); for a bulk
		 * lookup all under a single table_lock we hit object_idr
		 * directly.
		 */
		obj = idr_find(&file->object_idr, bo->handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n",
				  bo->handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * Take a reference on the object; holding table_lock keeps the
		 * handle (and its reference) alive until we have our own.
		 */
		drm_gem_object_get(obj);

		submit->bos[i].obj = to_etnaviv_bo(obj);
	}

out_unlock:
	/* record how many objects are referenced, for cleanup */
	submit->nr_bos = i;
	spin_unlock(&file->table_lock);

	return ret;
}
0110
0111 static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
0112 {
0113 if (submit->bos[i].flags & BO_LOCKED) {
0114 struct drm_gem_object *obj = &submit->bos[i].obj->base;
0115
0116 dma_resv_unlock(obj->resv);
0117 submit->bos[i].flags &= ~BO_LOCKED;
0118 }
0119 }
0120
/*
 * Take the dma-resv (ww-mutex) locks of all submit BOs using the standard
 * wound/wait protocol: on -EDEADLK drop every lock held, acquire the
 * contended BO in slow-path mode, then retry the whole list. Returns 0
 * with all BOs marked BO_LOCKED, or a negative error with no locks held.
 */
static int submit_lock_objects(struct etnaviv_gem_submit *submit,
		struct ww_acquire_ctx *ticket)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		/* the slow-path-locked BO is already held; don't re-lock it */
		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(obj->resv, ticket);
			/* -EALREADY means the same BO appears twice in the list */
			if (ret == -EALREADY)
				DRM_ERROR("BO at index %u already on submit list\n",
					  i);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(ticket);

	return 0;

fail:
	/* unwind every lock taken so far (BO_LOCKED guards double-unlock) */
	for (; i >= 0; i--)
		submit_unlock_object(submit, i);

	/* the slow-locked BO may sit beyond i; drop it separately */
	if (slow_locked > 0)
		submit_unlock_object(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct drm_gem_object *obj;

		obj = &submit->bos[contended].obj->base;

		/* we lost out in a seqno race, lock and retry */
		ret = dma_resv_lock_slow_interruptible(obj->resv, ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
0173
0174 static int submit_fence_sync(struct etnaviv_gem_submit *submit)
0175 {
0176 int i, ret = 0;
0177
0178 for (i = 0; i < submit->nr_bos; i++) {
0179 struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
0180 struct dma_resv *robj = bo->obj->base.resv;
0181
0182 ret = dma_resv_reserve_fences(robj, 1);
0183 if (ret)
0184 return ret;
0185
0186 if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
0187 continue;
0188
0189 ret = drm_sched_job_add_implicit_dependencies(&submit->sched_job,
0190 &bo->obj->base,
0191 bo->flags & ETNA_SUBMIT_BO_WRITE);
0192 if (ret)
0193 return ret;
0194 }
0195
0196 return ret;
0197 }
0198
0199 static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
0200 {
0201 int i;
0202
0203 for (i = 0; i < submit->nr_bos; i++) {
0204 struct drm_gem_object *obj = &submit->bos[i].obj->base;
0205 bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
0206
0207 dma_resv_add_fence(obj->resv, submit->out_fence, write ?
0208 DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
0209 submit_unlock_object(submit, i);
0210 }
0211 }
0212
/*
 * Resolve a GPU (IOVA) mapping for every submit BO in the submit's MMU
 * context and mark each one pinned and GPU-active. With softpin, the
 * resolved IOVA must match exactly the address userspace requested.
 * On error, BOs pinned so far keep their BO_PINNED flag and are released
 * later by submit_cleanup().
 */
static int submit_pin_objects(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		struct etnaviv_vram_mapping *mapping;

		mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
						  submit->mmu_context,
						  submit->bos[i].va);
		if (IS_ERR(mapping)) {
			ret = PTR_ERR(mapping);
			break;
		}

		/* softpin: the mapping must land at the requested address */
		if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
		    submit->bos[i].va != mapping->iova) {
			/* drop the mapping ref before bailing; this BO stays unpinned */
			etnaviv_gem_mapping_unreference(mapping);
			return -EINVAL;
		}

		atomic_inc(&etnaviv_obj->gpu_active);

		submit->bos[i].flags |= BO_PINNED;
		submit->bos[i].mapping = mapping;
	}

	return ret;
}
0243
0244 static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
0245 struct etnaviv_gem_submit_bo **bo)
0246 {
0247 if (idx >= submit->nr_bos) {
0248 DRM_ERROR("invalid buffer index: %u (out of %u)\n",
0249 idx, submit->nr_bos);
0250 return -EINVAL;
0251 }
0252
0253 *bo = &submit->bos[idx];
0254
0255 return 0;
0256 }
0257
0258
/*
 * Patch the command stream: for each relocation entry, write the IOVA of
 * the referenced BO (plus reloc_offset) into the stream at submit_offset.
 * @size is the stream size in 32-bit words. Relocations must be sorted by
 * ascending submit_offset, and are rejected entirely for softpin submits
 * (softpin addresses are fixed, so no patching is needed).
 */
static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
		u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
		u32 nr_relocs)
{
	u32 i, last_offset = 0;
	u32 *ptr = stream;
	int ret;

	/* Submits using softpin don't blend with relocs */
	if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && nr_relocs != 0)
		return -EINVAL;

	for (i = 0; i < nr_relocs; i++) {
		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
		struct etnaviv_gem_submit_bo *bo;
		u32 off;

		if (unlikely(r->flags)) {
			DRM_ERROR("invalid reloc flags\n");
			return -EINVAL;
		}

		if (r->submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
				  r->submit_offset);
			return -EINVAL;
		}

		/* offset in dwords; must stay in bounds and ascend */
		off = r->submit_offset / 4;

		if ((off >= size) ||
		    (off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			return -EINVAL;
		}

		ret = submit_bo(submit, r->reloc_idx, &bo);
		if (ret)
			return ret;

		/* the patched pointer must lie fully inside the target BO */
		if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
			DRM_ERROR("relocation %u outside object\n", i);
			return -EINVAL;
		}

		ptr[off] = bo->mapping->iova + r->reloc_offset;

		last_offset = off;
	}

	return 0;
}
0312
0313 static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
0314 u32 exec_state, const struct drm_etnaviv_gem_submit_pmr *pmrs)
0315 {
0316 u32 i;
0317
0318 for (i = 0; i < submit->nr_pmrs; i++) {
0319 const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
0320 struct etnaviv_gem_submit_bo *bo;
0321 int ret;
0322
0323 ret = submit_bo(submit, r->read_idx, &bo);
0324 if (ret)
0325 return ret;
0326
0327
0328 if (r->read_offset == 0) {
0329 DRM_ERROR("perfmon request: offset is 0");
0330 return -EINVAL;
0331 }
0332
0333 if (r->read_offset >= bo->obj->base.size - sizeof(u32)) {
0334 DRM_ERROR("perfmon request: offset %u outside object", i);
0335 return -EINVAL;
0336 }
0337
0338 if (r->flags & ~(ETNA_PM_PROCESS_PRE | ETNA_PM_PROCESS_POST)) {
0339 DRM_ERROR("perfmon request: flags are not valid");
0340 return -EINVAL;
0341 }
0342
0343 if (etnaviv_pm_req_validate(r, exec_state)) {
0344 DRM_ERROR("perfmon request: domain or signal not valid");
0345 return -EINVAL;
0346 }
0347
0348 submit->pmrs[i].flags = r->flags;
0349 submit->pmrs[i].domain = r->domain;
0350 submit->pmrs[i].signal = r->signal;
0351 submit->pmrs[i].sequence = r->sequence;
0352 submit->pmrs[i].offset = r->read_offset;
0353 submit->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
0354 }
0355
0356 return 0;
0357 }
0358
/*
 * Final teardown, invoked by kref_put() when the last submit reference is
 * dropped: release runtime-PM and cmdbuf resources, drop MMU context
 * references, unpin/unlock all BOs and drop their GEM references, remove
 * the out-fence from the fence idr and free the submit itself.
 */
static void submit_cleanup(struct kref *kref)
{
	struct etnaviv_gem_submit *submit =
			container_of(kref, struct etnaviv_gem_submit, refcount);
	unsigned i;

	if (submit->runtime_resumed)
		pm_runtime_put_autosuspend(submit->gpu->dev);

	if (submit->cmdbuf.suballoc)
		etnaviv_cmdbuf_free(&submit->cmdbuf);

	if (submit->mmu_context)
		etnaviv_iommu_context_put(submit->mmu_context);

	if (submit->prev_mmu_context)
		etnaviv_iommu_context_put(submit->prev_mmu_context);

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		/* unpin all objects */
		if (submit->bos[i].flags & BO_PINNED) {
			etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
			atomic_dec(&etnaviv_obj->gpu_active);
			submit->bos[i].mapping = NULL;
			submit->bos[i].flags &= ~BO_PINNED;
		}

		/* if the submit failed early, objects may still be locked */
		submit_unlock_object(submit, i);
		drm_gem_object_put(&etnaviv_obj->base);
	}

	wake_up_all(&submit->gpu->fence_event);

	if (submit->out_fence) {
		/*
		 * Remove the fence from the lookup idr before dropping our
		 * reference, so nobody can find it anymore.
		 */
		mutex_lock(&submit->gpu->fence_lock);
		idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
		mutex_unlock(&submit->gpu->fence_lock);
		dma_fence_put(submit->out_fence);
	}
	kfree(submit->pmrs);
	kfree(submit);
}
0405
/* Drop a submit reference; frees it via submit_cleanup() on the last put. */
void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
{
	kref_put(&submit->refcount, submit_cleanup);
}
0410
/*
 * ETNAVIV_GEM_SUBMIT ioctl: validate the submit arguments, copy the
 * command stream and the BO/reloc/perfmon tables from userspace, build a
 * scheduler job and push it to the GPU. On success, args->fence and (if
 * requested) args->fence_fd identify the out-fence. Errors unwind through
 * the goto labels at the bottom; note that once the job has been pushed
 * to the scheduler, the sched_job must NOT be cleaned up anymore.
 */
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_file_private *ctx = file->driver_priv;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_submit *args = data;
	struct drm_etnaviv_gem_submit_reloc *relocs;
	struct drm_etnaviv_gem_submit_pmr *pmrs;
	struct drm_etnaviv_gem_submit_bo *bos;
	struct etnaviv_gem_submit *submit;
	struct etnaviv_gpu *gpu;
	struct sync_file *sync_file = NULL;
	struct ww_acquire_ctx ticket;
	int out_fence_fd = -1;
	void *stream;
	int ret;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->stream_size % 4) {
		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
			  args->stream_size);
		return -EINVAL;
	}

	if (args->exec_state != ETNA_PIPE_3D &&
	    args->exec_state != ETNA_PIPE_2D &&
	    args->exec_state != ETNA_PIPE_VG) {
		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
		return -EINVAL;
	}

	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
		return -EINVAL;
	}

	/* softpin requires per-process address spaces (MMUv2) */
	if ((args->flags & ETNA_SUBMIT_SOFTPIN) &&
	    priv->mmu_global->version != ETNAVIV_IOMMU_V2) {
		DRM_ERROR("softpin requested on incompatible MMU\n");
		return -EINVAL;
	}

	/* bound the sizes so the kvmalloc_array() calls below stay sane */
	if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
	    args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
		DRM_ERROR("submit arguments out of size limits\n");
		return -EINVAL;
	}

	/*
	 * Copy the command submission and bo array to kernel space in one go,
	 * and do this outside of any locks.
	 */
	bos = kvmalloc_array(args->nr_bos, sizeof(*bos), GFP_KERNEL);
	relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
	pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
	stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
	if (!bos || !relocs || !pmrs || !stream) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
			     args->nr_bos * sizeof(*bos));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
			     args->nr_relocs * sizeof(*relocs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(pmrs, u64_to_user_ptr(args->pmrs),
			     args->nr_pmrs * sizeof(*pmrs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
			     args->stream_size);
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	/* reserve the fence fd early so a late failure can't lose it */
	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_submit_cmds;
		}
	}

	ww_acquire_init(&ticket, &reservation_ww_class);

	submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
	if (!submit) {
		ret = -ENOMEM;
		goto err_submit_ww_acquire;
	}

	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
				  ALIGN(args->stream_size, 8) + 8);
	if (ret)
		goto err_submit_put;

	submit->ctx = file->driver_priv;
	submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
	submit->exec_state = args->exec_state;
	submit->flags = args->flags;

	ret = drm_sched_job_init(&submit->sched_job,
				 &ctx->sched_entity[args->pipe],
				 submit->ctx);
	if (ret)
		goto err_submit_put;

	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
	if (ret)
		goto err_submit_job;

	/* without per-process pagetables the stream must be validated */
	if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
	    !etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
				      relocs, args->nr_relocs)) {
		ret = -EINVAL;
		goto err_submit_job;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence = sync_file_get_fence(args->fence_fd);
		if (!in_fence) {
			ret = -EINVAL;
			goto err_submit_job;
		}

		/* the scheduler takes ownership of in_fence */
		ret = drm_sched_job_add_dependency(&submit->sched_job,
						   in_fence);
		if (ret)
			goto err_submit_job;
	}

	ret = submit_pin_objects(submit);
	if (ret)
		goto err_submit_job;

	ret = submit_reloc(submit, stream, args->stream_size / 4,
			   relocs, args->nr_relocs);
	if (ret)
		goto err_submit_job;

	ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
	if (ret)
		goto err_submit_job;

	memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);

	ret = submit_lock_objects(submit, &ticket);
	if (ret)
		goto err_submit_job;

	ret = submit_fence_sync(submit);
	if (ret)
		goto err_submit_job;

	ret = etnaviv_sched_push_job(submit);
	if (ret)
		goto err_submit_job;

	submit_attach_object_fences(submit);

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		/*
		 * This can be improved: ideally we want to allocate the sync
		 * file before kicking off the GPU job and just attach the
		 * fence to the sync file here, eliminating the ENOMEM
		 * possibility at this stage.
		 */
		sync_file = sync_file_create(submit->out_fence);
		if (!sync_file) {
			ret = -ENOMEM;
			/*
			 * When this late error is hit, the submit has already
			 * been handed over to the scheduler. At this point the
			 * sched_job must not be cleaned up anymore, so skip
			 * err_submit_job and only drop our submit reference.
			 */
			goto err_submit_put;
		}
		fd_install(out_fence_fd, sync_file->file);
	}

	args->fence_fd = out_fence_fd;
	args->fence = submit->out_fence_id;

err_submit_job:
	if (ret)
		drm_sched_job_cleanup(&submit->sched_job);
err_submit_put:
	etnaviv_submit_put(submit);

err_submit_ww_acquire:
	ww_acquire_fini(&ticket);

err_submit_cmds:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	kvfree(stream);
	kvfree(bos);
	kvfree(relocs);
	kvfree(pmrs);

	return ret;
}