// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"

/*
 * Cmdstream submission:
 */
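/*
 * A submit flows, roughly, through the stages below (driven by
 * msm_ioctl_gem_submit() at the bottom of this file): look up the bo
 * and cmd tables from userspace, ww-lock every bo, sort out implicit-
 * sync fencing, pin the bo's into the GPU address space, patch relocs
 * in the cmdstream where presumed iovas turned out to be wrong, then
 * arm and push the scheduler job.
 */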

static struct msm_gem_submit *submit_create(struct drm_device *dev,
        struct msm_gpu *gpu,
        struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
        uint32_t nr_cmds)
{
    struct msm_gem_submit *submit;
    uint64_t sz;
    int ret;

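    /*
     * Note that bos[] and cmd[] share a single allocation: the cmd
     * table starts directly after the last bo entry (see the
     * submit->cmd assignment below).
     */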
    sz = struct_size(submit, bos, nr_bos) +
            ((u64)nr_cmds * sizeof(submit->cmd[0]));

    if (sz > SIZE_MAX)
        return ERR_PTR(-ENOMEM);

    submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
    if (!submit)
        return ERR_PTR(-ENOMEM);

    ret = drm_sched_job_init(&submit->base, queue->entity, queue);
    if (ret) {
        kfree(submit);
        return ERR_PTR(ret);
    }

    kref_init(&submit->ref);
    submit->dev = dev;
    submit->aspace = queue->ctx->aspace;
    submit->gpu = gpu;
    submit->cmd = (void *)&submit->bos[nr_bos];
    submit->queue = queue;
    submit->ring = gpu->rb[queue->ring_nr];
    submit->fault_dumped = false;

    INIT_LIST_HEAD(&submit->node);

    return submit;
}

void __msm_gem_submit_destroy(struct kref *kref)
{
    struct msm_gem_submit *submit =
            container_of(kref, struct msm_gem_submit, ref);
    unsigned i;

    if (submit->fence_id) {
        mutex_lock(&submit->queue->lock);
        idr_remove(&submit->queue->fence_idr, submit->fence_id);
        mutex_unlock(&submit->queue->lock);
    }

    dma_fence_put(submit->user_fence);
    dma_fence_put(submit->hw_fence);

    put_pid(submit->pid);
    msm_submitqueue_put(submit->queue);

    for (i = 0; i < submit->nr_cmds; i++)
        kfree(submit->cmd[i].relocs);

    kfree(submit);
}

static int submit_lookup_objects(struct msm_gem_submit *submit,
        struct drm_msm_gem_submit *args, struct drm_file *file)
{
    unsigned i;
    int ret = 0;

    for (i = 0; i < args->nr_bos; i++) {
        struct drm_msm_gem_submit_bo submit_bo;
        void __user *userptr =
            u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

        /* make sure we don't have garbage flags, in case we hit
         * error path before flags is initialized:
         */
        submit->bos[i].flags = 0;

        if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
            ret = -EFAULT;
            i = 0;
            goto out;
        }

/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

        if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
            !(submit_bo.flags & MANDATORY_FLAGS)) {
            DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
            ret = -EINVAL;
            i = 0;
            goto out;
        }

        submit->bos[i].handle = submit_bo.handle;
        submit->bos[i].flags = submit_bo.flags;
        /* in submit_pin_objects() we figure out if this is true: */
        submit->bos[i].iova  = submit_bo.presumed;
    }

    spin_lock(&file->table_lock);

    for (i = 0; i < args->nr_bos; i++) {
        struct drm_gem_object *obj;

        /* normally use drm_gem_object_lookup(), but for bulk lookup
         * all under single table_lock just hit object_idr directly:
         */
        obj = idr_find(&file->object_idr, submit->bos[i].handle);
        if (!obj) {
            DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
            ret = -EINVAL;
            goto out_unlock;
        }

        drm_gem_object_get(obj);

        submit->bos[i].obj = to_msm_bo(obj);
    }

out_unlock:
    spin_unlock(&file->table_lock);

out:
    submit->nr_bos = i;

    return ret;
}

static int submit_lookup_cmds(struct msm_gem_submit *submit,
        struct drm_msm_gem_submit *args, struct drm_file *file)
{
    unsigned i;
    size_t sz;
    int ret = 0;

    for (i = 0; i < args->nr_cmds; i++) {
        struct drm_msm_gem_submit_cmd submit_cmd;
        void __user *userptr =
            u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));

        ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
        if (ret) {
            ret = -EFAULT;
            goto out;
        }

        /* validate input from userspace: */
        switch (submit_cmd.type) {
        case MSM_SUBMIT_CMD_BUF:
        case MSM_SUBMIT_CMD_IB_TARGET_BUF:
        case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
            break;
        default:
            DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
            return -EINVAL;
        }

        if (submit_cmd.size % 4) {
            DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
                    submit_cmd.size);
            ret = -EINVAL;
            goto out;
        }

        submit->cmd[i].type = submit_cmd.type;
        submit->cmd[i].size = submit_cmd.size / 4;
        submit->cmd[i].offset = submit_cmd.submit_offset / 4;
        submit->cmd[i].idx  = submit_cmd.submit_idx;
        submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;

        userptr = u64_to_user_ptr(submit_cmd.relocs);

        sz = array_size(submit_cmd.nr_relocs,
                sizeof(struct drm_msm_gem_submit_reloc));
        /* check for overflow: */
        if (sz == SIZE_MAX) {
            ret = -ENOMEM;
            goto out;
        }
        submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
        if (!submit->cmd[i].relocs) {
            ret = -ENOMEM;
            goto out;
        }
        ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
        if (ret) {
            ret = -EFAULT;
            goto out;
        }
    }

out:
    return ret;
}

/* Unwind bo state, according to cleanup_flags.  In the success case, only
 * the lock is dropped at the end of the submit (and active/pin ref is dropped
 * later when the submit is retired).
 */
static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
        unsigned cleanup_flags)
{
    struct drm_gem_object *obj = &submit->bos[i].obj->base;
    unsigned flags = submit->bos[i].flags & cleanup_flags;

    /*
     * Clear flags bit before dropping lock, so that the msm_job_run()
     * path isn't racing with submit_cleanup() (ie. the read/modify/
     * write is protected by the obj lock in all paths)
     */
    submit->bos[i].flags &= ~cleanup_flags;

    if (flags & BO_VMA_PINNED)
        msm_gem_unpin_vma(submit->bos[i].vma);

    if (flags & BO_OBJ_PINNED)
        msm_gem_unpin_locked(obj);

    if (flags & BO_ACTIVE)
        msm_gem_active_put(obj);

    if (flags & BO_LOCKED)
        dma_resv_unlock(obj->resv);
}

static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
    unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED |
                 BO_ACTIVE | BO_LOCKED;
    submit_cleanup_bo(submit, i, cleanup_flags);

    if (!(submit->bos[i].flags & BO_VALID))
        submit->bos[i].iova = 0;
}

/* This is where we make sure all the bo's are reserved (locked): */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
    int contended, slow_locked = -1, i, ret = 0;

retry:
    for (i = 0; i < submit->nr_bos; i++) {
        struct msm_gem_object *msm_obj = submit->bos[i].obj;

        if (slow_locked == i)
            slow_locked = -1;

        contended = i;

        if (!(submit->bos[i].flags & BO_LOCKED)) {
            ret = dma_resv_lock_interruptible(msm_obj->base.resv,
                              &submit->ticket);
            if (ret)
                goto fail;
            submit->bos[i].flags |= BO_LOCKED;
        }
    }

    ww_acquire_done(&submit->ticket);

    return 0;

fail:
    if (ret == -EALREADY) {
        DRM_ERROR("handle %u at index %u already on submit list\n",
                submit->bos[i].handle, i);
        ret = -EINVAL;
    }

    for (; i >= 0; i--)
        submit_unlock_unpin_bo(submit, i);

    if (slow_locked > 0)
        submit_unlock_unpin_bo(submit, slow_locked);

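    /*
     * -EDEADLK means we lost a reservation-lock race to another task
     * holding a lock we need: back off (everything was unlocked
     * above), acquire the contended lock with the slow path, and then
     * retry the whole loop with that one bo pre-locked.
     */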
    if (ret == -EDEADLK) {
        struct msm_gem_object *msm_obj = submit->bos[contended].obj;
        /* we lost out in a seqno race, lock and retry.. */
        ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv,
                               &submit->ticket);
        if (!ret) {
            submit->bos[contended].flags |= BO_LOCKED;
            slow_locked = contended;
            goto retry;
        }

        /* Not expecting -EALREADY here, if the bo was already
         * locked, we should have gotten -EALREADY already from
         * the dma_resv_lock_interruptible() call.
         */
        WARN_ON_ONCE(ret == -EALREADY);
    }

    return ret;
}

static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
    int i, ret = 0;

    for (i = 0; i < submit->nr_bos; i++) {
        struct drm_gem_object *obj = &submit->bos[i].obj->base;
        bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

        /* NOTE: dma_resv_reserve_fences() must happen before the
         * fence is added (in submit_attach_object_fences()), which
         * makes this a slightly strange place to call it.  OTOH this
         * is a convenient can-fail point to hook it in.
         */
        ret = dma_resv_reserve_fences(obj->resv, 1);
        if (ret)
            return ret;

        /* exclusive fences must be ordered, so even with NO_IMPLICIT
         * we still add implicit dependencies for writes:
         */
        if (no_implicit && !write)
            continue;

        ret = drm_sched_job_add_implicit_dependencies(&submit->base,
                                  obj,
                                  write);
        if (ret)
            break;
    }

    return ret;
}

static int submit_pin_objects(struct msm_gem_submit *submit)
{
    int i, ret = 0;

    submit->valid = true;

    /*
     * Increment active_count first, so if under memory pressure, we
     * don't inadvertently evict a bo needed by the submit in order
     * to pin an earlier bo in the same submit.
     */
    for (i = 0; i < submit->nr_bos; i++) {
        struct drm_gem_object *obj = &submit->bos[i].obj->base;

        msm_gem_active_get(obj, submit->gpu);
        submit->bos[i].flags |= BO_ACTIVE;
    }

    for (i = 0; i < submit->nr_bos; i++) {
        struct drm_gem_object *obj = &submit->bos[i].obj->base;
        struct msm_gem_vma *vma;

        /* if locking succeeded, pin bo: */
        vma = msm_gem_get_vma_locked(obj, submit->aspace);
        if (IS_ERR(vma)) {
            ret = PTR_ERR(vma);
            break;
        }

        ret = msm_gem_pin_vma_locked(obj, vma);
        if (ret)
            break;

        submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
        submit->bos[i].vma = vma;

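        /*
         * If the iova that userspace presumed (from an earlier submit)
         * still matches, the addresses baked into the cmdstream remain
         * usable and reloc patching can be skipped:
         */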
        if (vma->iova == submit->bos[i].iova) {
            submit->bos[i].flags |= BO_VALID;
        } else {
            submit->bos[i].iova = vma->iova;
            /* iova changed, so address in cmdstream is not valid: */
            submit->bos[i].flags &= ~BO_VALID;
            submit->valid = false;
        }
    }

    return ret;
}

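/*
 * Attach the (not yet signaled) fence to each bo's reservation object,
 * so anybody waiting on these buffers via implicit sync waits for this
 * submit to complete:
 */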
static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
    int i;

    for (i = 0; i < submit->nr_bos; i++) {
        struct drm_gem_object *obj = &submit->bos[i].obj->base;

        if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
            dma_resv_add_fence(obj->resv, submit->user_fence,
                       DMA_RESV_USAGE_WRITE);
        else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
            dma_resv_add_fence(obj->resv, submit->user_fence,
                       DMA_RESV_USAGE_READ);
    }
}

static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
        struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
    if (idx >= submit->nr_bos) {
        DRM_ERROR("invalid buffer index: %u (out of %u)\n",
                idx, submit->nr_bos);
        return -EINVAL;
    }

    if (obj)
        *obj = submit->bos[idx].obj;
    if (iova)
        *iova = submit->bos[idx].iova;
    if (valid)
        *valid = !!(submit->bos[idx].flags & BO_VALID);

    return 0;
}

/* process the reloc's and patch up the cmdstream as needed: */
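/*
 * Each reloc names a dword in this cmdstream buffer that should hold
 * the GPU address of (some offset into) another bo in the submit.
 * Unless every presumed iova was correct (submit->valid), those dwords
 * are rewritten here with the real addresses.
 */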
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
        uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
{
    uint32_t i, last_offset = 0;
    uint32_t *ptr;
    int ret = 0;

    if (!nr_relocs)
        return 0;

    if (offset % 4) {
        DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
        return -EINVAL;
    }

    /* For now, just map the entire thing.  Eventually we probably
     * want to do it page-by-page, w/ kmap() if not vmap()d..
     */
    ptr = msm_gem_get_vaddr_locked(&obj->base);

    if (IS_ERR(ptr)) {
        ret = PTR_ERR(ptr);
        DBG("failed to map: %d", ret);
        return ret;
    }

    for (i = 0; i < nr_relocs; i++) {
        struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
        uint32_t off;
        uint64_t iova;
        bool valid;

        if (submit_reloc.submit_offset % 4) {
            DRM_ERROR("non-aligned reloc offset: %u\n",
                    submit_reloc.submit_offset);
            ret = -EINVAL;
            goto out;
        }

        /* offset in dwords: */
        off = submit_reloc.submit_offset / 4;

        if ((off >= (obj->base.size / 4)) ||
                (off < last_offset)) {
            DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
            ret = -EINVAL;
            goto out;
        }

        ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
        if (ret)
            goto out;

        if (valid)
            continue;

        iova += submit_reloc.reloc_offset;

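        /*
         * A negative shift encodes a right-shift (eg. presumably for
         * packing the upper bits of a 64b address into a dword):
         */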
        if (submit_reloc.shift < 0)
            iova >>= -submit_reloc.shift;
        else
            iova <<= submit_reloc.shift;

        ptr[off] = iova | submit_reloc.or;

        last_offset = off;
    }

out:
    msm_gem_put_vaddr_locked(&obj->base);

    return ret;
}

/* Cleanup submit at end of ioctl.  In the error case, this also drops
 * references, unpins, and drops active refcnt.  In the non-error case,
 * this is done when the submit is retired.
 */
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
    unsigned cleanup_flags = BO_LOCKED;
    unsigned i;

    if (error)
        cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE;

    for (i = 0; i < submit->nr_bos; i++) {
        struct msm_gem_object *msm_obj = submit->bos[i].obj;
        submit_cleanup_bo(submit, i, cleanup_flags);
        if (error)
            drm_gem_object_put(&msm_obj->base);
    }
}

void msm_submit_retire(struct msm_gem_submit *submit)
{
    int i;

    for (i = 0; i < submit->nr_bos; i++) {
        struct drm_gem_object *obj = &submit->bos[i].obj->base;

        msm_gem_lock(obj);
        /* Note, VMA already fence-unpinned before submit: */
        submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE);
        msm_gem_unlock(obj);
        drm_gem_object_put(obj);
    }
}

struct msm_submit_post_dep {
    struct drm_syncobj *syncobj;
    uint64_t point;
    struct dma_fence_chain *chain;
};

static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
                                           struct drm_file *file,
                                           uint64_t in_syncobjs_addr,
                                           uint32_t nr_in_syncobjs,
                                           size_t syncobj_stride,
                                           struct msm_ringbuffer *ring)
{
    struct drm_syncobj **syncobjs = NULL;
    struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
    int ret = 0;
    uint32_t i, j;

    syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
                       GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
    if (!syncobjs)
        return ERR_PTR(-ENOMEM);

    for (i = 0; i < nr_in_syncobjs; ++i) {
        uint64_t address = in_syncobjs_addr + i * syncobj_stride;
        struct dma_fence *fence;

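        /*
         * Copy min(stride, our descriptor size) so that userspace
         * built against an older or newer uapi header still works
         * against this kernel:
         */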
        if (copy_from_user(&syncobj_desc,
                       u64_to_user_ptr(address),
                       min(syncobj_stride, sizeof(syncobj_desc)))) {
            ret = -EFAULT;
            break;
        }

        if (syncobj_desc.point &&
            !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
            ret = -EOPNOTSUPP;
            break;
        }

        if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
            ret = -EINVAL;
            break;
        }

        ret = drm_syncobj_find_fence(file, syncobj_desc.handle,
                                     syncobj_desc.point, 0, &fence);
        if (ret)
            break;

        ret = drm_sched_job_add_dependency(&submit->base, fence);
        if (ret)
            break;

        if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
            syncobjs[i] =
                drm_syncobj_find(file, syncobj_desc.handle);
            if (!syncobjs[i]) {
                ret = -EINVAL;
                break;
            }
        }
    }

    if (ret) {
        for (j = 0; j <= i; ++j) {
            if (syncobjs[j])
                drm_syncobj_put(syncobjs[j]);
        }
        kfree(syncobjs);
        return ERR_PTR(ret);
    }
    return syncobjs;
}

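/*
 * Syncobjs that were submitted with MSM_SUBMIT_SYNCOBJ_RESET are
 * collected by msm_parse_deps() so that, once the job is queued, their
 * fences can be replaced with NULL:
 */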
static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
                               uint32_t nr_syncobjs)
{
    uint32_t i;

    for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
        if (syncobjs[i])
            drm_syncobj_replace_fence(syncobjs[i], NULL);
    }
}

static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
                                                       struct drm_file *file,
                                                       uint64_t syncobjs_addr,
                                                       uint32_t nr_syncobjs,
                                                       size_t syncobj_stride)
{
    struct msm_submit_post_dep *post_deps;
    struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
    int ret = 0;
    uint32_t i, j;

    /* zero the array, so that the error path below can safely clean up
     * entries that were never initialized:
     */
    post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
                        GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
    if (!post_deps)
        return ERR_PTR(-ENOMEM);

    for (i = 0; i < nr_syncobjs; ++i) {
        uint64_t address = syncobjs_addr + i * syncobj_stride;

        if (copy_from_user(&syncobj_desc,
                       u64_to_user_ptr(address),
                       min(syncobj_stride, sizeof(syncobj_desc)))) {
            ret = -EFAULT;
            break;
        }

        post_deps[i].point = syncobj_desc.point;
        post_deps[i].chain = NULL;

        if (syncobj_desc.flags) {
            ret = -EINVAL;
            break;
        }

        if (syncobj_desc.point) {
            if (!drm_core_check_feature(dev,
                                        DRIVER_SYNCOBJ_TIMELINE)) {
                ret = -EOPNOTSUPP;
                break;
            }

            post_deps[i].chain = dma_fence_chain_alloc();
            if (!post_deps[i].chain) {
                ret = -ENOMEM;
                break;
            }
        }

        post_deps[i].syncobj =
            drm_syncobj_find(file, syncobj_desc.handle);
        if (!post_deps[i].syncobj) {
            ret = -EINVAL;
            break;
        }
    }

    if (ret) {
        for (j = 0; j <= i; ++j) {
            dma_fence_chain_free(post_deps[j].chain);
            if (post_deps[j].syncobj)
                drm_syncobj_put(post_deps[j].syncobj);
        }

        kfree(post_deps);
        return ERR_PTR(ret);
    }

    return post_deps;
}

static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
                                  uint32_t count, struct dma_fence *fence)
{
    uint32_t i;

    for (i = 0; post_deps && i < count; ++i) {
        if (post_deps[i].chain) {
            drm_syncobj_add_point(post_deps[i].syncobj,
                                  post_deps[i].chain,
                                  fence, post_deps[i].point);
            post_deps[i].chain = NULL;
        } else {
            drm_syncobj_replace_fence(post_deps[i].syncobj,
                                      fence);
        }
    }
}

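/*
 * Rough sketch of how userspace drives this ioctl (illustrative only;
 * see struct drm_msm_gem_submit and friends in
 * include/uapi/drm/msm_drm.h for the authoritative layout, and note
 * that the handles/values below are made up):
 *
 *    struct drm_msm_gem_submit_bo bos[] = {
 *        { .handle = bo_handle, .flags = MSM_SUBMIT_BO_READ,
 *          .presumed = last_known_iova },
 *    };
 *    struct drm_msm_gem_submit_cmd cmds[] = {
 *        { .type = MSM_SUBMIT_CMD_BUF, .submit_idx = 0,
 *          .submit_offset = 0, .size = cmdstream_bytes,
 *          .nr_relocs = 0, .relocs = 0 },
 *    };
 *    struct drm_msm_gem_submit req = {
 *        .flags   = MSM_PIPE_3D0 | MSM_SUBMIT_FENCE_FD_OUT,
 *        .queueid = queue_id,
 *        .nr_bos  = ARRAY_SIZE(bos),  .bos  = (uintptr_t)bos,
 *        .nr_cmds = ARRAY_SIZE(cmds), .cmds = (uintptr_t)cmds,
 *    };
 *
 *    drmIoctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, &req);
 *    // on success, req.fence (and req.fence_fd, if requested)
 *    // identify the submit for later waiting
 */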
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        struct drm_file *file)
{
    static atomic_t ident = ATOMIC_INIT(0);
    struct msm_drm_private *priv = dev->dev_private;
    struct drm_msm_gem_submit *args = data;
    struct msm_file_private *ctx = file->driver_priv;
    struct msm_gem_submit *submit = NULL;
    struct msm_gpu *gpu = priv->gpu;
    struct msm_gpu_submitqueue *queue;
    struct msm_ringbuffer *ring;
    struct msm_submit_post_dep *post_deps = NULL;
    struct drm_syncobj **syncobjs_to_reset = NULL;
    int out_fence_fd = -1;
    struct pid *pid = get_pid(task_pid(current));
    bool has_ww_ticket = false;
    unsigned i;
    int ret, submitid;

    if (!gpu)
        return -ENXIO;

    if (args->pad)
        return -EINVAL;

    if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) {
        DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n");
        return -EPERM;
    }

    /* for now, we just have 3d pipe.. eventually this would need to
     * be more clever to dispatch to appropriate gpu module:
     */
    if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
        return -EINVAL;

    if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
        return -EINVAL;

    if (args->flags & MSM_SUBMIT_SUDO) {
        if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
            !capable(CAP_SYS_RAWIO))
            return -EINVAL;
    }

    queue = msm_submitqueue_get(ctx, args->queueid);
    if (!queue)
        return -ENOENT;

    /* Get a unique identifier for the submission for logging purposes */
    submitid = atomic_inc_return(&ident) - 1;

    ring = gpu->rb[queue->ring_nr];
    trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
        args->nr_bos, args->nr_cmds);

    ret = mutex_lock_interruptible(&queue->lock);
    if (ret)
        goto out_post_unlock;

    if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
        out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
        if (out_fence_fd < 0) {
            ret = out_fence_fd;
            goto out_unlock;
        }
    }

    submit = submit_create(dev, gpu, queue, args->nr_bos,
        args->nr_cmds);
    if (IS_ERR(submit)) {
        ret = PTR_ERR(submit);
        submit = NULL;
        goto out_unlock;
    }

    submit->pid = pid;
    submit->ident = submitid;

    if (args->flags & MSM_SUBMIT_SUDO)
        submit->in_rb = true;

    if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
        struct dma_fence *in_fence;

        in_fence = sync_file_get_fence(args->fence_fd);

        if (!in_fence) {
            ret = -EINVAL;
            goto out_unlock;
        }

        ret = drm_sched_job_add_dependency(&submit->base, in_fence);
        if (ret)
            goto out_unlock;
    }

    if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
        syncobjs_to_reset = msm_parse_deps(submit, file,
                                           args->in_syncobjs,
                                           args->nr_in_syncobjs,
                                           args->syncobj_stride, ring);
        if (IS_ERR(syncobjs_to_reset)) {
            ret = PTR_ERR(syncobjs_to_reset);
            goto out_unlock;
        }
    }

    if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
        post_deps = msm_parse_post_deps(dev, file,
                                        args->out_syncobjs,
                                        args->nr_out_syncobjs,
                                        args->syncobj_stride);
        if (IS_ERR(post_deps)) {
            ret = PTR_ERR(post_deps);
            goto out_unlock;
        }
    }

    ret = submit_lookup_objects(submit, args, file);
    if (ret)
        goto out;

    ret = submit_lookup_cmds(submit, args, file);
    if (ret)
        goto out;

    /* copy_*_user while holding a ww ticket upsets lockdep */
    ww_acquire_init(&submit->ticket, &reservation_ww_class);
    has_ww_ticket = true;
    ret = submit_lock_objects(submit);
    if (ret)
        goto out;

    ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
    if (ret)
        goto out;

    ret = submit_pin_objects(submit);
    if (ret)
        goto out;

    for (i = 0; i < args->nr_cmds; i++) {
        struct msm_gem_object *msm_obj;
        uint64_t iova;

        ret = submit_bo(submit, submit->cmd[i].idx,
                &msm_obj, &iova, NULL);
        if (ret)
            goto out;

        if (!submit->cmd[i].size ||
            ((submit->cmd[i].size + submit->cmd[i].offset) >
                msm_obj->base.size / 4)) {
            DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
            ret = -EINVAL;
            goto out;
        }

        submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);

        if (submit->valid)
            continue;

        ret = submit_reloc(submit, msm_obj, submit->cmd[i].offset * 4,
                submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
        if (ret)
            goto out;
    }

    submit->nr_cmds = i;

    /*
     * If using a userspace-provided seqno fence, validate that the id
     * is available before arming the sched job.  Since access to
     * fence_idr is serialized on the queue lock, the slot should still
     * be available after the job is armed.
     */
    if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
            idr_find(&queue->fence_idr, args->fence)) {
        ret = -EINVAL;
        goto out;
    }

    drm_sched_job_arm(&submit->base);

    submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);

    if (args->flags & MSM_SUBMIT_FENCE_SN_IN) {
        /*
         * Userspace has assigned the seqno fence that it wants
         * us to use.  It is an error to pick a fence sequence
         * number that is not available.
         */
        submit->fence_id = args->fence;
        ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence,
                    &submit->fence_id, submit->fence_id,
                    GFP_KERNEL);
        /*
         * We've already validated that the fence_id slot is valid,
         * so if idr_alloc_u32 failed, it is a kernel bug
         */
        WARN_ON(ret);
    } else {
        /*
         * Allocate an id which can be used by WAIT_FENCE ioctl to map
         * back to the underlying fence.
         */
        submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
                            submit->user_fence, 1,
                            INT_MAX, GFP_KERNEL);
    }
    if (submit->fence_id < 0) {
        ret = submit->fence_id;
        submit->fence_id = 0;
    }

    if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
        struct sync_file *sync_file = sync_file_create(submit->user_fence);
        if (!sync_file) {
            ret = -ENOMEM;
        } else {
            fd_install(out_fence_fd, sync_file->file);
            args->fence_fd = out_fence_fd;
        }
    }

    submit_attach_object_fences(submit);

    /* The scheduler owns a ref now: */
    msm_gem_submit_get(submit);

    drm_sched_entity_push_job(&submit->base);

    args->fence = submit->fence_id;
    queue->last_fence = submit->fence_id;

    msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
    msm_process_post_deps(post_deps, args->nr_out_syncobjs,
                          submit->user_fence);

out:
    submit_cleanup(submit, !!ret);
    if (has_ww_ticket)
        ww_acquire_fini(&submit->ticket);
out_unlock:
    if (ret && (out_fence_fd >= 0))
        put_unused_fd(out_fence_fd);
    mutex_unlock(&queue->lock);
    if (submit)
        msm_gem_submit_put(submit);
out_post_unlock:
    if (!IS_ERR_OR_NULL(post_deps)) {
        for (i = 0; i < args->nr_out_syncobjs; ++i) {
            kfree(post_deps[i].chain);
            drm_syncobj_put(post_deps[i].syncobj);
        }
        kfree(post_deps);
    }

    if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
        for (i = 0; i < args->nr_in_syncobjs; ++i) {
            if (syncobjs_to_reset[i])
                drm_syncobj_put(syncobjs_to_reset[i]);
        }
        kfree(syncobjs_to_reset);
    }

    return ret;
}