// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"

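/*
 * sysprof levels for this ctx: 0 disables profiling, 1 counts the ctx
 * as a profiling user via gpu->sysprof_active, and 2 additionally
 * holds a runtime-PM reference so the GPU stays powered up
 * (presumably to keep profiling state from being lost across suspend).
 */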
int msm_file_private_set_sysprof(struct msm_file_private *ctx,
                 struct msm_gpu *gpu, int sysprof)
{
    /*
     * Since pm_runtime and sysprof_active are both refcounts, we
     * apply the new value first, and then unwind the previous
     * value.
     */
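    /*
     * For example, moving from sysprof=1 to sysprof=2 increments
     * sysprof_active for the new value before the unwind decrements it
     * for the old one, so the count never transiently drops while
     * profiling remains enabled.
     */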

    switch (sysprof) {
    default:
        return -EINVAL;
    case 2:
        pm_runtime_get_sync(&gpu->pdev->dev);
        fallthrough;
    case 1:
        refcount_inc(&gpu->sysprof_active);
        fallthrough;
    case 0:
        break;
    }

    /* unwind old value: */
    switch (ctx->sysprof) {
    case 2:
        pm_runtime_put_autosuspend(&gpu->pdev->dev);
        fallthrough;
    case 1:
        refcount_dec(&gpu->sysprof_active);
        fallthrough;
    case 0:
        break;
    }

    ctx->sysprof = sysprof;

    return 0;
}

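/*
 * kref release callback for the file-private ctx: destroy any
 * lazily-created scheduler entities, then drop the address space and
 * free the bookkeeping strings.
 */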
void __msm_file_private_destroy(struct kref *kref)
{
    struct msm_file_private *ctx = container_of(kref,
        struct msm_file_private, ref);
    int i;

    for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
        if (!ctx->entities[i])
            continue;

        drm_sched_entity_destroy(ctx->entities[i]);
        kfree(ctx->entities[i]);
    }

    msm_gem_address_space_put(ctx->aspace);
    kfree(ctx->comm);
    kfree(ctx->cmdline);
    kfree(ctx);
}

void msm_submitqueue_destroy(struct kref *kref)
{
    struct msm_gpu_submitqueue *queue = container_of(kref,
        struct msm_gpu_submitqueue, ref);

    idr_destroy(&queue->fence_idr);

    msm_file_private_put(queue->ctx);

    kfree(queue);
}

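/*
 * Look up a submitqueue by id under the read lock; on success a
 * reference is taken that the caller must drop with
 * msm_submitqueue_put().
 */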
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
        u32 id)
{
    struct msm_gpu_submitqueue *entry;

    if (!ctx)
        return NULL;

    read_lock(&ctx->queuelock);

    list_for_each_entry(entry, &ctx->submitqueues, node) {
        if (entry->id == id) {
            kref_get(&entry->ref);
            read_unlock(&ctx->queuelock);

            return entry;
        }
    }

    read_unlock(&ctx->queuelock);
    return NULL;
}

void msm_submitqueue_close(struct msm_file_private *ctx)
{
    struct msm_gpu_submitqueue *entry, *tmp;

    if (!ctx)
        return;

    /*
     * No lock needed in close and there won't
     * be any more user ioctls coming our way
     */
    list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
        list_del(&entry->node);
        msm_submitqueue_put(entry);
    }
}

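/*
 * Lazily create the scheduler entity for a given (ring, priority)
 * slot.  Entities live in the ctx and are shared by every submitqueue
 * with the same ring and priority; the static mutex only serializes
 * first-time creation.
 */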
static struct drm_sched_entity *
get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
         unsigned ring_nr, enum drm_sched_priority sched_prio)
{
    static DEFINE_MUTEX(entity_lock);
    unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;

    /* We should have already validated that the requested priority is
     * valid by the time we get here.
     */
    if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
        return ERR_PTR(-EINVAL);

    mutex_lock(&entity_lock);

    if (!ctx->entities[idx]) {
        struct drm_sched_entity *entity;
        struct drm_gpu_scheduler *sched = &ring->sched;
        int ret;

        entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
        if (!entity) {
            mutex_unlock(&entity_lock);
            return ERR_PTR(-ENOMEM);
        }

        ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
        if (ret) {
            mutex_unlock(&entity_lock);
            kfree(entity);
            return ERR_PTR(ret);
        }

        ctx->entities[idx] = entity;
    }

    mutex_unlock(&entity_lock);

    return ctx->entities[idx];
}

int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
        u32 prio, u32 flags, u32 *id)
{
    struct msm_drm_private *priv = drm->dev_private;
    struct msm_gpu_submitqueue *queue;
    enum drm_sched_priority sched_prio;
    unsigned ring_nr;
    int ret;

    if (!ctx)
        return -ENODEV;

    if (!priv->gpu)
        return -ENODEV;

    ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
    if (ret)
        return ret;

    queue = kzalloc(sizeof(*queue), GFP_KERNEL);
    if (!queue)
        return -ENOMEM;

    kref_init(&queue->ref);
    queue->flags = flags;
    queue->ring_nr = ring_nr;

    queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
                     ring_nr, sched_prio);
    if (IS_ERR(queue->entity)) {
        ret = PTR_ERR(queue->entity);
        kfree(queue);
        return ret;
    }

    write_lock(&ctx->queuelock);

    queue->ctx = msm_file_private_get(ctx);
    queue->id = ctx->queueid++;

    if (id)
        *id = queue->id;

    idr_init(&queue->fence_idr);
    mutex_init(&queue->lock);

    list_add_tail(&queue->node, &ctx->submitqueues);

    write_unlock(&ctx->queuelock);

    return 0;
}

/*
 * Create the default submit-queue (id==0), used for backwards compatibility
 * for userspace that pre-dates the introduction of submitqueues.
 */
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
    struct msm_drm_private *priv = drm->dev_private;
    int default_prio, max_priority;

    if (!priv->gpu)
        return -ENODEV;

    max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;

    /*
     * Pick a medium priority level as default.  Lower numeric value is
     * higher priority, so round-up to pick a priority that is not higher
     * than the middle priority level.
     */
    default_prio = DIV_ROUND_UP(max_priority, 2);
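    /*
     * Worked example (illustrative values): with nr_rings == 4 and
     * NR_SCHED_PRIORITIES == 3 there are 12 levels (0..11), so
     * max_priority == 11 and default_prio == DIV_ROUND_UP(11, 2) == 6,
     * numerically just past the midpoint, i.e. no higher in priority
     * than the middle level.
     */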

    return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

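/*
 * Two-step query convention: userspace first passes len == 0 to learn
 * the expected size of the fault counter, then calls again with a
 * buffer of (at least) that size to receive the data.
 */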
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
        struct drm_msm_submitqueue_query *args)
{
    size_t size = min_t(size_t, args->len, sizeof(queue->faults));
    int ret;

    /* If a zero length was passed in, return the data size we expect */
    if (!args->len) {
        args->len = sizeof(queue->faults);
        return 0;
    }

    /* Set the length to the actual size of the data */
    args->len = size;

    ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

    return ret ? -EFAULT : 0;
}

int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
        struct drm_msm_submitqueue_query *args)
{
    struct msm_gpu_submitqueue *queue;
    int ret = -EINVAL;

    if (args->pad)
        return -EINVAL;

    queue = msm_submitqueue_get(ctx, args->id);
    if (!queue)
        return -ENOENT;

    if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
        ret = msm_submitqueue_query_faults(queue, args);

    msm_submitqueue_put(queue);

    return ret;
}

int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
    struct msm_gpu_submitqueue *entry;

    if (!ctx)
        return 0;

    /*
     * id 0 is the "default" queue and can't be destroyed
     * by the user
     */
    if (!id)
        return -ENOENT;

    write_lock(&ctx->queuelock);

    list_for_each_entry(entry, &ctx->submitqueues, node) {
        if (entry->id == id) {
            list_del(&entry->node);
            write_unlock(&ctx->queuelock);

            msm_submitqueue_put(entry);
            return 0;
        }
    }

    write_unlock(&ctx->queuelock);
    return -ENOENT;
}
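
/*
 * A minimal userspace sketch (not part of this file) of the ioctl
 * lifecycle that lands in msm_submitqueue_create() and
 * msm_submitqueue_remove() above.  It assumes the uapi definitions in
 * <drm/msm_drm.h>, libdrm's drmIoctl(), and an already-opened DRM fd;
 * the priority value is illustrative.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

static int submitqueue_demo(int fd)
{
    struct drm_msm_submitqueue req;
    uint32_t id;

    memset(&req, 0, sizeof(req));
    req.prio = 1;   /* linearized priority; validated by msm_gpu_convert_priority() */

    /* Dispatched to msm_submitqueue_create(); req.id is the out-param. */
    if (drmIoctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req))
        return -1;
    id = req.id;

    /* Dispatched to msm_submitqueue_remove(); id 0 would get -ENOENT. */
    if (drmIoctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE, &id))
        return -1;

    return 0;
}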