/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
    DRM_SCHED_PRIORITY_MIN,
    DRM_SCHED_PRIORITY_NORMAL,
    DRM_SCHED_PRIORITY_HIGH,
    DRM_SCHED_PRIORITY_KERNEL,

    DRM_SCHED_PRIORITY_COUNT,
    DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
    /**
     * @list:
     *
     * Used to append this struct to the list of entities in the runqueue
     * @rq under &drm_sched_rq.entities.
     *
     * Protected by &drm_sched_rq.lock of @rq.
     */
    struct list_head        list;

    /**
     * @rq:
     *
     * Runqueue on which this entity is currently scheduled.
     *
     * FIXME: Locking is very unclear for this. Writers are protected by
     * @rq_lock, but readers are generally lockless and seem to just race
     * with not even a READ_ONCE.
     */
    struct drm_sched_rq     *rq;

    /**
     * @sched_list:
     *
     * A list of schedulers (struct drm_gpu_scheduler).  Jobs from this entity can
     * be scheduled on any scheduler on this list.
     *
     * This can be modified by calling drm_sched_entity_modify_sched().
     * Locking is entirely up to the driver, see the above function for more
     * details.
     *
     * This will be set to NULL if &num_sched_list equals 1 and @rq has been
     * set already.
     *
     * FIXME: This means priority changes through
     * drm_sched_entity_set_priority() will be lost henceforth in this case.
     */
    struct drm_gpu_scheduler        **sched_list;

    /**
     * @num_sched_list:
     *
     * Number of drm_gpu_schedulers in the @sched_list.
     */
    unsigned int                    num_sched_list;

    /**
     * @priority:
     *
     * Priority of the entity. This can be modified by calling
     * drm_sched_entity_set_priority(). Protected by &rq_lock.
     */
    enum drm_sched_priority         priority;

    /**
     * @rq_lock:
     *
     * Lock to modify the runqueue to which this entity belongs.
     */
    spinlock_t          rq_lock;

    /**
     * @job_queue: the list of jobs of this entity.
     */
    struct spsc_queue       job_queue;

    /**
     * @fence_seq:
     *
     * A linearly increasing seqno incremented with each new
     * &drm_sched_fence which is part of the entity.
     *
     * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
     * this doesn't need to be atomic.
     */
    atomic_t            fence_seq;

    /**
     * @fence_context:
     *
     * A unique context for all the fences which belong to this entity.  The
     * &drm_sched_fence.scheduled uses the fence_context but
     * &drm_sched_fence.finished uses fence_context + 1.
     */
    uint64_t            fence_context;

    /**
     * @dependency:
     *
     * The dependency fence of the job which is on the top of the job queue.
     */
    struct dma_fence        *dependency;

    /**
     * @cb:
     *
     * Callback for the dependency fence above.
     */
    struct dma_fence_cb     cb;

    /**
     * @guilty:
     *
     * Points to the entity's shared guilty flag, as passed to
     * drm_sched_entity_init(); set once a job from this entity has
     * exceeded the scheduler's hang limit.
     */
    atomic_t            *guilty;

    /**
     * @last_scheduled:
     *
     * Points to the finished fence of the last scheduled job. Only written
     * by the scheduler thread, can be accessed locklessly from
     * drm_sched_job_arm() iff the queue is empty.
     */
    struct dma_fence                *last_scheduled;

    /**
     * @last_user: last group leader pushing a job into the entity.
     */
    struct task_struct      *last_user;

    /**
     * @stopped:
     *
     * Marks the entity as removed from rq and destined for
     * termination. This is set by calling drm_sched_entity_flush() and by
     * drm_sched_fini().
     */
    bool                stopped;

    /**
     * @entity_idle:
     *
     * Signals when entity is not in use, used to sequence entity cleanup in
     * drm_sched_entity_fini().
     */
    struct completion       entity_idle;
};
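
/*
 * Example (illustrative only, not part of this API): a minimal sketch of how
 * a driver might create one entity per context, assuming a hypothetical
 * driver object "vdrv" that owns a single scheduler instance:
 *
 *      struct vdrv_ctx {
 *              struct drm_sched_entity entity;
 *      };
 *
 *      static int vdrv_ctx_init(struct vdrv_device *vdrv, struct vdrv_ctx *ctx)
 *      {
 *              struct drm_gpu_scheduler *sched = &vdrv->sched;
 *
 *              // One scheduler in the list; the guilty pointer is optional
 *              // and may be NULL.
 *              return drm_sched_entity_init(&ctx->entity,
 *                                           DRM_SCHED_PRIORITY_NORMAL,
 *                                           &sched, 1, NULL);
 *      }
 *
 * Teardown goes through drm_sched_entity_destroy(), or through
 * drm_sched_entity_flush() followed by drm_sched_entity_fini().
 */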

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
    spinlock_t          lock;
    struct drm_gpu_scheduler    *sched;
    struct list_head        entities;
    struct drm_sched_entity     *current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
    /**
     * @scheduled: this fence is what will be signaled by the scheduler
     * when the job is scheduled.
     */
    struct dma_fence        scheduled;

    /**
     * @finished: this fence is what will be signaled by the scheduler
     * when the job is completed.
     *
     * When setting up an out fence for the job, you should use
     * this, since it's available immediately upon
     * drm_sched_job_init(), and the fence returned by the driver
     * from run_job() won't be created until the dependencies have
     * resolved.
     */
    struct dma_fence        finished;

    /**
     * @parent: the fence returned by &drm_sched_backend_ops.run_job
     * when scheduling the job on hardware. We signal the
     * &drm_sched_fence.finished fence once parent is signalled.
     */
    struct dma_fence        *parent;
    /**
     * @sched: the scheduler instance to which the job having this struct
     * belongs.
     */
    struct drm_gpu_scheduler    *sched;
    /**
     * @lock: the lock used by the scheduled and the finished fences.
     */
    spinlock_t          lock;
    /**
     * @owner: job owner for debugging
     */
    void                *owner;
};
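
/*
 * Example (illustrative sketch, an assumption about typical driver usage):
 * because &drm_sched_fence.finished exists as soon as drm_sched_job_init()
 * has run, a driver can hand it out as the submission's out-fence before
 * run_job() has produced a hardware fence:
 *
 *      struct dma_fence *out_fence = dma_fence_get(&job->s_fence->finished);
 *
 *      // Export out_fence through the driver's UAPI (sync_file, syncobj, ...)
 *      // and drop the reference once it has been handed over.
 */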

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: a job participates in the "pending" and "done" lists.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @work: Helper to reschedule job kill to a different context.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: incremented on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and
 * should call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
    struct spsc_node        queue_node;
    struct list_head        list;
    struct drm_gpu_scheduler    *sched;
    struct drm_sched_fence      *s_fence;

    /*
     * work is used only after finish_cb has been used and will not be
     * accessed anymore.
     */
    union {
        struct dma_fence_cb     finish_cb;
        struct work_struct      work;
    };

    uint64_t            id;
    atomic_t            karma;
    enum drm_sched_priority     s_priority;
    struct drm_sched_entity         *entity;
    struct dma_fence_cb     cb;
    /**
     * @dependencies:
     *
     * Contains the dependencies as struct dma_fence for this job, see
     * drm_sched_job_add_dependency() and
     * drm_sched_job_add_implicit_dependencies().
     */
    struct xarray           dependencies;

    /** @last_dependency: tracks @dependencies as they signal */
    unsigned long           last_dependency;
};
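
/*
 * Example (illustrative sketch of the submission flow described above, using
 * only functions declared in this header; the "vdrv_*" names, the single
 * in-fence and the single GEM object are assumptions, and error handling is
 * trimmed):
 *
 *      struct vdrv_job {
 *              struct drm_sched_job base;
 *              // driver-private payload ...
 *      };
 *
 *      static int vdrv_submit(struct vdrv_job *vjob,
 *                             struct drm_sched_entity *entity,
 *                             struct dma_fence *in_fence,
 *                             struct drm_gem_object *obj, void *owner)
 *      {
 *              int ret;
 *
 *              ret = drm_sched_job_init(&vjob->base, entity, owner);
 *              if (ret)
 *                      return ret;
 *
 *              // Dependencies are typically added between job init and arm.
 *              ret = drm_sched_job_add_dependency(&vjob->base, in_fence);
 *              if (!ret)
 *                      ret = drm_sched_job_add_implicit_dependencies(&vjob->base,
 *                                                                    obj, true);
 *              if (ret) {
 *                      drm_sched_job_cleanup(&vjob->base);
 *                      return ret;
 *              }
 *
 *              drm_sched_job_arm(&vjob->base);
 *              drm_sched_entity_push_job(&vjob->base);
 *              return 0;
 *      }
 */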

/* Increment @s_job's karma; returns true once it exceeds @threshold. */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
                        int threshold)
{
    return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

/**
 * enum drm_gpu_sched_stat - the scheduler's status
 * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use.
 * @DRM_GPU_SCHED_STAT_NOMINAL: Operation succeeded.
 * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore.
 */
enum drm_gpu_sched_stat {
    DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
    DRM_GPU_SCHED_STAT_NOMINAL,
    DRM_GPU_SCHED_STAT_ENODEV,
};

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
    /**
     * @dependency:
     *
     * Called when the scheduler is considering scheduling this job next, to
     * get another struct dma_fence for this job to block on.  Once it
     * returns NULL, run_job() may be called.
     *
     * If a driver exclusively uses drm_sched_job_add_dependency() and
     * drm_sched_job_add_implicit_dependencies() this can be omitted and
     * left as NULL.
     */
    struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
                    struct drm_sched_entity *s_entity);

    /**
     * @run_job: Called to execute the job once all of the dependencies
     * have been resolved.  This may be called multiple times, if
     * timedout_job() has happened and drm_sched_resubmit_jobs()
     * decides to try it again.
     */
    struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

    /**
     * @timedout_job: Called when a job has taken too long to execute,
     * to trigger GPU recovery.
     *
     * This method is called in a workqueue context.
     *
     * Drivers typically issue a reset to recover from GPU hangs, and this
     * procedure usually follows the following workflow:
     *
     * 1. Stop the scheduler using drm_sched_stop(). This will park the
     *    scheduler thread and cancel the timeout work, guaranteeing that
     *    nothing is queued while we reset the hardware queue.
     * 2. Try to gracefully stop non-faulty jobs (optional).
     * 3. Issue a GPU reset (driver-specific).
     * 4. Re-submit jobs using drm_sched_resubmit_jobs().
     * 5. Restart the scheduler using drm_sched_start(). At that point, new
     *    jobs can be queued, and the scheduler thread is unblocked.
     *
     * Note that some GPUs have distinct hardware queues but need to reset
     * the GPU globally, which requires extra synchronization between the
     * timeout handlers of the different &drm_gpu_scheduler instances. One
     * way to achieve this synchronization is to create an ordered workqueue
     * (using alloc_ordered_workqueue()) at the driver level, and pass this
     * queue to drm_sched_init(), to guarantee that timeout handlers are
     * executed sequentially. The above workflow needs to be slightly
     * adjusted in that case:
     *
     * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
     * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
     *    the reset (optional).
     * 3. Issue a GPU reset on all faulty queues (driver-specific).
     * 4. Re-submit jobs on all schedulers impacted by the reset using
     *    drm_sched_resubmit_jobs().
     * 5. Restart all schedulers that were stopped in step #1 using
     *    drm_sched_start().
     *
     * Return DRM_GPU_SCHED_STAT_NOMINAL when all is normal and the
     * underlying driver has started or completed recovery.
     *
     * Return DRM_GPU_SCHED_STAT_ENODEV if the device is no longer
     * available, i.e. has been unplugged.
     */
    enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

    /**
     * @free_job: Called once the job's finished fence has been signaled
     * and it's time to clean it up.
     */
    void (*free_job)(struct drm_sched_job *sched_job);
};
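
/*
 * Example (illustrative sketch, an assumption rather than a canonical
 * implementation): a timeout handler following the single-scheduler recovery
 * workflow documented for @timedout_job above. vdrv_hw_reset() stands in for
 * the driver-specific reset:
 *
 *      static enum drm_gpu_sched_stat
 *      vdrv_timedout_job(struct drm_sched_job *sched_job)
 *      {
 *              struct drm_gpu_scheduler *sched = sched_job->sched;
 *
 *              // 1. Park the scheduler, treating sched_job as the bad job.
 *              drm_sched_stop(sched, sched_job);
 *
 *              // 2./3. Optionally drain non-faulty work, then reset the GPU.
 *              vdrv_hw_reset(sched);
 *
 *              // 4. Push unfinished jobs back to the hardware.
 *              drm_sched_resubmit_jobs(sched);
 *
 *              // 5. Restart the scheduler so new jobs can be queued again.
 *              drm_sched_start(sched, true);
 *
 *              return DRM_GPU_SCHED_STAT_NOMINAL;
 *      }
 */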

/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority-wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @timeout_wq: workqueue used to queue @work_tdr.
 * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit,
 *              the job is marked guilty and will no longer be considered for
 *              scheduling.
 * @score: score to help the load balancer pick an idle scheduler.
 * @_score: score used when the driver doesn't provide one.
 * @ready: marks if the underlying HW is ready to work.
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 * @dev: the device, as passed to drm_sched_init().
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
    const struct drm_sched_backend_ops  *ops;
    uint32_t            hw_submission_limit;
    long                timeout;
    const char          *name;
    struct drm_sched_rq     sched_rq[DRM_SCHED_PRIORITY_COUNT];
    wait_queue_head_t       wake_up_worker;
    wait_queue_head_t       job_scheduled;
    atomic_t            hw_rq_count;
    atomic64_t          job_id_count;
    struct workqueue_struct     *timeout_wq;
    struct delayed_work     work_tdr;
    struct task_struct      *thread;
    struct list_head        pending_list;
    spinlock_t          job_list_lock;
    int             hang_limit;
    atomic_t            *score;
    atomic_t            _score;
    bool                ready;
    bool                free_guilty;
    struct device           *dev;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
           const struct drm_sched_backend_ops *ops,
           uint32_t hw_submission, unsigned hang_limit,
           long timeout, struct workqueue_struct *timeout_wq,
           atomic_t *score, const char *name, struct device *dev);
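
/*
 * Example (illustrative sketch; the submission depth, hang limit, timeout and
 * names are made-up values for a hypothetical driver): one scheduler is
 * typically brought up per hardware ring at probe time. Passing NULL for
 * @timeout_wq and @score makes the scheduler fall back to the system
 * workqueue and to its internal @_score counter.
 *
 *      ret = drm_sched_init(&vdrv->sched, &vdrv_sched_ops,
 *                           64, 0, msecs_to_jiffies(500),
 *                           NULL, NULL, "vdrv-ring0", vdrv->dev);
 *      if (ret)
 *              return ret;
 *
 * The scheduler is torn down again with drm_sched_fini(&vdrv->sched).
 */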

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
               struct drm_sched_entity *entity,
               void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
                 struct dma_fence *fence);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
                        struct drm_gem_object *obj,
                        bool write);

void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
                   struct drm_gpu_scheduler **sched_list,
                   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
                    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
                 struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
              enum drm_sched_priority priority,
              struct drm_gpu_scheduler **sched_list,
              unsigned int num_sched_list,
              atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_alloc(
    struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
              struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);

void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
                  unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
             unsigned int num_sched_list);

#endif