#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"

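/*
 * DRM scheduler timeout callback: try a lightweight per-ring soft
 * recovery first; if that fails, escalate to a full GPU reset.
 */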
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
	struct amdgpu_task_info ti;
	struct amdgpu_device *adev = ring->adev;
	int idx;
	int r;

	if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
		DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s",
			 __func__, s_job->sched->name);

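		/* Effectively the job is aborted as the device is gone */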
		return DRM_GPU_SCHED_STAT_ENODEV;
	}

	memset(&ti, 0, sizeof(struct amdgpu_task_info));

	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
		goto exit;
	}

	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);
	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
		  ti.process_name, ti.tgid, ti.task_name, ti.pid);

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
		if (r)
			DRM_ERROR("GPU Recovery Failed: %d\n", r);
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
	}

exit:
	drm_dev_exit(idx);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

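/*
 * Allocate a job with room for @num_ibs IBs and initialize its sync
 * objects and VM state.
 */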
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	if (num_ibs == 0)
		return -EINVAL;

	*job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

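	/*
	 * Initialize the scheduler to at least some ring so that we always
	 * have a pointer to adev.
	 */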
	(*job)->base.sched = &adev->rings[0]->sched;
	(*job)->vm = vm;
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

	return 0;
}

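/* Allocate a single-IB job and back it with an IB of @size bytes. */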
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     enum amdgpu_ib_pool_type pool_type,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r)
		kfree(*job);

	return r;
}

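/*
 * Free the job's IBs; the fence passed to amdgpu_ib_free() tracks when
 * the IB memory may be reused.
 */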
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	unsigned i;

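	/* use sched fence if available */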
	f = job->base.s_fence ? &job->base.s_fence->finished : &job->hw_fence;
	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

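/* Scheduler free_job callback: drop the sync objects and the HW fence. */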
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = to_amdgpu_job(s_job);

	drm_sched_job_cleanup(s_job);

	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	dma_fence_put(&job->hw_fence);
}

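/*
 * Free a job that was never pushed to the scheduler.  If the embedded
 * HW fence was initialized, dropping its last reference releases the
 * job; otherwise it must be kfree()d directly.
 */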
void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	if (!job->hw_fence.ops)
		kfree(job);
	else
		dma_fence_put(&job->hw_fence);
}

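/*
 * Push @job to the scheduler through @entity; on success the finished
 * fence is returned in @f with a reference held for the caller.
 */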
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f)
{
	int r;

	if (!f)
		return -EINVAL;

	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	drm_sched_job_arm(&job->base);

	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);

	return 0;
}

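/* Submit @job directly to @ring, bypassing the scheduler. */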
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence)
{
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);
	if (r)
		return r;

	amdgpu_job_free(job);
	return 0;
}

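/*
 * Scheduler dependency callback: return the next fence the job must
 * wait on, and grab a VMID once all other dependencies are satisfied.
 */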
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
	int r;

	fence = amdgpu_sync_get_fence(&job->sync);
	if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
		r = amdgpu_sync_fence(&job->sched_sync, fence);
		if (r)
			DRM_ERROR("Error adding fence (%d)\n", r);
	}

	while (fence == NULL && vm && !job->vmid) {
		r = amdgpu_vmid_grab(vm, ring, &job->sync,
				     &job->base.s_fence->finished,
				     job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}

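/*
 * Scheduler run_job callback: submit the job's IBs to the ring. Jobs
 * whose VRAM contents were lost since allocation are cancelled with
 * -ECANCELED instead of being run.
 */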
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_job *job;
	int r = 0;

	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}

	job->job_run_counter++;
	amdgpu_job_free_resources(job);

	fence = r ? ERR_PTR(r) : fence;
	return fence;
}

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

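/*
 * Fail all jobs queued on @sched with -EHWPOISON and signal their
 * fences so that no waiter is left hanging.
 */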
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct drm_sched_entity *s_entity = NULL;
	int i;

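	/* Signal all jobs not yet scheduled */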
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list) {
			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
				struct drm_sched_fence *s_fence = s_job->s_fence;

				dma_fence_signal(&s_fence->scheduled);
				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
				dma_fence_signal(&s_fence->finished);
			}
		}
		spin_unlock(&rq->lock);
	}

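	/* Signal all jobs already scheduled to HW */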
	list_for_each_entry(s_job, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
		dma_fence_signal(&s_fence->finished);
	}
}

const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};