0001
0002
0003
0004
0005
0006 #include <linux/moduleparam.h>
0007
0008 #include "etnaviv_drv.h"
0009 #include "etnaviv_dump.h"
0010 #include "etnaviv_gem.h"
0011 #include "etnaviv_gpu.h"
0012 #include "etnaviv_sched.h"
0013 #include "state.xml.h"
0014
0015 static int etnaviv_job_hang_limit = 0;
0016 module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
0017 static int etnaviv_hw_jobs_limit = 4;
0018 module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int , 0444);
0019
0020 static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
0021 {
0022 struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
0023 struct dma_fence *fence = NULL;
0024
0025 if (likely(!sched_job->s_fence->finished.error))
0026 fence = etnaviv_gpu_submit(submit);
0027 else
0028 dev_dbg(submit->gpu->dev, "skipping bad job\n");
0029
0030 return fence;
0031 }
0032
/*
 * Scheduler timeout callback: decide whether a job timeout is real, and if
 * so dump state and recover the GPU; otherwise let the job keep running.
 * Always returns DRM_GPU_SCHED_STAT_NOMINAL after restarting the scheduler.
 */
static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
							  *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

	/* Block the scheduler before touching GPU/job state. */
	drm_sched_stop(&gpu->sched, sched_job);

	/*
	 * If the job's out_fence signaled in the meantime the timeout was
	 * spurious — the job actually completed. Just restart the scheduler.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;

	/*
	 * Forward-progress heuristic: sample the front-end DMA address and
	 * the completed fence counter. If either moved since the last check
	 * (an address change outside the small 0..16 byte window, or a newly
	 * completed fence), assume the GPU is still working, record the new
	 * snapshot and defer the timeout instead of declaring a hang.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (gpu->completed_fence != gpu->hangcheck_fence ||
	    change < 0 || change > 16) {
		gpu->hangcheck_dma_addr = dma_addr;
		gpu->hangcheck_fence = gpu->completed_fence;
		goto out_no_timeout;
	}

	/* Real hang: blame the offending job so repeat offenders get dropped. */
	if(sched_job)
		drm_sched_increase_karma(sched_job);

	/* Capture a devcoredump, then reset the GPU back to a usable state. */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(gpu);

	drm_sched_resubmit_jobs(&gpu->sched);

	/* Restart the scheduler now that the GPU is usable again. */
	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
	/* No (confirmed) hang — just let the scheduler run again. */
	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}
0082
/*
 * Scheduler free_job callback: release the scheduler's bookkeeping for the
 * job, then drop the submit reference taken in etnaviv_sched_push_job().
 */
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	drm_sched_job_cleanup(sched_job);
	etnaviv_submit_put(to_etnaviv_submit(sched_job));
}
0091
/* drm_gpu_scheduler callbacks wiring the etnaviv backend into the scheduler. */
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};
0097
/*
 * Queue a submit on the GPU's scheduler entity.
 *
 * Arms the scheduler job, publishes its finished fence via the GPU's fence
 * IDR (the id handed back to userspace as out_fence_id), and pushes the job.
 * Returns 0 on success or -ENOMEM if no fence id could be allocated.
 */
int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
	int ret = 0;

	/*
	 * fence_lock is held across arm + idr_alloc + push. NOTE(review):
	 * presumably this keeps fence seqno ordering and the fence_idr
	 * contents consistent between concurrent submitters — confirm
	 * against the other fence_lock users in this driver.
	 */
	mutex_lock(&submit->gpu->fence_lock);

	drm_sched_job_arm(&submit->sched_job);

	/* Hold a reference on the finished fence for the submit lifetime. */
	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
						submit->out_fence, 0,
						INT_MAX, GFP_KERNEL);
	if (submit->out_fence_id < 0) {
		drm_sched_job_cleanup(&submit->sched_job);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Extra submit reference for the scheduler; dropped in free_job. */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job);

out_unlock:
	mutex_unlock(&submit->gpu->fence_lock);

	return ret;
}
0131
0132 int etnaviv_sched_init(struct etnaviv_gpu *gpu)
0133 {
0134 int ret;
0135
0136 ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
0137 etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
0138 msecs_to_jiffies(500), NULL, NULL,
0139 dev_name(gpu->dev), gpu->dev);
0140 if (ret)
0141 return ret;
0142
0143 return 0;
0144 }
0145
/* Tear down this GPU's scheduler; counterpart to etnaviv_sched_init(). */
void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}