0001
0002
0003
0004 #include <linux/delay.h>
0005 #include <linux/interrupt.h>
0006 #include <linux/io.h>
0007 #include <linux/iopoll.h>
0008 #include <linux/platform_device.h>
0009 #include <linux/pm_runtime.h>
0010 #include <linux/dma-resv.h>
0011 #include <drm/gpu_scheduler.h>
0012 #include <drm/panfrost_drm.h>
0013
0014 #include "panfrost_device.h"
0015 #include "panfrost_devfreq.h"
0016 #include "panfrost_job.h"
0017 #include "panfrost_features.h"
0018 #include "panfrost_issues.h"
0019 #include "panfrost_gem.h"
0020 #include "panfrost_regs.h"
0021 #include "panfrost_gpu.h"
0022 #include "panfrost_mmu.h"
0023
/* Scheduler timeout for a hardware job, in milliseconds. */
#define JOB_TIMEOUT_MS 500

/* MMIO accessors for the job manager register block. */
#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))
0028
/* Per-job-slot scheduling state: one DRM scheduler plus fence bookkeeping. */
struct panfrost_queue_state {
	struct drm_gpu_scheduler sched;	/* DRM scheduler feeding this slot */
	u64 fence_context;		/* dma-fence context for this slot's done fences */
	u64 emit_seqno;			/* seqno of the last fence created on this context */
};
0034
/* State shared by all job slots: queues, submission lock and the job IRQ. */
struct panfrost_job_slot {
	struct panfrost_queue_state queue[NUM_JOB_SLOTS];
	spinlock_t job_lock;	/* protects pfdev->jobs[][] and fence signalling */
	int irq;		/* "job" interrupt line */
};
0040
/* Upcast a drm_sched_job embedded in a panfrost_job back to its container. */
static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct panfrost_job, base);
}
0046
/* Driver fence signalled when a job completes on a given slot. */
struct panfrost_fence {
	struct dma_fence base;	/* kept first: to_panfrost_fence() casts to this */
	struct drm_device *dev;

	u64 seqno;	/* per-slot sequence number, see panfrost_fence_create() */
	int queue;	/* job slot index this fence belongs to */
};
0054
0055 static inline struct panfrost_fence *
0056 to_panfrost_fence(struct dma_fence *fence)
0057 {
0058 return (struct panfrost_fence *)fence;
0059 }
0060
/* dma_fence_ops::get_driver_name callback. */
static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
	return "panfrost";
}
0065
0066 static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
0067 {
0068 struct panfrost_fence *f = to_panfrost_fence(fence);
0069
0070 switch (f->queue) {
0071 case 0:
0072 return "panfrost-js-0";
0073 case 1:
0074 return "panfrost-js-1";
0075 case 2:
0076 return "panfrost-js-2";
0077 default:
0078 return NULL;
0079 }
0080 }
0081
/* Fence ops: only the naming callbacks are provided. */
static const struct dma_fence_ops panfrost_fence_ops = {
	.get_driver_name = panfrost_fence_get_driver_name,
	.get_timeline_name = panfrost_fence_get_timeline_name,
};
0086
0087 static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
0088 {
0089 struct panfrost_fence *fence;
0090 struct panfrost_job_slot *js = pfdev->js;
0091
0092 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
0093 if (!fence)
0094 return ERR_PTR(-ENOMEM);
0095
0096 fence->dev = pfdev->ddev;
0097 fence->queue = js_num;
0098 fence->seqno = ++js->queue[js_num].emit_seqno;
0099 dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
0100 js->queue[js_num].fence_context, fence->seqno);
0101
0102 return &fence->base;
0103 }
0104
/*
 * Map a job's requirement flags to a hardware job slot: fragment jobs
 * go to slot 0, everything else to slot 1 (slot-2 compute routing is
 * compiled out below).
 */
int panfrost_job_get_slot(struct panfrost_job *job)
{
	if (job->requirements & PANFROST_JD_REQ_FS)
		return 0;

/* Disabled: compute-only routing to slot 2 is not wired up. */
#if 0
	if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
		if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
		    (job->pfdev->features.nr_core_groups == 2))
			return 2;
		if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
			return 2;
	}
#endif
	return 1;
}
0126
/* Program the shader-core affinity registers for the next job on slot @js. */
static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
					u32 requirements,
					int js)
{
	u64 affinity;

	/*
	 * The requirements are currently unused: every job is allowed to
	 * run on all of the shader cores the hardware reports present.
	 */
	affinity = pfdev->features.shader_present;

	job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity));
	job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity));
}
0143
0144 static u32
0145 panfrost_get_job_chain_flag(const struct panfrost_job *job)
0146 {
0147 struct panfrost_fence *f = to_panfrost_fence(job->done_fence);
0148
0149 if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
0150 return 0;
0151
0152 return (f->seqno & 1) ? JS_CONFIG_JOB_CHAIN_FLAG : 0;
0153 }
0154
0155 static struct panfrost_job *
0156 panfrost_dequeue_job(struct panfrost_device *pfdev, int slot)
0157 {
0158 struct panfrost_job *job = pfdev->jobs[slot][0];
0159
0160 WARN_ON(!job);
0161 pfdev->jobs[slot][0] = pfdev->jobs[slot][1];
0162 pfdev->jobs[slot][1] = NULL;
0163
0164 return job;
0165 }
0166
0167 static unsigned int
0168 panfrost_enqueue_job(struct panfrost_device *pfdev, int slot,
0169 struct panfrost_job *job)
0170 {
0171 if (WARN_ON(!job))
0172 return 0;
0173
0174 if (!pfdev->jobs[slot][0]) {
0175 pfdev->jobs[slot][0] = job;
0176 return 0;
0177 }
0178
0179 WARN_ON(pfdev->jobs[slot][1]);
0180 pfdev->jobs[slot][1] = job;
0181 WARN_ON(panfrost_get_job_chain_flag(job) ==
0182 panfrost_get_job_chain_flag(pfdev->jobs[slot][0]));
0183 return 1;
0184 }
0185
/*
 * Program job slot @js and kick @job off on the hardware.
 *
 * Takes a devfreq-busy and runtime-PM reference; both are dropped from
 * the IRQ path once the job completes or fails.
 */
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
	struct panfrost_device *pfdev = job->pfdev;
	unsigned int subslot;
	u32 cfg;
	u64 jc_head = job->jc;
	int ret;

	panfrost_devfreq_record_busy(&pfdev->pfdevfreq);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	/*
	 * The _NEXT registers must be free, otherwise queue accounting is
	 * already broken.
	 * NOTE(review): both early returns above keep the devfreq-busy and
	 * runtime-PM references taken at the top — confirm that is intended.
	 */
	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
		return;
	}

	/* Bind the job's address space; the AS index lands in the low cfg bits. */
	cfg = panfrost_mmu_as_get(pfdev, job->mmu);

	job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
	job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));

	panfrost_job_write_affinity(pfdev, job->requirements, js);

	/* Thread priority 8, flush caches on both ends of the job chain. */
	cfg |= JS_CONFIG_THREAD_PRI(8) |
	       JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
	       JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE |
	       panfrost_get_job_chain_flag(job);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
		cfg |= JS_CONFIG_START_MMU;

	job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

	/*
	 * Queue the job under job_lock and only start it when no reset is
	 * pending; a pending reset resubmits queued jobs afterwards (see
	 * panfrost_reset()).
	 */
	spin_lock(&pfdev->js->job_lock);
	subslot = panfrost_enqueue_job(pfdev, js, job);

	if (!atomic_read(&pfdev->reset.pending)) {
		job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
		dev_dbg(pfdev->dev,
			"JS: Submitting atom %p to js[%d][%d] with head=0x%llx AS %d",
			job, js, subslot, jc_head, cfg & 0xf);
	}
	spin_unlock(&pfdev->js->job_lock);
}
0242
0243 static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
0244 int bo_count,
0245 struct drm_sched_job *job)
0246 {
0247 int i, ret;
0248
0249 for (i = 0; i < bo_count; i++) {
0250 ret = dma_resv_reserve_fences(bos[i]->resv, 1);
0251 if (ret)
0252 return ret;
0253
0254
0255 ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
0256 true);
0257 if (ret)
0258 return ret;
0259 }
0260
0261 return 0;
0262 }
0263
0264 static void panfrost_attach_object_fences(struct drm_gem_object **bos,
0265 int bo_count,
0266 struct dma_fence *fence)
0267 {
0268 int i;
0269
0270 for (i = 0; i < bo_count; i++)
0271 dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
0272 }
0273
/*
 * Hand @job over to the DRM scheduler.
 *
 * Locks all BO reservations, arms the scheduler fence, collects implicit
 * dependencies, pushes the job and finally publishes the render-done
 * fence as the BOs' new write fence. Returns 0 or a negative errno.
 */
int panfrost_job_push(struct panfrost_job *job)
{
	struct panfrost_device *pfdev = job->pfdev;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					&acquire_ctx);
	if (ret)
		return ret;

	mutex_lock(&pfdev->sched_lock);
	drm_sched_job_arm(&job->base);

	job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

	ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
					     &job->base);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		goto unlock;
	}

	/* Reference dropped by panfrost_job_free() when the scheduler is done. */
	kref_get(&job->refcount);

	drm_sched_entity_push_job(&job->base);

	mutex_unlock(&pfdev->sched_lock);

	panfrost_attach_object_fences(job->bos, job->bo_count,
				      job->render_done_fence);

unlock:
	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

	return ret;
}
0311
/*
 * Final kref release: drop fences, GEM mappings and BO references, then
 * free the job itself. Invoked through panfrost_job_put().
 */
static void panfrost_job_cleanup(struct kref *ref)
{
	struct panfrost_job *job = container_of(ref, struct panfrost_job,
						refcount);
	unsigned int i;

	dma_fence_put(job->done_fence);
	dma_fence_put(job->render_done_fence);

	if (job->mappings) {
		for (i = 0; i < job->bo_count; i++) {
			/* A NULL entry ends the populated part of the array. */
			if (!job->mappings[i])
				break;

			atomic_dec(&job->mappings[i]->obj->gpu_usecount);
			panfrost_gem_mapping_put(job->mappings[i]);
		}
		kvfree(job->mappings);
	}

	if (job->bos) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put(job->bos[i]);

		kvfree(job->bos);
	}

	kfree(job);
}
0341
/* Drop a job reference; frees the job when the last one goes away. */
void panfrost_job_put(struct panfrost_job *job)
{
	kref_put(&job->refcount, panfrost_job_cleanup);
}
0346
/* drm_sched_backend_ops::free_job: detach from the scheduler and unref. */
static void panfrost_job_free(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);

	drm_sched_job_cleanup(sched_job);

	panfrost_job_put(job);
}
0355
/*
 * drm_sched_backend_ops::run_job: create the hardware done-fence and
 * submit the job chain to its slot. Returns the done-fence, NULL when
 * there is nothing to run, or an ERR_PTR on fence allocation failure.
 */
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct dma_fence *fence = NULL;

	/* A dependency failed: propagate the error by not running the job. */
	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	/*
	 * A zeroed jc means the job was cancelled (see the error paths that
	 * set job->jc = 0); don't resubmit it to the hardware.
	 */
	if (!job->jc)
		return NULL;

	fence = panfrost_fence_create(pfdev, slot);
	if (IS_ERR(fence))
		return fence;

	/* Replace the previous done-fence on a resubmission of this job. */
	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	panfrost_job_hw_submit(job, slot);

	return fence;
}
0384
0385 void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
0386 {
0387 int j;
0388 u32 irq_mask = 0;
0389
0390 for (j = 0; j < NUM_JOB_SLOTS; j++) {
0391 irq_mask |= MK_JS_MASK(j);
0392 }
0393
0394 job_write(pfdev, JOB_INT_CLEAR, irq_mask);
0395 job_write(pfdev, JOB_INT_MASK, irq_mask);
0396 }
0397
/*
 * Handle an error event for @job on slot @js: decode JS_STATUS, update
 * the job/fence state accordingly, release the per-job resources and
 * trigger a reset when the exception requires one.
 * Called with pfdev->js->job_lock held (fence signalled with _locked).
 */
static void panfrost_job_handle_err(struct panfrost_device *pfdev,
				    struct panfrost_job *job,
				    unsigned int js)
{
	u32 js_status = job_read(pfdev, JS_STATUS(js));
	const char *exception_name = panfrost_exception_name(js_status);
	bool signal_fence = true;

	if (!panfrost_exception_is_fault(js_status)) {
		dev_dbg(pfdev->dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x",
			js, exception_name,
			job_read(pfdev, JS_HEAD_LO(js)),
			job_read(pfdev, JS_TAIL_LO(js)));
	} else {
		dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
			js, exception_name,
			job_read(pfdev, JS_HEAD_LO(js)),
			job_read(pfdev, JS_TAIL_LO(js)));
	}

	if (js_status == DRM_PANFROST_EXCEPTION_STOPPED) {
		/* Soft-stopped: record where the job chain should resume. */
		job->jc = job_read(pfdev, JS_TAIL_LO(js)) |
			  ((u64)job_read(pfdev, JS_TAIL_HI(js)) << 32);

		/* The job will be resumed, so don't signal its fence yet. */
		signal_fence = false;
	} else if (js_status == DRM_PANFROST_EXCEPTION_TERMINATED) {
		/* Cancelled: zero jc so panfrost_job_run() won't resubmit it. */
		dma_fence_set_error(job->done_fence, -ECANCELED);
		job->jc = 0;
	} else if (panfrost_exception_is_fault(js_status)) {
		/*
		 * A genuine fault: flag the fence with -EINVAL and make
		 * sure the chain is not resubmitted either.
		 */
		dma_fence_set_error(job->done_fence, -EINVAL);
		job->jc = 0;
	}

	panfrost_mmu_as_put(pfdev, job->mmu);
	panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

	if (signal_fence)
		dma_fence_signal_locked(job->done_fence);

	/* Balances the pm_runtime_get_sync() in panfrost_job_hw_submit(). */
	pm_runtime_put_autosuspend(pfdev->dev);

	if (panfrost_exception_needs_reset(pfdev, js_status)) {
		atomic_set(&pfdev->reset.pending, 1);
		drm_sched_fault(&pfdev->js->queue[js].sched);
	}
}
0451
/*
 * Handle successful completion of @job: release the address space and
 * devfreq/PM references taken at submission, then signal the fence.
 * Called with pfdev->js->job_lock held.
 */
static void panfrost_job_handle_done(struct panfrost_device *pfdev,
				     struct panfrost_job *job)
{
	/*
	 * Zero the chain pointer so a later resubmission of this finished
	 * job becomes a no-op in panfrost_job_run().
	 */
	job->jc = 0;
	panfrost_mmu_as_put(pfdev, job->mmu);
	panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

	dma_fence_signal_locked(job->done_fence);
	pm_runtime_put_autosuspend(pfdev->dev);
}
0465
/*
 * Core job-event processing: drain all pending DONE/ERR events, then
 * signal completed jobs, handle failed ones and cancel or restart any
 * job left queued behind a failure.
 * Called with pfdev->js->job_lock held.
 */
static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
{
	struct panfrost_job *done[NUM_JOB_SLOTS][2] = {};
	struct panfrost_job *failed[NUM_JOB_SLOTS] = {};
	u32 js_state = 0, js_events = 0;
	unsigned int i, j;

	/* First pass: collect finished/failed jobs while events keep arriving. */
	while (status) {
		u32 js_state_mask = 0;

		for (j = 0; j < NUM_JOB_SLOTS; j++) {
			if (status & MK_JS_MASK(j))
				js_state_mask |= MK_JS_MASK(j);

			if (status & JOB_INT_MASK_DONE(j)) {
				if (done[j][0])
					done[j][1] = panfrost_dequeue_job(pfdev, j);
				else
					done[j][0] = panfrost_dequeue_job(pfdev, j);
			}

			if (status & JOB_INT_MASK_ERR(j)) {
				/*
				 * Cancel the queued job so it doesn't start
				 * behind the failed one before the error has
				 * been dealt with.
				 */
				job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);
				failed[j] = panfrost_dequeue_job(pfdev, j);
			}
		}

		/*
		 * Acknowledge the events we just processed, refresh only the
		 * JOB_INT_JS_STATE bits of the slots that had an event this
		 * iteration, then re-read RAWSTAT to catch events that raced
		 * with the processing above.
		 */
		job_write(pfdev, JOB_INT_CLEAR, status);
		js_state &= ~js_state_mask;
		js_state |= job_read(pfdev, JOB_INT_JS_STATE) & js_state_mask;
		js_events |= status;
		status = job_read(pfdev, JOB_INT_RAWSTAT);
	}

	/* Second pass: act on what was collected. */
	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		if (!(js_events & MK_JS_MASK(j)))
			continue;

		if (failed[j]) {
			panfrost_job_handle_err(pfdev, failed[j], j);
		} else if (pfdev->jobs[j][0] && !(js_state & MK_JS_MASK(j))) {
			/*
			 * The slot reads idle but a job is still tracked for
			 * it. NOTE(review): interpreted as both queued jobs
			 * having finished with only one visible DONE event,
			 * so the remaining job is treated as done too —
			 * confirm against the job manager documentation.
			 */
			if (WARN_ON(!done[j][0]))
				done[j][0] = panfrost_dequeue_job(pfdev, j);
			else
				done[j][1] = panfrost_dequeue_job(pfdev, j);
		}

		for (i = 0; i < ARRAY_SIZE(done[0]) && done[j][i]; i++)
			panfrost_job_handle_done(pfdev, done[j][i]);
	}

	/*
	 * Third pass: a failure left the slot's queued successor stopped;
	 * either cancel it (jc == 0) or kick it off again.
	 */
	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		if (!(js_events & MK_JS_MASK(j)))
			continue;

		if (!failed[j] || !pfdev->jobs[j][0])
			continue;

		if (pfdev->jobs[j][0]->jc == 0) {
			/* The job was cancelled: signal its fence now. */
			struct panfrost_job *canceled = panfrost_dequeue_job(pfdev, j);

			dma_fence_set_error(canceled->done_fence, -ECANCELED);
			panfrost_job_handle_done(pfdev, canceled);
		} else if (!atomic_read(&pfdev->reset.pending)) {
			/* Restart the queued job unless a reset is pending. */
			job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_START);
		}
	}
}
0566
0567 static void panfrost_job_handle_irqs(struct panfrost_device *pfdev)
0568 {
0569 u32 status = job_read(pfdev, JOB_INT_RAWSTAT);
0570
0571 while (status) {
0572 pm_runtime_mark_last_busy(pfdev->dev);
0573
0574 spin_lock(&pfdev->js->job_lock);
0575 panfrost_job_handle_irq(pfdev, status);
0576 spin_unlock(&pfdev->js->job_lock);
0577 status = job_read(pfdev, JOB_INT_RAWSTAT);
0578 }
0579 }
0580
0581 static u32 panfrost_active_slots(struct panfrost_device *pfdev,
0582 u32 *js_state_mask, u32 js_state)
0583 {
0584 u32 rawstat;
0585
0586 if (!(js_state & *js_state_mask))
0587 return 0;
0588
0589 rawstat = job_read(pfdev, JOB_INT_RAWSTAT);
0590 if (rawstat) {
0591 unsigned int i;
0592
0593 for (i = 0; i < NUM_JOB_SLOTS; i++) {
0594 if (rawstat & MK_JS_MASK(i))
0595 *js_state_mask &= ~MK_JS_MASK(i);
0596 }
0597 }
0598
0599 return js_state & *js_state_mask;
0600 }
0601
/*
 * Full device reset: stop the schedulers, soft-stop the hardware, flush
 * pending job events, reset the GPU and restart everything.
 * @bad is the job that triggered the reset (gets karma), or NULL when
 * invoked from the reset work item.
 */
static void
panfrost_reset(struct panfrost_device *pfdev,
	       struct drm_sched_job *bad)
{
	u32 js_state, js_state_mask = 0xffffffff;
	unsigned int i, j;
	bool cookie;
	int ret;

	if (!atomic_read(&pfdev->reset.pending))
		return;

	/*
	 * Stop the schedulers first so no new job can reach
	 * panfrost_job_hw_submit() while the hardware is torn down.
	 */
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_stop(&pfdev->js->queue[i].sched, bad);

	/* All fence signalling below happens inside this annotation. */
	cookie = dma_fence_begin_signalling();

	if (bad)
		drm_sched_increase_karma(bad);

	/*
	 * Mask job interrupts and wait for any in-flight handler to finish
	 * before touching the job slots directly.
	 */
	job_write(pfdev, JOB_INT_MASK, 0);
	synchronize_irq(pfdev->js->irq);

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* Cancel the queued job and soft-stop the running one. */
		job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP);
		job_write(pfdev, JS_COMMAND(i), JS_COMMAND_SOFT_STOP);
	}

	/* Wait up to 10ms for all slots to quiesce or raise an event. */
	ret = readl_poll_timeout(pfdev->iomem + JOB_INT_JS_STATE, js_state,
				 !panfrost_active_slots(pfdev, &js_state_mask, js_state),
				 10, 10000);

	if (ret)
		dev_err(pfdev->dev, "Soft-stop failed\n");

	/* Handle the remaining job events before resetting. */
	panfrost_job_handle_irqs(pfdev);

	/*
	 * Drop the references still held for jobs that never completed:
	 * each queued job took a runtime-PM and a devfreq-busy reference
	 * in panfrost_job_hw_submit().
	 */
	spin_lock(&pfdev->js->job_lock);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) {
			pm_runtime_put_noidle(pfdev->dev);
			panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
		}
	}
	memset(pfdev->jobs, 0, sizeof(pfdev->jobs));
	spin_unlock(&pfdev->js->job_lock);

	/* Proceed with the actual hardware reset. */
	panfrost_device_reset(pfdev);

	/*
	 * NOTE(review): presumably panfrost_device_reset() unmasks the job
	 * IRQs; keep them masked until the schedulers restart — confirm.
	 */
	job_write(pfdev, JOB_INT_MASK, 0);

	/* The GPU is back up: the reset is no longer pending. */
	atomic_set(&pfdev->reset.pending, 0);

	/*
	 * Leave the fence-signalling section around the resubmission step,
	 * then re-enter it for the scheduler restart.
	 */
	dma_fence_end_signalling(cookie);
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
	cookie = dma_fence_begin_signalling();

	/* Restart the schedulers now that the hardware is ready again. */
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_start(&pfdev->js->queue[i].sched, true);

	/* Unmask the job interrupts for all slots again. */
	job_write(pfdev, JOB_INT_MASK,
		  GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
		  GENMASK(NUM_JOB_SLOTS - 1, 0));

	dma_fence_end_signalling(cookie);
}
0707
/*
 * drm_sched_backend_ops::timedout_job: a job exceeded JOB_TIMEOUT_MS.
 * If its fence already signalled, the timeout raced with completion and
 * there is nothing to do; otherwise dump the slot state and reset.
 */
static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
						     *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int js = panfrost_job_get_slot(job);

	/* Benign race: the job completed while the timeout was firing. */
	if (dma_fence_is_signaled(job->done_fence))
		return DRM_GPU_SCHED_STAT_NOMINAL;

	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
		js,
		job_read(pfdev, JS_CONFIG(js)),
		job_read(pfdev, JS_STATUS(js)),
		job_read(pfdev, JS_HEAD_LO(js)),
		job_read(pfdev, JS_TAIL_LO(js)),
		sched_job);

	atomic_set(&pfdev->reset.pending, 1);
	panfrost_reset(pfdev, sched_job);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}
0735
0736 static void panfrost_reset_work(struct work_struct *work)
0737 {
0738 struct panfrost_device *pfdev;
0739
0740 pfdev = container_of(work, struct panfrost_device, reset.work);
0741 panfrost_reset(pfdev, NULL);
0742 }
0743
/* Backend callbacks wiring the DRM GPU scheduler to this driver. */
static const struct drm_sched_backend_ops panfrost_sched_ops = {
	.run_job = panfrost_job_run,
	.timedout_job = panfrost_job_timedout,
	.free_job = panfrost_job_free
};
0749
/*
 * Threaded half of the job IRQ: process all pending events, then
 * unmask the interrupts the hard half masked.
 */
static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	panfrost_job_handle_irqs(pfdev);
	/* Re-enable the slot interrupts masked in panfrost_job_irq_handler(). */
	job_write(pfdev, JOB_INT_MASK,
		  GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
		  GENMASK(NUM_JOB_SLOTS - 1, 0));
	return IRQ_HANDLED;
}
0760
/*
 * Hard half of the job IRQ: mask further job interrupts and defer the
 * actual processing to the IRQ thread.
 */
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = job_read(pfdev, JOB_INT_STAT);

	/* The line is shared (IRQF_SHARED): no status means it wasn't ours. */
	if (!status)
		return IRQ_NONE;

	job_write(pfdev, JOB_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}
0772
/*
 * One-time job subsystem setup: allocate the slot state, request the
 * threaded job IRQ, create the ordered reset workqueue and one DRM
 * scheduler per slot. Returns 0 or a negative errno (devm handles the
 * js allocation and IRQ cleanup).
 */
int panfrost_job_init(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js;
	unsigned int nentries = 2;
	int ret, j;

	/*
	 * Without jobchain disambiguation two queued jobs on a slot cannot
	 * be told apart, so limit each scheduler to one in-flight job.
	 */
	if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
		nentries = 1;

	pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
	if (!js)
		return -ENOMEM;

	INIT_WORK(&pfdev->reset.work, panfrost_reset_work);
	spin_lock_init(&js->job_lock);

	js->irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
	if (js->irq <= 0)
		return -ENODEV;

	ret = devm_request_threaded_irq(pfdev->dev, js->irq,
					panfrost_job_irq_handler,
					panfrost_job_irq_handler_thread,
					IRQF_SHARED, KBUILD_MODNAME "-job",
					pfdev);
	if (ret) {
		dev_err(pfdev->dev, "failed to request job irq");
		return ret;
	}

	/* Ordered workqueue: resets never run concurrently. */
	pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0);
	if (!pfdev->reset.wq)
		return -ENOMEM;

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		js->queue[j].fence_context = dma_fence_context_alloc(1);

		ret = drm_sched_init(&js->queue[j].sched,
				     &panfrost_sched_ops,
				     nentries, 0,
				     msecs_to_jiffies(JOB_TIMEOUT_MS),
				     pfdev->reset.wq,
				     NULL, "pan_js", pfdev->dev);
		if (ret) {
			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
			goto err_sched;
		}
	}

	panfrost_job_enable_interrupts(pfdev);

	return 0;

err_sched:
	/* Unwind only the schedulers created before the failure. */
	for (j--; j >= 0; j--)
		drm_sched_fini(&js->queue[j].sched);

	destroy_workqueue(pfdev->reset.wq);
	return ret;
}
0837
0838 void panfrost_job_fini(struct panfrost_device *pfdev)
0839 {
0840 struct panfrost_job_slot *js = pfdev->js;
0841 int j;
0842
0843 job_write(pfdev, JOB_INT_MASK, 0);
0844
0845 for (j = 0; j < NUM_JOB_SLOTS; j++) {
0846 drm_sched_fini(&js->queue[j].sched);
0847 }
0848
0849 cancel_work_sync(&pfdev->reset.work);
0850 destroy_workqueue(pfdev->reset.wq);
0851 }
0852
/*
 * Per-file open: create one scheduler entity per job slot for this
 * client. Returns 0 or the drm_sched_entity_init() error.
 * NOTE(review): entities created before a failure are not destroyed
 * here — confirm callers recover via panfrost_job_close().
 */
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	struct panfrost_job_slot *js = pfdev->js;
	struct drm_gpu_scheduler *sched;
	int ret, i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		sched = &js->queue[i].sched;
		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
					    DRM_SCHED_PRIORITY_NORMAL, &sched,
					    1, NULL);
		if (WARN_ON(ret))
			return ret;
	}
	return 0;
}
0870
/*
 * Per-file close: destroy the client's scheduler entities and hard-stop
 * any of its jobs still tracked on the hardware queues.
 */
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);

	/* Kill in-flight jobs belonging to this client. */
	spin_lock(&pfdev->js->job_lock);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i];
		int j;

		/* Walk from the queued job (subslot 1) down to the running one. */
		for (j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) {
			struct panfrost_job *job = pfdev->jobs[i][j];
			u32 cmd;

			if (!job || job->base.entity != entity)
				continue;

			if (j == 1) {
				/* Try to cancel the queued job before it starts. */
				job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP);

				/*
				 * Zero jc so the IRQ handler treats this job
				 * as cancelled instead of restarting it.
				 */
				job->jc = 0;
			}

			if (panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
				/* Hard-stop only the chain carrying this job's flag. */
				cmd = panfrost_get_job_chain_flag(job) ?
				      JS_COMMAND_HARD_STOP_1 :
				      JS_COMMAND_HARD_STOP_0;
			} else {
				cmd = JS_COMMAND_HARD_STOP;
			}

			job_write(pfdev, JS_COMMAND(i), cmd);
		}
	}
	spin_unlock(&pfdev->js->job_lock);
}
0914
0915 int panfrost_job_is_idle(struct panfrost_device *pfdev)
0916 {
0917 struct panfrost_job_slot *js = pfdev->js;
0918 int i;
0919
0920 for (i = 0; i < NUM_JOB_SLOTS; i++) {
0921
0922 if (atomic_read(&js->queue[i].sched.hw_rq_count))
0923 return false;
0924 }
0925
0926 return true;
0927 }