#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"

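/*
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When a fence value is written,
 * all buffers associated with that fence are expected to be
 * no longer in use by the associated ring on the GPU and the
 * relevant GPU caches to have been flushed.
 */
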
struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring *ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static const struct dma_fence_ops amdgpu_job_fence_ops;

static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops ||
	    __f->base.ops == &amdgpu_job_fence_ops)
		return __f;

	return NULL;
}

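/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */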
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

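/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics), falling back to the
 * last signalled sequence number if no CPU address is mapped.
 * Returns the value of the fence read from memory.
 */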
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}

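/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @job: job the fence is embedded in, or NULL for a stand-alone fence
 * @flags: AMDGPU_FENCE_FLAG_* flags passed on to the fence packet
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, negative error code on failure.
 */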
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence;
	struct amdgpu_fence *am_fence;
	struct dma_fence __rcu **ptr;
	uint32_t seq;
	int r;

	if (job == NULL) {
		/* create a separate hw fence */
		am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
		if (am_fence == NULL)
			return -ENOMEM;
		fence = &am_fence->base;
		am_fence->ring = ring;
	} else {
		/* use the hw fence embedded in the job */
		fence = &job->hw_fence;
	}

	seq = ++ring->fence_drv.sync_seq;
	if (job && job->job_run_counter) {
		/* reinit seq for resubmitted jobs */
		fence->seqno = seq;
		/* take a reference, as a freshly initialized fence would have */
		dma_fence_get(fence);
	} else {
		if (job) {
			dma_fence_init(fence, &amdgpu_job_fence_ops,
				       &ring->fence_drv.lock,
				       adev->fence_context + ring->idx, seq);
			/* against remove in amdgpu_job_{free, free_cb} */
			dma_fence_get(fence);
		} else {
			dma_fence_init(fence, &amdgpu_fence_ops,
				       &ring->fence_drv.lock,
				       adev->fence_context + ring->idx, seq);
		}
	}

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);
	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;

		rcu_read_lock();
		old = dma_fence_get_rcu_safe(ptr);
		rcu_read_unlock();

		if (old) {
			r = dma_fence_wait(old, false);
			dma_fence_put(old);
			if (r)
				return r;
		}
	}

	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	rcu_assign_pointer(*ptr, dma_fence_get(fence));

	*f = fence;

	return 0;
}

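/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: timeout in usecs for waiting for a free fence slot
 *
 * Emits a fence command on the requested ring (all asics), for use with
 * polled rather than interrupt-driven fences.
 * Returns 0 on success, -EINVAL if @s is NULL, or -ETIMEDOUT if waiting
 * for a free fence slot timed out.
 */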
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout)
{
	uint32_t seq;
	signed long r;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
				      timeout);
	if (r < 1)
		return -ETIMEDOUT;

	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}

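/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */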
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

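/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the sequence number last written back by the GPU and signals
 * all fences emitted up to that value, dropping the runtime PM
 * references taken when they were emitted.
 *
 * Returns true if the sequence number advanced, false otherwise.
 */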
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct amdgpu_device *adev = ring->adev;
	uint32_t seq, last_seq;

	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (del_timer(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return false;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* drop the fence stored for this sequence number, if any */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		dma_fence_signal(fence);
		dma_fence_put(fence);
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	} while (last_seq != seq);

	return true;
}

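/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the pointer to the ring structure
 *
 * Checks for fence activity in case the fence interrupt was missed.
 */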
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	if (amdgpu_fence_process(ring))
		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}

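/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for the most recently emitted fence on the requested ring to
 * signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */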
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}

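/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: timeout in usecs
 *
 * Busy-waits until the fence at @wait_seq has signalled or the timeout
 * expires.
 * Returns the remaining timeout in usecs, or 0 on timeout.
 */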
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}

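/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fences are associated with
 *
 * Get the number of fences emitted on the requested ring that have not
 * yet signalled (all asics).
 */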
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last
	 * sequence, but it's ok to report a slightly wrong fence count here.
	 */
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}

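/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */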
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
		ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
		      ring->name, ring->fence_drv.gpu_addr);
	return 0;
}

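/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Returns 0 on success, -EINVAL or -ENOMEM on failure.
 */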
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (!adev)
		return -EINVAL;

	if (!is_power_of_2(ring->num_hw_submission))
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);

	if (!ring->fence_drv.fences)
		return -ENOMEM;

	return 0;
}

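/**
 * amdgpu_fence_driver_sw_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Returns 0 for success.
 */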
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
{
	return 0;
}

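/**
 * amdgpu_fence_driver_hw_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics):
 * wait for (or force completion of) outstanding fences and disable
 * the fence interrupts.
 */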
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* You can't wait for HW to signal if it's gone */
		if (!drm_dev_is_unplugged(adev_to_drm(adev)))
			r = amdgpu_fence_wait_empty(ring);
		else
			r = -ENODEV;

		if (r)
			amdgpu_fence_driver_force_completion(ring);

		if (ring->fence_drv.irq_src)
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);

		del_timer_sync(&ring->fence_drv.fallback_timer);
	}
}

void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
			continue;

		if (stop)
			disable_irq(adev->irq.irq);
		else
			enable_irq(adev->irq.irq);
	}
}

void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
{
	unsigned int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		if (!ring->no_scheduler)
			drm_sched_fini(&ring->sched);

		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}

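/**
 * amdgpu_fence_driver_hw_init - enable the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Enable the fence driver for all possible rings (all asics):
 * re-enable the fence interrupt on each initialized ring.
 */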
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		if (ring->fence_drv.irq_src)
			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
	}
}

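/**
 * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
 *
 * @ring: ring to clear the job-embedded fences on
 *
 * Drop every job-embedded hardware fence from the ring's fence array
 * and release the reference the array held on it.
 */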
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
{
	int i;
	struct dma_fence *old, **ptr;

	for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
		ptr = &ring->fence_drv.fences[i];
		old = rcu_dereference_protected(*ptr, 1);
		if (old && old->ops == &amdgpu_job_fence_ops) {
			RCU_INIT_POINTER(*ptr, NULL);
			dma_fence_put(old);
		}
	}
}

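/**
 * amdgpu_fence_driver_force_completion - force signal all fences on a ring
 *
 * @ring: ring to signal the fences on
 *
 * Write the last emitted sequence number to the fence memory and
 * process the result, signalling every fence emitted so far.
 */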
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	return (const char *)to_amdgpu_fence(f)->ring->name;
}

static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

	return (const char *)to_amdgpu_ring(job->base.sched)->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 *
 * @f: fence
 *
 * Arms the fallback timer, if it is not already pending, so the fence
 * is eventually signalled even if the fence interrupt is missed.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);

	return true;
}

/**
 * amdgpu_job_fence_enable_signaling - enable signalling on job fence
 *
 * @f: fence
 *
 * Job-embedded counterpart of amdgpu_fence_enable_signaling(); the ring
 * is looked up through the job's scheduler.
 */
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));

	return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

	/* free a stand-alone fence allocated from the fence slab */
	kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
}

/**
 * amdgpu_job_fence_free - free up the job with embedded fence
 *
 * @rcu: RCU callback head
 *
 * Free up the job with embedded fence after the RCU grace period.
 */
static void amdgpu_job_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

	/* free the job that the hw fence is embedded in */
	kfree(container_of(f, struct amdgpu_job, hw_fence));
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}

/**
 * amdgpu_job_fence_release - callback that job embedded fence can be freed
 *
 * @f: fence
 *
 * Job-embedded counterpart of amdgpu_fence_release(); it RCU schedules
 * freeing up the job that embeds the fence.
 */
static void amdgpu_job_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_job_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};

static const struct dma_fence_ops amdgpu_job_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_job_fence_get_timeline_name,
	.enable_signaling = amdgpu_job_fence_enable_signaling,
	.release = amdgpu_job_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
				   le32_to_cpu(*ring->trail_fence_cpu_addr));
			seq_printf(m, "Last emitted 0x%08x\n",
				   ring->trail_seq);
		}

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}

/*
 * Manually trigger a GPU reset and recovery.
 *
 * Debugfs read handler behind the amdgpu_gpu_recover file: schedules the
 * reset work, waits for it and reports the reset result.
 */
static int gpu_recover_get(void *data, u64 *val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	struct drm_device *dev = adev_to_drm(adev);
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return 0;
	}

	if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
		flush_work(&adev->reset_work);

	*val = atomic_read(&adev->reset_domain->reset_res);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
			 "%lld\n");

static void amdgpu_debugfs_reset_work(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  reset_work);
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

#endif

void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
			    &amdgpu_debugfs_fence_info_fops);

	if (!amdgpu_sriov_vf(adev)) {
		INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);
		debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
				    &amdgpu_debugfs_gpu_recover_fops);
	}
#endif
}