#include <linux/irq.h>
#include <linux/pci.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"

#include <linux/pm_runtime.h>

#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif

#define AMDGPU_WAIT_IDLE_TIMEOUT 200

const char *soc15_ih_clientid_name[] = {
	"IH",
	"SDMA2 or ACP",
	"ATHUB",
	"BIF",
	"SDMA3 or DCE",
	"SDMA4 or ISP",
	"VMC1 or PCIE0",
	"RLC",
	"SDMA0",
	"SDMA1",
	"SE0SH",
	"SE1SH",
	"SE2SH",
	"SE3SH",
	"VCN1 or UVD1",
	"THM",
	"VCN or UVD",
	"SDMA5 or VCE0",
	"VMC",
	"SDMA6 or XDMA",
	"GRBM_CP",
	"ATS",
	"ROM_SMUIO",
	"DF",
	"SDMA7 or VCE1",
	"PWR",
	"reserved",
	"UTCL2",
	"EA",
	"UTCL2LOG",
	"MP0",
	"MP1"
};
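
/**
 * amdgpu_hotplug_work_func - work handler for display hotplug event
 *
 * @work: work struct pointer
 *
 * This is the hotplug event work handler (all ASICs).
 * The work gets scheduled from the IRQ handler if there
 * was a hotplug interrupt.  It walks through the connector table
 * and calls the hotplug handler for each connector.  After this,
 * it sends a DRM hotplug event to alert userspace.
 */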
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work);
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	mutex_lock(&mode_config->mutex);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		amdgpu_connector_hotplug(connector);
	drm_connector_list_iter_end(&iter);
	mutex_unlock(&mode_config->mutex);

	drm_helper_hpd_irq_event(dev);
}
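
/**
 * amdgpu_irq_disable_all - disable *all* interrupts
 *
 * @adev: amdgpu device pointer
 *
 * Disables all types of interrupts from all registered IRQ sources
 * (all ASICs).
 */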
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned i, j, k;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs->set || !src->num_types)
				continue;

			for (k = 0; k < src->num_types; ++k) {
				atomic_set(&src->enabled_types[k], 0);
				r = src->funcs->set(adev, src, k,
						    AMDGPU_IRQ_STATE_DISABLE);
				if (r)
					DRM_ERROR("error disabling interrupt (%d)\n",
						  r);
			}
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}
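
/**
 * amdgpu_irq_handler - IRQ handler
 *
 * @irq: IRQ number (unused)
 * @arg: pointer to the DRM device
 *
 * IRQ handler for the amdgpu driver (all ASICs); processes the main
 * IH ring and reports back whether the interrupt was handled.
 *
 * Returns:
 * result of handling the IRQ, as defined by &irqreturn_t
 */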
static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = drm_to_adev(dev);
	irqreturn_t ret;

	ret = amdgpu_ih_process(adev, &adev->irq.ih);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);

	amdgpu_ras_interrupt_fatal_error_handler(adev);

	return ret;
}
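
/**
 * amdgpu_irq_handle_ih1 - kick off processing of IH ring 1
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Work handler that processes all entries currently pending on the
 * second IH ring.
 */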
static void amdgpu_irq_handle_ih1(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih1_work);

	amdgpu_ih_process(adev, &adev->irq.ih1);
}
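
/**
 * amdgpu_irq_handle_ih2 - kick off processing of IH ring 2
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Work handler that processes all entries currently pending on the
 * third IH ring.
 */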
static void amdgpu_irq_handle_ih2(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih2_work);

	amdgpu_ih_process(adev, &adev->irq.ih2);
}
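
/**
 * amdgpu_irq_handle_ih_soft - kick off processing of the soft IH ring
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Work handler that processes all entries currently pending on the
 * software ("soft") IH ring, filled by amdgpu_irq_delegate().
 */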
static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih_soft_work);

	amdgpu_ih_process(adev, &adev->irq.ih_soft);
}
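
/**
 * amdgpu_msi_ok - check whether MSI functionality is allowed
 *
 * @adev: amdgpu device pointer (unused)
 *
 * Checks whether MSI functionality has been forced on or off via the
 * amdgpu.msi module parameter (all ASICs).
 *
 * Returns:
 * *true* if MSIs may be enabled, *false* otherwise
 */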
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
	/* amdgpu.msi: 1 forces MSI on, 0 forces it off, anything else
	 * (auto) leaves it enabled.
	 */
	if (amdgpu_msi == 1)
		return true;
	else if (amdgpu_msi == 0)
		return false;

	return true;
}

static void amdgpu_restore_msix(struct amdgpu_device *adev)
{
	u16 ctrl;

	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;

	/* Toggle the MSI-X enable bit to restore the MSI-X state */
	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
	ctrl |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}
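
/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Enables MSI/MSI-X if available, initializes vblank and hotplug handling
 * for the non-DC display path, sets up the IH work items and requests the
 * interrupt line (all ASICs).
 *
 * Returns:
 * 0 on success or error code on failure
 */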
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;
	unsigned int irq;

	spin_lock_init(&adev->irq.lock);

	/* Enable MSI if not disabled by module parameter */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int nvec = pci_msix_vec_count(adev->pdev);
		unsigned int flags;

		if (nvec <= 0) {
			flags = PCI_IRQ_MSI;
		} else {
			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
		}
		/* we only need one vector */
		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
		if (nvec > 0) {
			adev->irq.msi_enabled = true;
			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
		}
	}

	if (!amdgpu_device_has_dc_support(adev)) {
		if (!adev->enable_virtual_display)
			/* Disable vblank IRQs aggressively for power-saving,
			 * the disable timeout is handled on the core kernel side.
			 */
			adev_to_drm(adev)->vblank_disable_immediate = true;

		r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
		if (r)
			return r;

		/* Hotplug work is only needed for the non-DC display path */
		INIT_WORK(&adev->hotplug_work,
			  amdgpu_hotplug_work_func);
	}

	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);

	/* Only the first vector is used, whether MSI/MSI-X or legacy */
	r = pci_irq_vector(adev->pdev, 0);
	if (r < 0)
		return r;
	irq = r;

	/* PCI devices require shared interrupts */
	r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
			adev_to_drm(adev));
	if (r) {
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
		return r;
	}
	adev->irq.installed = true;
	adev->irq.irq = irq;
	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;

	DRM_DEBUG("amdgpu: irq initialized.\n");
	return 0;
}
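
/**
 * amdgpu_irq_fini_hw - shut down hardware interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Frees the interrupt line, releases the MSI/MSI-X vectors and tears down
 * the IH rings (all ASICs).
 */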
void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
{
	if (adev->irq.installed) {
		free_irq(adev->irq.irq, adev_to_drm(adev));
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_free_irq_vectors(adev->pdev);

		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
	}

	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
}
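
/**
 * amdgpu_irq_fini_sw - tear down the software IRQ state
 *
 * @adev: amdgpu device pointer
 *
 * Frees the per-type enable counters and the per-client source tables
 * registered via amdgpu_irq_add_id() (all ASICs).
 */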
void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
		}
		kfree(adev->irq.client[i].sources);
		adev->irq.client[i].sources = NULL;
	}
}
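
/**
 * amdgpu_irq_add_id - register IRQ source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id
 * @src_id: source id
 * @source: IRQ source pointer
 *
 * Registers an IRQ source for the given client and source id and allocates
 * the per-type enable reference counters for it (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */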
int amdgpu_irq_add_id(struct amdgpu_device *adev,
		      unsigned client_id, unsigned src_id,
		      struct amdgpu_irq_src *source)
{
	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
		return -EINVAL;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	if (!adev->irq.client[client_id].sources) {
		adev->irq.client[client_id].sources =
			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
				sizeof(struct amdgpu_irq_src *),
				GFP_KERNEL);
		if (!adev->irq.client[client_id].sources)
			return -ENOMEM;
	}

	if (adev->irq.client[client_id].sources[src_id] != NULL)
		return -EINVAL;

	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.client[client_id].sources[src_id] = source;
	return 0;
}
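
/**
 * amdgpu_irq_dispatch - dispatch an IRQ to IP blocks
 *
 * @adev: amdgpu device pointer
 * @ih: interrupt ring instance
 *
 * Decodes the current IV entry and dispatches it to the matching IRQ
 * source's process callback (all ASICs).  Interrupts not handled by any
 * source are forwarded to amdkfd.
 */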
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_ih_ring *ih)
{
	u32 ring_index = ih->rptr >> 2;
	struct amdgpu_iv_entry entry;
	unsigned client_id, src_id;
	struct amdgpu_irq_src *src;
	bool handled = false;
	int r;

	entry.ih = ih;
	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
	amdgpu_ih_decode_iv(adev, &entry);

	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

	client_id = entry.client_id;
	src_id = entry.src_id;

	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);

	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);

	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
		   adev->irq.virq[src_id]) {
		generic_handle_domain_irq(adev->irq.domain, src_id);

	} else if (!adev->irq.client[client_id].sources) {
		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
			  client_id, src_id);

	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
		r = src->funcs->process(adev, src, &entry);
		if (r < 0)
			DRM_ERROR("error processing interrupt (%d)\n", r);
		else if (r)
			handled = true;

	} else {
		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
	}

	/* Send it to amdkfd as well if it isn't already handled */
	if (!handled)
		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);

	if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp))
		ih->processed_timestamp = entry.timestamp;
}
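
/**
 * amdgpu_irq_delegate - delegate an IV to the soft IH ring
 *
 * @adev: amdgpu device pointer
 * @entry: IV entry
 * @num_dw: size of the IV in dwords
 *
 * Writes the IV to the software IH ring and schedules a work item to
 * process it outside of the hard interrupt context.
 */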
void amdgpu_irq_delegate(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry,
			 unsigned int num_dw)
{
	amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
	schedule_work(&adev->irq.ih_soft_work);
}
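
/**
 * amdgpu_irq_update - update the hardware interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: IRQ source pointer
 * @type: type of interrupt
 *
 * Programs the interrupt state for the given source and type according to
 * its current enable reference count (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */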
int amdgpu_irq_update(struct amdgpu_device *adev,
		      struct amdgpu_irq_src *src, unsigned type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* We need to determine the state after taking the lock, otherwise
	 * we might disable a just enabled interrupt again.
	 */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}
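
/**
 * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
 *
 * @adev: amdgpu device pointer
 *
 * Re-applies the interrupt state of every registered source and type after
 * a GPU reset or on resume, so the hardware matches the software reference
 * counts (all ASICs).
 */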
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
	int i, j, k;

	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
		amdgpu_restore_msix(adev);

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs || !src->funcs->set)
				continue;
			for (k = 0; k < src->num_types; k++)
				amdgpu_irq_update(adev, src, k);
		}
	}
}
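
/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: IRQ source pointer
 * @type: type of interrupt
 *
 * Enables the specified type of interrupt from the given source by
 * incrementing its reference count (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */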
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}
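
/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: IRQ source pointer
 * @type: type of interrupt
 *
 * Decrements the reference count for the specified type of interrupt and
 * disables it in hardware once the count reaches zero (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */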
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}
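
/**
 * amdgpu_irq_enabled - check whether an interrupt is enabled
 *
 * @adev: amdgpu device pointer
 * @src: IRQ source pointer
 * @type: type of interrupt
 *
 * Checks whether the given type of interrupt is enabled on the given source.
 *
 * Returns:
 * *true* if the interrupt is enabled, *false* if it is disabled or the
 * parameters are invalid
 */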
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned type)
{
	if (!adev->irq.installed)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	return !!atomic_read(&src->enabled_types[type]);
}

/* Generic irq_chip callbacks for the IH irq domain; per-source enabling is
 * driven by amdgpu_irq_get()/amdgpu_irq_put(), so these are no-ops.
 */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* intentionally empty */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* intentionally empty */
}

static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
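
/**
 * amdgpu_irqdomain_map - map a hardware IRQ number to a virtual IRQ
 *
 * @d: amdgpu IH irq domain pointer
 * @irq: virtual IRQ number
 * @hwirq: hardware IRQ number (IH source id)
 *
 * Attaches the amdgpu IRQ chip and the simple flow handler to the given
 * virtual IRQ.
 *
 * Returns:
 * 0 on success or error code otherwise
 */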
static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}

static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};
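
/**
 * amdgpu_irq_add_domain - create a linear IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Creates an IRQ domain for GPU interrupt sources that may be driven by
 * another driver (e.g., ACP).
 *
 * Returns:
 * 0 on success or error code otherwise
 */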
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}

	return 0;
}
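
/**
 * amdgpu_irq_remove_domain - remove the IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Removes the IRQ domain for GPU interrupt sources that may be driven by
 * another driver (e.g., ACP).
 */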
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
	if (adev->irq.domain) {
		irq_domain_remove(adev->irq.domain);
		adev->irq.domain = NULL;
	}
}
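
/**
 * amdgpu_irq_create_mapping - create a mapping between a domain IRQ and a src_id
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Creates a mapping between a GPU IH source id and a Linux IRQ number.
 * Used for components that generate a GPU interrupt but are driven by a
 * different driver (e.g., ACP).
 *
 * Returns:
 * the Linux IRQ number, or 0 if the mapping failed
 */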
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

	return adev->irq.virq[src_id];
}