Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * Copyright 2008 Advanced Micro Devices, Inc.
0003  * Copyright 2008 Red Hat Inc.
0004  * Copyright 2009 Jerome Glisse.
0005  *
0006  * Permission is hereby granted, free of charge, to any person obtaining a
0007  * copy of this software and associated documentation files (the "Software"),
0008  * to deal in the Software without restriction, including without limitation
0009  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
0010  * and/or sell copies of the Software, and to permit persons to whom the
0011  * Software is furnished to do so, subject to the following conditions:
0012  *
0013  * The above copyright notice and this permission notice shall be included in
0014  * all copies or substantial portions of the Software.
0015  *
0016  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0017  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0018  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
0019  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
0020  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
0021  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
0022  * OTHER DEALINGS IN THE SOFTWARE.
0023  *
0024  * Authors: Dave Airlie
0025  *          Alex Deucher
0026  *          Jerome Glisse
0027  */
0028 
0029 /**
0030  * DOC: Interrupt Handling
0031  *
0032  * Interrupts generated within GPU hardware raise interrupt requests that are
0033  * passed to amdgpu IRQ handler which is responsible for detecting source and
0034  * type of the interrupt and dispatching matching handlers. If handling an
0035  * interrupt requires calling kernel functions that may sleep processing is
0036  * dispatched to work handlers.
0037  *
0038  * If MSI functionality is not disabled by module parameter then MSI
0039  * support will be enabled.
0040  *
0041  * For GPU interrupt sources that may be driven by another driver, IRQ domain
0042  * support is used (with mapping between virtual and hardware IRQs).
0043  */
0044 
0045 #include <linux/irq.h>
0046 #include <linux/pci.h>
0047 
0048 #include <drm/drm_crtc_helper.h>
0049 #include <drm/drm_vblank.h>
0050 #include <drm/amdgpu_drm.h>
0051 #include <drm/drm_drv.h>
0052 #include "amdgpu.h"
0053 #include "amdgpu_ih.h"
0054 #include "atom.h"
0055 #include "amdgpu_connectors.h"
0056 #include "amdgpu_trace.h"
0057 #include "amdgpu_amdkfd.h"
0058 #include "amdgpu_ras.h"
0059 
0060 #include <linux/pm_runtime.h>
0061 
0062 #ifdef CONFIG_DRM_AMD_DC
0063 #include "amdgpu_dm_irq.h"
0064 #endif
0065 
0066 #define AMDGPU_WAIT_IDLE_TIMEOUT 200
0067 
/* Human-readable names for IH client ids, indexed by client id.
 * NOTE(review): entry order presumably mirrors the SOC15 IH clientid
 * enum defined elsewhere — confirm against soc15_ih_clientid before
 * relying on a specific index. */
const char *soc15_ih_clientid_name[] = {
    "IH",
    "SDMA2 or ACP",
    "ATHUB",
    "BIF",
    "SDMA3 or DCE",
    "SDMA4 or ISP",
    "VMC1 or PCIE0",
    "RLC",
    "SDMA0",
    "SDMA1",
    "SE0SH",
    "SE1SH",
    "SE2SH",
    "SE3SH",
    "VCN1 or UVD1",
    "THM",
    "VCN or UVD",
    "SDMA5 or VCE0",
    "VMC",
    "SDMA6 or XDMA",
    "GRBM_CP",
    "ATS",
    "ROM_SMUIO",
    "DF",
    "SDMA7 or VCE1",
    "PWR",
    "reserved",
    "UTCL2",
    "EA",
    "UTCL2LOG",
    "MP0",
    "MP1"
};
0102 
0103 /**
0104  * amdgpu_hotplug_work_func - work handler for display hotplug event
0105  *
0106  * @work: work struct pointer
0107  *
0108  * This is the hotplug event work handler (all ASICs).
0109  * The work gets scheduled from the IRQ handler if there
0110  * was a hotplug interrupt.  It walks through the connector table
0111  * and calls hotplug handler for each connector. After this, it sends
0112  * a DRM hotplug event to alert userspace.
0113  *
0114  * This design approach is required in order to defer hotplug event handling
0115  * from the IRQ handler to a work handler because hotplug handler has to use
0116  * mutexes which cannot be locked in an IRQ handler (since &mutex_lock may
0117  * sleep).
0118  */
0119 static void amdgpu_hotplug_work_func(struct work_struct *work)
0120 {
0121     struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
0122                           hotplug_work);
0123     struct drm_device *dev = adev_to_drm(adev);
0124     struct drm_mode_config *mode_config = &dev->mode_config;
0125     struct drm_connector *connector;
0126     struct drm_connector_list_iter iter;
0127 
0128     mutex_lock(&mode_config->mutex);
0129     drm_connector_list_iter_begin(dev, &iter);
0130     drm_for_each_connector_iter(connector, &iter)
0131         amdgpu_connector_hotplug(connector);
0132     drm_connector_list_iter_end(&iter);
0133     mutex_unlock(&mode_config->mutex);
0134     /* Just fire off a uevent and let userspace tell us what to do */
0135     drm_helper_hpd_irq_event(dev);
0136 }
0137 
0138 /**
0139  * amdgpu_irq_disable_all - disable *all* interrupts
0140  *
0141  * @adev: amdgpu device pointer
0142  *
0143  * Disable all types of interrupts from all sources.
0144  */
0145 void amdgpu_irq_disable_all(struct amdgpu_device *adev)
0146 {
0147     unsigned long irqflags;
0148     unsigned i, j, k;
0149     int r;
0150 
0151     spin_lock_irqsave(&adev->irq.lock, irqflags);
0152     for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
0153         if (!adev->irq.client[i].sources)
0154             continue;
0155 
0156         for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
0157             struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
0158 
0159             if (!src || !src->funcs->set || !src->num_types)
0160                 continue;
0161 
0162             for (k = 0; k < src->num_types; ++k) {
0163                 atomic_set(&src->enabled_types[k], 0);
0164                 r = src->funcs->set(adev, src, k,
0165                             AMDGPU_IRQ_STATE_DISABLE);
0166                 if (r)
0167                     DRM_ERROR("error disabling interrupt (%d)\n",
0168                           r);
0169             }
0170         }
0171     }
0172     spin_unlock_irqrestore(&adev->irq.lock, irqflags);
0173 }
0174 
0175 /**
0176  * amdgpu_irq_handler - IRQ handler
0177  *
0178  * @irq: IRQ number (unused)
0179  * @arg: pointer to DRM device
0180  *
0181  * IRQ handler for amdgpu driver (all ASICs).
0182  *
0183  * Returns:
0184  * result of handling the IRQ, as defined by &irqreturn_t
0185  */
0186 static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
0187 {
0188     struct drm_device *dev = (struct drm_device *) arg;
0189     struct amdgpu_device *adev = drm_to_adev(dev);
0190     irqreturn_t ret;
0191 
0192     ret = amdgpu_ih_process(adev, &adev->irq.ih);
0193     if (ret == IRQ_HANDLED)
0194         pm_runtime_mark_last_busy(dev->dev);
0195 
0196     amdgpu_ras_interrupt_fatal_error_handler(adev);
0197 
0198     return ret;
0199 }
0200 
0201 /**
0202  * amdgpu_irq_handle_ih1 - kick of processing for IH1
0203  *
0204  * @work: work structure in struct amdgpu_irq
0205  *
0206  * Kick of processing IH ring 1.
0207  */
0208 static void amdgpu_irq_handle_ih1(struct work_struct *work)
0209 {
0210     struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
0211                           irq.ih1_work);
0212 
0213     amdgpu_ih_process(adev, &adev->irq.ih1);
0214 }
0215 
0216 /**
0217  * amdgpu_irq_handle_ih2 - kick of processing for IH2
0218  *
0219  * @work: work structure in struct amdgpu_irq
0220  *
0221  * Kick of processing IH ring 2.
0222  */
0223 static void amdgpu_irq_handle_ih2(struct work_struct *work)
0224 {
0225     struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
0226                           irq.ih2_work);
0227 
0228     amdgpu_ih_process(adev, &adev->irq.ih2);
0229 }
0230 
0231 /**
0232  * amdgpu_irq_handle_ih_soft - kick of processing for ih_soft
0233  *
0234  * @work: work structure in struct amdgpu_irq
0235  *
0236  * Kick of processing IH soft ring.
0237  */
0238 static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
0239 {
0240     struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
0241                           irq.ih_soft_work);
0242 
0243     amdgpu_ih_process(adev, &adev->irq.ih_soft);
0244 }
0245 
0246 /**
0247  * amdgpu_msi_ok - check whether MSI functionality is enabled
0248  *
0249  * @adev: amdgpu device pointer (unused)
0250  *
0251  * Checks whether MSI functionality has been disabled via module parameter
0252  * (all ASICs).
0253  *
0254  * Returns:
0255  * *true* if MSIs are allowed to be enabled or *false* otherwise
0256  */
0257 static bool amdgpu_msi_ok(struct amdgpu_device *adev)
0258 {
0259     if (amdgpu_msi == 1)
0260         return true;
0261     else if (amdgpu_msi == 0)
0262         return false;
0263 
0264     return true;
0265 }
0266 
/* Re-enable MSI-X in PCI config space by toggling the enable bit off and
 * back on.  Called from the GPU-reset resume path for SR-IOV VFs and
 * passthrough devices (e.g. after a VF FLR clobbers interrupt state).
 * NOTE(review): assumes pdev->msix_cap is a valid capability offset
 * (non-zero) — confirm callers only reach this on MSI-X capable devices. */
static void amdgpu_restore_msix(struct amdgpu_device *adev)
{
    u16 ctrl;

    /* Nothing to restore if MSI-X was never enabled on this function. */
    pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
    if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
        return;

    /* VF FLR */
    ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
    pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
    ctrl |= PCI_MSIX_FLAGS_ENABLE;
    pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}
0281 
/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Sets up work functions for hotplug and reset interrupts, enables MSI
 * functionality, initializes vblank, hotplug and reset interrupt handling.
 *
 * Returns:
 * 0 on success or error code on failure
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
    int r = 0;
    unsigned int irq;

    spin_lock_init(&adev->irq.lock);

    /* Enable MSI if not disabled by module parameter */
    adev->irq.msi_enabled = false;

    if (amdgpu_msi_ok(adev)) {
        int nvec = pci_msix_vec_count(adev->pdev);
        unsigned int flags;

        /* Only ask for MSI-X when the device exposes MSI-X vectors;
         * otherwise restrict the allocation to plain MSI. */
        if (nvec <= 0) {
            flags = PCI_IRQ_MSI;
        } else {
            flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
        }
        /* we only need one vector */
        nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
        if (nvec > 0) {
            adev->irq.msi_enabled = true;
            dev_dbg(adev->dev, "using MSI/MSI-X.\n");
        }
        /* On allocation failure we silently fall back to legacy INTx. */
    }

    if (!amdgpu_device_has_dc_support(adev)) {
        if (!adev->enable_virtual_display)
            /* Disable vblank IRQs aggressively for power-saving */
            /* XXX: can this be enabled for DC? */
            adev_to_drm(adev)->vblank_disable_immediate = true;

        r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
        if (r)
            return r;

        /* Pre-DCE11 */
        INIT_WORK(&adev->hotplug_work,
                amdgpu_hotplug_work_func);
    }

    INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
    INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
    INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);

    /* Use vector 0 for MSI-X. */
    r = pci_irq_vector(adev->pdev, 0);
    if (r < 0)
        return r;
    irq = r;

    /* PCI devices require shared interrupts. */
    r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
            adev_to_drm(adev));
    if (r) {
        /* Undo the hotplug work setup from above before bailing out. */
        if (!amdgpu_device_has_dc_support(adev))
            flush_work(&adev->hotplug_work);
        return r;
    }
    /* Publish the installed state only after request_irq() succeeded;
     * amdgpu_irq_get()/put() check this flag. */
    adev->irq.installed = true;
    adev->irq.irq = irq;
    adev_to_drm(adev)->max_vblank_count = 0x00ffffff;

    DRM_DEBUG("amdgpu: irq initialized.\n");
    return 0;
}
0360 
0361 
/**
 * amdgpu_irq_fini_hw - shut down hardware interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Frees the installed IRQ handler, releases the MSI/MSI-X vectors if they
 * were allocated, flushes any pending hotplug work (non-DC configurations
 * only) and finally tears down all IH rings.
 */
void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
{
    if (adev->irq.installed) {
        free_irq(adev->irq.irq, adev_to_drm(adev));
        adev->irq.installed = false;
        if (adev->irq.msi_enabled)
            pci_free_irq_vectors(adev->pdev);

        /* hotplug_work is only initialized on the non-DC path in
         * amdgpu_irq_init(), so only flush it there. */
        if (!amdgpu_device_has_dc_support(adev))
            flush_work(&adev->hotplug_work);
    }

    amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
    amdgpu_ih_ring_fini(adev, &adev->irq.ih);
    amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
    amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
}
0379 
0380 /**
0381  * amdgpu_irq_fini_sw - shut down interrupt handling
0382  *
0383  * @adev: amdgpu device pointer
0384  *
0385  * Tears down work functions for hotplug and reset interrupts, disables MSI
0386  * functionality, shuts down vblank, hotplug and reset interrupt handling,
0387  * turns off interrupts from all sources (all ASICs).
0388  */
0389 void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
0390 {
0391     unsigned i, j;
0392 
0393     for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
0394         if (!adev->irq.client[i].sources)
0395             continue;
0396 
0397         for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
0398             struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
0399 
0400             if (!src)
0401                 continue;
0402 
0403             kfree(src->enabled_types);
0404             src->enabled_types = NULL;
0405         }
0406         kfree(adev->irq.client[i].sources);
0407         adev->irq.client[i].sources = NULL;
0408     }
0409 }
0410 
0411 /**
0412  * amdgpu_irq_add_id - register IRQ source
0413  *
0414  * @adev: amdgpu device pointer
0415  * @client_id: client id
0416  * @src_id: source id
0417  * @source: IRQ source pointer
0418  *
0419  * Registers IRQ source on a client.
0420  *
0421  * Returns:
0422  * 0 on success or error code otherwise
0423  */
0424 int amdgpu_irq_add_id(struct amdgpu_device *adev,
0425               unsigned client_id, unsigned src_id,
0426               struct amdgpu_irq_src *source)
0427 {
0428     if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
0429         return -EINVAL;
0430 
0431     if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
0432         return -EINVAL;
0433 
0434     if (!source->funcs)
0435         return -EINVAL;
0436 
0437     if (!adev->irq.client[client_id].sources) {
0438         adev->irq.client[client_id].sources =
0439             kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
0440                 sizeof(struct amdgpu_irq_src *),
0441                 GFP_KERNEL);
0442         if (!adev->irq.client[client_id].sources)
0443             return -ENOMEM;
0444     }
0445 
0446     if (adev->irq.client[client_id].sources[src_id] != NULL)
0447         return -EINVAL;
0448 
0449     if (source->num_types && !source->enabled_types) {
0450         atomic_t *types;
0451 
0452         types = kcalloc(source->num_types, sizeof(atomic_t),
0453                 GFP_KERNEL);
0454         if (!types)
0455             return -ENOMEM;
0456 
0457         source->enabled_types = types;
0458     }
0459 
0460     adev->irq.client[client_id].sources[src_id] = source;
0461     return 0;
0462 }
0463 
/**
 * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
 *
 * @adev: amdgpu device pointer
 * @ih: interrupt ring instance
 *
 * Decodes the IV entry at the ring's current read pointer and routes it:
 * to the IRQ domain for legacy-client virtual IRQs, to the registered
 * source's process callback otherwise, and to amdkfd as a fallback when
 * nothing else handled it.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
             struct amdgpu_ih_ring *ih)
{
    /* rptr is a byte offset; >>2 converts it to a dword index into the
     * ring — assumes ring entries are u32-sized (TODO confirm against
     * amdgpu_ih_ring). */
    u32 ring_index = ih->rptr >> 2;
    struct amdgpu_iv_entry entry;
    unsigned client_id, src_id;
    struct amdgpu_irq_src *src;
    bool handled = false;
    int r;

    entry.ih = ih;
    entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
    amdgpu_ih_decode_iv(adev, &entry);

    /* Pointer difference gives the ring's index relative to the primary
     * IH ring (0 for ih itself) for tracing. */
    trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

    client_id = entry.client_id;
    src_id = entry.src_id;

    if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
        DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);

    } else  if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
        DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);

    } else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
           adev->irq.virq[src_id]) {
        /* Source mapped into the IRQ domain (external driver, e.g.
         * ACP) — hand it to the generic IRQ layer. */
        generic_handle_domain_irq(adev->irq.domain, src_id);

    } else if (!adev->irq.client[client_id].sources) {
        DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
              client_id, src_id);

    } else if ((src = adev->irq.client[client_id].sources[src_id])) {
        r = src->funcs->process(adev, src, &entry);
        if (r < 0)
            DRM_ERROR("error processing interrupt (%d)\n", r);
        else if (r)
            handled = true;

    } else {
        DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
    }

    /* Send it to amdkfd as well if it isn't already handled */
    if (!handled)
        amdgpu_amdkfd_interrupt(adev, entry.iv_entry);

    /* Advance the processed timestamp — presumably only when this entry
     * is newer than what was already recorded (semantics depend on
     * amdgpu_ih_ts_after(); confirm there). */
    if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp))
        ih->processed_timestamp = entry.timestamp;
}
0523 
/**
 * amdgpu_irq_delegate - delegate IV to soft IH ring
 *
 * @adev: amdgpu device pointer
 * @entry: IV entry
 * @num_dw: size of IV in dwords
 *
 * Delegate the IV to the soft IH ring and schedule processing of it. Used
 * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
 */
void amdgpu_irq_delegate(struct amdgpu_device *adev,
             struct amdgpu_iv_entry *entry,
             unsigned int num_dw)
{
    /* Copy the raw IV into the software ring, then kick the worker that
     * drains it (amdgpu_irq_handle_ih_soft). */
    amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
    schedule_work(&adev->irq.ih_soft_work);
}
0541 
0542 /**
0543  * amdgpu_irq_update - update hardware interrupt state
0544  *
0545  * @adev: amdgpu device pointer
0546  * @src: interrupt source pointer
0547  * @type: type of interrupt
0548  *
0549  * Updates interrupt state for the specific source (all ASICs).
0550  */
0551 int amdgpu_irq_update(struct amdgpu_device *adev,
0552                  struct amdgpu_irq_src *src, unsigned type)
0553 {
0554     unsigned long irqflags;
0555     enum amdgpu_interrupt_state state;
0556     int r;
0557 
0558     spin_lock_irqsave(&adev->irq.lock, irqflags);
0559 
0560     /* We need to determine after taking the lock, otherwise
0561        we might disable just enabled interrupts again */
0562     if (amdgpu_irq_enabled(adev, src, type))
0563         state = AMDGPU_IRQ_STATE_ENABLE;
0564     else
0565         state = AMDGPU_IRQ_STATE_DISABLE;
0566 
0567     r = src->funcs->set(adev, src, type, state);
0568     spin_unlock_irqrestore(&adev->irq.lock, irqflags);
0569     return r;
0570 }
0571 
0572 /**
0573  * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
0574  *
0575  * @adev: amdgpu device pointer
0576  *
0577  * Updates state of all types of interrupts on all sources on resume after
0578  * reset.
0579  */
0580 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
0581 {
0582     int i, j, k;
0583 
0584     if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
0585         amdgpu_restore_msix(adev);
0586 
0587     for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
0588         if (!adev->irq.client[i].sources)
0589             continue;
0590 
0591         for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
0592             struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
0593 
0594             if (!src || !src->funcs || !src->funcs->set)
0595                 continue;
0596             for (k = 0; k < src->num_types; k++)
0597                 amdgpu_irq_update(adev, src, k);
0598         }
0599     }
0600 }
0601 
0602 /**
0603  * amdgpu_irq_get - enable interrupt
0604  *
0605  * @adev: amdgpu device pointer
0606  * @src: interrupt source pointer
0607  * @type: type of interrupt
0608  *
0609  * Enables specified type of interrupt on the specified source (all ASICs).
0610  *
0611  * Returns:
0612  * 0 on success or error code otherwise
0613  */
0614 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
0615            unsigned type)
0616 {
0617     if (!adev->irq.installed)
0618         return -ENOENT;
0619 
0620     if (type >= src->num_types)
0621         return -EINVAL;
0622 
0623     if (!src->enabled_types || !src->funcs->set)
0624         return -EINVAL;
0625 
0626     if (atomic_inc_return(&src->enabled_types[type]) == 1)
0627         return amdgpu_irq_update(adev, src, type);
0628 
0629     return 0;
0630 }
0631 
/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Drops an enable reference for the specified type of interrupt on the
 * specified source and disables it in hardware once the last reference
 * is gone (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
           unsigned type)
{
    if (!adev->irq.installed)
        return -ENOENT;

    if (type >= src->num_types)
        return -EINVAL;

    if (!src->enabled_types || !src->funcs->set)
        return -EINVAL;

    /* Only the 1 -> 0 transition needs to touch the hardware. */
    if (atomic_dec_and_test(&src->enabled_types[type]))
        return amdgpu_irq_update(adev, src, type);

    return 0;
}
0661 
0662 /**
0663  * amdgpu_irq_enabled - check whether interrupt is enabled or not
0664  *
0665  * @adev: amdgpu device pointer
0666  * @src: interrupt source pointer
0667  * @type: type of interrupt
0668  *
0669  * Checks whether the given type of interrupt is enabled on the given source.
0670  *
0671  * Returns:
0672  * *true* if interrupt is enabled, *false* if interrupt is disabled or on
0673  * invalid parameters
0674  */
0675 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
0676             unsigned type)
0677 {
0678     if (!adev->irq.installed)
0679         return false;
0680 
0681     if (type >= src->num_types)
0682         return false;
0683 
0684     if (!src->enabled_types || !src->funcs->set)
0685         return false;
0686 
0687     return !!atomic_read(&src->enabled_types[type]);
0688 }
0689 
/* XXX: Generic IRQ handling */

/* irq_chip .irq_mask callback — intentionally left unimplemented (XXX). */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
    /* XXX */
}

/* irq_chip .irq_unmask callback — intentionally left unimplemented (XXX). */
static void amdgpu_irq_unmask(struct irq_data *irqd)
{
    /* XXX */
}
0700 
/* amdgpu hardware interrupt chip descriptor; attached to domain-mapped
 * virtual IRQs by amdgpu_irqdomain_map(). */
static struct irq_chip amdgpu_irq_chip = {
    .name = "amdgpu-ih",
    .irq_mask = amdgpu_irq_mask,
    .irq_unmask = amdgpu_irq_unmask,
};
0707 
0708 /**
0709  * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
0710  *
0711  * @d: amdgpu IRQ domain pointer (unused)
0712  * @irq: virtual IRQ number
0713  * @hwirq: hardware irq number
0714  *
0715  * Current implementation assigns simple interrupt handler to the given virtual
0716  * IRQ.
0717  *
0718  * Returns:
0719  * 0 on success or error code otherwise
0720  */
0721 static int amdgpu_irqdomain_map(struct irq_domain *d,
0722                 unsigned int irq, irq_hw_number_t hwirq)
0723 {
0724     if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
0725         return -EPERM;
0726 
0727     irq_set_chip_and_handler(irq,
0728                  &amdgpu_irq_chip, handle_simple_irq);
0729     return 0;
0730 }
0731 
/* Implementation of methods for amdgpu IRQ domain; only .map is needed
 * for a linear domain. */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
    .map = amdgpu_irqdomain_map,
};
0736 
0737 /**
0738  * amdgpu_irq_add_domain - create a linear IRQ domain
0739  *
0740  * @adev: amdgpu device pointer
0741  *
0742  * Creates an IRQ domain for GPU interrupt sources
0743  * that may be driven by another driver (e.g., ACP).
0744  *
0745  * Returns:
0746  * 0 on success or error code otherwise
0747  */
0748 int amdgpu_irq_add_domain(struct amdgpu_device *adev)
0749 {
0750     adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
0751                          &amdgpu_hw_irqdomain_ops, adev);
0752     if (!adev->irq.domain) {
0753         DRM_ERROR("GPU irq add domain failed\n");
0754         return -ENODEV;
0755     }
0756 
0757     return 0;
0758 }
0759 
0760 /**
0761  * amdgpu_irq_remove_domain - remove the IRQ domain
0762  *
0763  * @adev: amdgpu device pointer
0764  *
0765  * Removes the IRQ domain for GPU interrupt sources
0766  * that may be driven by another driver (e.g., ACP).
0767  */
0768 void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
0769 {
0770     if (adev->irq.domain) {
0771         irq_domain_remove(adev->irq.domain);
0772         adev->irq.domain = NULL;
0773     }
0774 }
0775 
0776 /**
0777  * amdgpu_irq_create_mapping - create mapping between domain Linux IRQs
0778  *
0779  * @adev: amdgpu device pointer
0780  * @src_id: IH source id
0781  *
0782  * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ
0783  * Use this for components that generate a GPU interrupt, but are driven
0784  * by a different driver (e.g., ACP).
0785  *
0786  * Returns:
0787  * Linux IRQ
0788  */
0789 unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
0790 {
0791     adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
0792 
0793     return adev->irq.virq[src_id];
0794 }