// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI interrupt handling
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/eventfd.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/slab.h>

#include <linux/vfio_pci_core.h>

/*
 * INTx
 */
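/*
 * The (opaque, unused) signature matches the virqfd thread callback
 * accepted by vfio_virqfd_enable() below, which lets this helper double
 * as the deferred-signal path for unmask eventfds.
 */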
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
    struct vfio_pci_core_device *vdev = opaque;

    if (likely(is_intx(vdev) && !vdev->virq_disabled))
        eventfd_signal(vdev->ctx[0].trigger, 1);
}

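/*
 * vdev->pci_2_3 indicates the device honors the PCI 2.3 Interrupt
 * Disable bit in the command register, so INTx can be masked at the
 * device itself.  Without it we fall back to masking the host IRQ
 * line, which also forces exclusive (non-shared) interrupt handling.
 */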
void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
    struct pci_dev *pdev = vdev->pdev;
    unsigned long flags;

    spin_lock_irqsave(&vdev->irqlock, flags);

    /*
     * Masking can come from interrupt, ioctl, or config space
     * via INTx disable.  The latter means this can get called
     * even when not using intx delivery.  In this case, just
     * try to have the physical bit follow the virtual bit.
     */
    if (unlikely(!is_intx(vdev))) {
        if (vdev->pci_2_3)
            pci_intx(pdev, 0);
    } else if (!vdev->ctx[0].masked) {
        /*
         * Can't use check_and_mask here because we always want to
         * mask, not just when something is pending.
         */
        if (vdev->pci_2_3)
            pci_intx(pdev, 0);
        else
            disable_irq_nosync(pdev->irq);

        vdev->ctx[0].masked = true;
    }

    spin_unlock_irqrestore(&vdev->irqlock, flags);
}

/*
 * If this is triggered by an eventfd, we can't call eventfd_signal
 * or else we'll deadlock on the eventfd wait queue.  Return >0 when
 * a signal is necessary, which can then be handled via a work queue
 * or directly depending on the caller.
 */
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
{
    struct vfio_pci_core_device *vdev = opaque;
    struct pci_dev *pdev = vdev->pdev;
    unsigned long flags;
    int ret = 0;

    spin_lock_irqsave(&vdev->irqlock, flags);

    /*
     * Unmasking comes from ioctl or config, so again, have the
     * physical bit follow the virtual even when not using INTx.
     */
    if (unlikely(!is_intx(vdev))) {
        if (vdev->pci_2_3)
            pci_intx(pdev, 1);
    } else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
        /*
         * A pending interrupt here would immediately trigger,
         * but we can avoid that overhead by just re-sending
         * the interrupt to the user.
         */
        if (vdev->pci_2_3) {
            if (!pci_check_and_unmask_intx(pdev))
                ret = 1;
        } else
            enable_irq(pdev->irq);

        vdev->ctx[0].masked = (ret > 0);
    }

    spin_unlock_irqrestore(&vdev->irqlock, flags);

    return ret;
}

void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
    if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
        vfio_send_intx_eventfd(vdev, NULL);
}

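/*
 * Hard IRQ handler.  For exclusive interrupts the line is ours, so mask
 * and claim unconditionally.  On a potentially shared line we only claim
 * the interrupt if pci_check_and_mask_intx() confirms this device is
 * actually asserting INTx.
 */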
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
    struct vfio_pci_core_device *vdev = dev_id;
    unsigned long flags;
    int ret = IRQ_NONE;

    spin_lock_irqsave(&vdev->irqlock, flags);

    if (!vdev->pci_2_3) {
        disable_irq_nosync(vdev->pdev->irq);
        vdev->ctx[0].masked = true;
        ret = IRQ_HANDLED;
    } else if (!vdev->ctx[0].masked &&  /* may be shared */
           pci_check_and_mask_intx(vdev->pdev)) {
        vdev->ctx[0].masked = true;
        ret = IRQ_HANDLED;
    }

    spin_unlock_irqrestore(&vdev->irqlock, flags);

    if (ret == IRQ_HANDLED)
        vfio_send_intx_eventfd(vdev, NULL);

    return ret;
}

static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
{
    if (!is_irq_none(vdev))
        return -EINVAL;

    if (!vdev->pdev->irq)
        return -ENODEV;

    vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
    if (!vdev->ctx)
        return -ENOMEM;

    vdev->num_ctx = 1;

    /*
     * If the virtual interrupt is masked, restore it.  Devices
     * supporting DisINTx can be masked at the hardware level
     * here, non-PCI-2.3 devices will have to wait until the
     * interrupt is enabled.
     */
    vdev->ctx[0].masked = vdev->virq_disabled;
    if (vdev->pci_2_3)
        pci_intx(vdev->pdev, !vdev->ctx[0].masked);

    vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

    return 0;
}

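/*
 * Replace (or, for fd < 0, simply tear down) the eventfd that forwards
 * INTx to userspace.  The old trigger and irq are released before the
 * new ones are installed, so there is a brief window with no handler
 * attached.
 */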
static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
{
    struct pci_dev *pdev = vdev->pdev;
    unsigned long irqflags = IRQF_SHARED;
    struct eventfd_ctx *trigger;
    unsigned long flags;
    int ret;

    if (vdev->ctx[0].trigger) {
        free_irq(pdev->irq, vdev);
        kfree(vdev->ctx[0].name);
        eventfd_ctx_put(vdev->ctx[0].trigger);
        vdev->ctx[0].trigger = NULL;
    }

    if (fd < 0) /* Disable only */
        return 0;

    vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
                      pci_name(pdev));
    if (!vdev->ctx[0].name)
        return -ENOMEM;

    trigger = eventfd_ctx_fdget(fd);
    if (IS_ERR(trigger)) {
        kfree(vdev->ctx[0].name);
        return PTR_ERR(trigger);
    }

    vdev->ctx[0].trigger = trigger;

    if (!vdev->pci_2_3)
        irqflags = 0;

    ret = request_irq(pdev->irq, vfio_intx_handler,
              irqflags, vdev->ctx[0].name, vdev);
    if (ret) {
        vdev->ctx[0].trigger = NULL;
        kfree(vdev->ctx[0].name);
        eventfd_ctx_put(trigger);
        return ret;
    }

    /*
     * INTx disable will stick across the new irq setup,
     * disable_irq won't.
     */
    spin_lock_irqsave(&vdev->irqlock, flags);
    if (!vdev->pci_2_3 && vdev->ctx[0].masked)
        disable_irq_nosync(pdev->irq);
    spin_unlock_irqrestore(&vdev->irqlock, flags);

    return 0;
}

static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
    vfio_virqfd_disable(&vdev->ctx[0].unmask);
    vfio_virqfd_disable(&vdev->ctx[0].mask);
    vfio_intx_set_signal(vdev, -1);
    vdev->irq_type = VFIO_PCI_NUM_IRQS;
    vdev->num_ctx = 0;
    kfree(vdev->ctx);
}

/*
 * MSI/MSI-X
 */
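/*
 * MSI/MSI-X vectors are never shared, so the handler only has to signal
 * the eventfd that userspace (typically a VMM) is polling.
 */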
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
    struct eventfd_ctx *trigger = arg;

    eventfd_signal(trigger, 1);
    return IRQ_HANDLED;
}

static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
{
    struct pci_dev *pdev = vdev->pdev;
    unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
    int ret;
    u16 cmd;

    if (!is_irq_none(vdev))
        return -EINVAL;

    vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
    if (!vdev->ctx)
        return -ENOMEM;

    /* return the number of supported vectors if we can't get all: */
    cmd = vfio_pci_memory_lock_and_enable(vdev);
    ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
    if (ret < nvec) {
        if (ret > 0)
            pci_free_irq_vectors(pdev);
        vfio_pci_memory_unlock_and_restore(vdev, cmd);
        kfree(vdev->ctx);
        return ret;
    }
    vfio_pci_memory_unlock_and_restore(vdev, cmd);

    vdev->num_ctx = nvec;
    vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
                VFIO_PCI_MSI_IRQ_INDEX;

    if (!msix) {
        /*
         * Compute the virtual hardware field for max msi vectors -
         * it is the log base 2 of the number of vectors.
         */
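        /*
         * For example, nvec = 3 gives fls(2 * 3 - 1) - 1 = fls(5) - 1
         * = 2, i.e. the emulated capability reports 2^2 = 4 vectors.
         */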
        vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
    }

    return 0;
}

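/*
 * (Re)bind a single MSI/MSI-X vector to an eventfd.  Any existing
 * trigger is torn down first; fd < 0 stops there and leaves the vector
 * unbound.
 */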
static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
                      int vector, int fd, bool msix)
{
    struct pci_dev *pdev = vdev->pdev;
    struct eventfd_ctx *trigger;
    int irq, ret;
    u16 cmd;

    if (vector < 0 || vector >= vdev->num_ctx)
        return -EINVAL;

    irq = pci_irq_vector(pdev, vector);

    if (vdev->ctx[vector].trigger) {
        irq_bypass_unregister_producer(&vdev->ctx[vector].producer);

        cmd = vfio_pci_memory_lock_and_enable(vdev);
        free_irq(irq, vdev->ctx[vector].trigger);
        vfio_pci_memory_unlock_and_restore(vdev, cmd);

        kfree(vdev->ctx[vector].name);
        eventfd_ctx_put(vdev->ctx[vector].trigger);
        vdev->ctx[vector].trigger = NULL;
    }

    if (fd < 0)
        return 0;

    vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
                       msix ? "x" : "", vector,
                       pci_name(pdev));
    if (!vdev->ctx[vector].name)
        return -ENOMEM;

    trigger = eventfd_ctx_fdget(fd);
    if (IS_ERR(trigger)) {
        kfree(vdev->ctx[vector].name);
        return PTR_ERR(trigger);
    }

    /*
     * The MSIx vector table resides in device memory which may be cleared
     * via backdoor resets. We don't allow direct access to the vector
     * table so even if a userspace driver attempts to save/restore around
     * such a reset it would be unsuccessful. To avoid this, restore the
     * cached value of the message prior to enabling.
     */
    cmd = vfio_pci_memory_lock_and_enable(vdev);
    if (msix) {
        struct msi_msg msg;

        get_cached_msi_msg(irq, &msg);
        pci_write_msi_msg(irq, &msg);
    }

    ret = request_irq(irq, vfio_msihandler, 0,
              vdev->ctx[vector].name, trigger);
    vfio_pci_memory_unlock_and_restore(vdev, cmd);
    if (ret) {
        kfree(vdev->ctx[vector].name);
        eventfd_ctx_put(trigger);
        return ret;
    }

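    /*
     * Register with the IRQ bypass framework so a consumer such as KVM
     * can wire this vector directly to a guest (e.g. posted interrupts).
     * Failure is non-fatal; delivery simply falls back to the eventfd.
     */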
    vdev->ctx[vector].producer.token = trigger;
    vdev->ctx[vector].producer.irq = irq;
    ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
    if (unlikely(ret)) {
        dev_info(&pdev->dev,
        "irq bypass producer (token %p) registration fails: %d\n",
        vdev->ctx[vector].producer.token, ret);

        vdev->ctx[vector].producer.token = NULL;
    }
    vdev->ctx[vector].trigger = trigger;

    return 0;
}

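/*
 * Apply an array of eventfds to vectors [start, start + count).  On any
 * failure, the vectors already configured by this call are unwound
 * before returning the error.
 */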
static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
                  unsigned count, int32_t *fds, bool msix)
{
    int i, j, ret = 0;

    if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
        return -EINVAL;

    for (i = 0, j = start; i < count && !ret; i++, j++) {
        int fd = fds ? fds[i] : -1;
        ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
    }

    if (ret) {
        for (--j; j >= (int)start; j--)
            vfio_msi_set_vector_signal(vdev, j, -1, msix);
    }

    return ret;
}

static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
{
    struct pci_dev *pdev = vdev->pdev;
    int i;
    u16 cmd;

    for (i = 0; i < vdev->num_ctx; i++) {
        vfio_virqfd_disable(&vdev->ctx[i].unmask);
        vfio_virqfd_disable(&vdev->ctx[i].mask);
    }

    vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);

    cmd = vfio_pci_memory_lock_and_enable(vdev);
    pci_free_irq_vectors(pdev);
    vfio_pci_memory_unlock_and_restore(vdev, cmd);

    /*
     * Both disable paths above use pci_intx_for_msi() to clear DisINTx
     * via their shutdown paths.  Restore for NoINTx devices.
     */
    if (vdev->nointx)
        pci_intx(pdev, 0);

    vdev->irq_type = VFIO_PCI_NUM_IRQS;
    vdev->num_ctx = 0;
    kfree(vdev->ctx);
}

/*
 * IOCTL support
 */
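/*
 * The handlers below implement VFIO_DEVICE_SET_IRQS.  Per the vfio uapi,
 * "data" is the payload following struct vfio_irq_set: an array of
 * uint8_t bools for DATA_BOOL, of int32_t eventfds for DATA_EVENTFD,
 * or nothing for DATA_NONE.
 */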
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
                    unsigned index, unsigned start,
                    unsigned count, uint32_t flags, void *data)
{
    if (!is_intx(vdev) || start != 0 || count != 1)
        return -EINVAL;

    if (flags & VFIO_IRQ_SET_DATA_NONE) {
        vfio_pci_intx_unmask(vdev);
    } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
        uint8_t unmask = *(uint8_t *)data;
        if (unmask)
            vfio_pci_intx_unmask(vdev);
    } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
        int32_t fd = *(int32_t *)data;
        if (fd >= 0)
            return vfio_virqfd_enable((void *) vdev,
                          vfio_pci_intx_unmask_handler,
                          vfio_send_intx_eventfd, NULL,
                          &vdev->ctx[0].unmask, fd);

        vfio_virqfd_disable(&vdev->ctx[0].unmask);
    }

    return 0;
}

static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
                  unsigned index, unsigned start,
                  unsigned count, uint32_t flags, void *data)
{
    if (!is_intx(vdev) || start != 0 || count != 1)
        return -EINVAL;

    if (flags & VFIO_IRQ_SET_DATA_NONE) {
        vfio_pci_intx_mask(vdev);
    } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
        uint8_t mask = *(uint8_t *)data;
        if (mask)
            vfio_pci_intx_mask(vdev);
    } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
        return -ENOTTY; /* XXX implement me */
    }

    return 0;
}

static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
                     unsigned index, unsigned start,
                     unsigned count, uint32_t flags, void *data)
{
    if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
        vfio_intx_disable(vdev);
        return 0;
    }

    if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
        return -EINVAL;

    if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
        int32_t fd = *(int32_t *)data;
        int ret;

        if (is_intx(vdev))
            return vfio_intx_set_signal(vdev, fd);

        ret = vfio_intx_enable(vdev);
        if (ret)
            return ret;

        ret = vfio_intx_set_signal(vdev, fd);
        if (ret)
            vfio_intx_disable(vdev);

        return ret;
    }

    if (!is_intx(vdev))
        return -EINVAL;

    if (flags & VFIO_IRQ_SET_DATA_NONE) {
        vfio_send_intx_eventfd(vdev, NULL);
    } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
        uint8_t trigger = *(uint8_t *)data;
        if (trigger)
            vfio_send_intx_eventfd(vdev, NULL);
    }
    return 0;
}

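/*
 * Trigger action for MSI/MSI-X: a zero count with DATA_NONE disables
 * the index entirely; DATA_EVENTFD enables MSI/MSI-X on demand and
 * binds the given eventfds; DATA_NONE/DATA_BOOL on an already enabled
 * index write the triggers directly, e.g. for loopback testing.
 */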
static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
                    unsigned index, unsigned start,
                    unsigned count, uint32_t flags, void *data)
{
    int i;
    bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

    if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
        vfio_msi_disable(vdev, msix);
        return 0;
    }

    if (!(irq_is(vdev, index) || is_irq_none(vdev)))
        return -EINVAL;

    if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
        int32_t *fds = data;
        int ret;

        if (vdev->irq_type == index)
            return vfio_msi_set_block(vdev, start, count,
                          fds, msix);

        ret = vfio_msi_enable(vdev, start + count, msix);
        if (ret)
            return ret;

        ret = vfio_msi_set_block(vdev, start, count, fds, msix);
        if (ret)
            vfio_msi_disable(vdev, msix);

        return ret;
    }

    if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
        return -EINVAL;

    for (i = start; i < start + count; i++) {
        if (!vdev->ctx[i].trigger)
            continue;
        if (flags & VFIO_IRQ_SET_DATA_NONE) {
            eventfd_signal(vdev->ctx[i].trigger, 1);
        } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
            uint8_t *bools = data;
            if (bools[i - start])
                eventfd_signal(vdev->ctx[i].trigger, 1);
        }
    }
    return 0;
}

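/*
 * Shared helper for the one-shot error and request triggers.  Note the
 * overloaded DATA_NONE semantics: a non-zero count signals the existing
 * eventfd, a zero count releases it.
 */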
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
                       unsigned int count, uint32_t flags,
                       void *data)
{
    /* DATA_NONE/DATA_BOOL enables loopback testing */
    if (flags & VFIO_IRQ_SET_DATA_NONE) {
        if (*ctx) {
            if (count) {
                eventfd_signal(*ctx, 1);
            } else {
                eventfd_ctx_put(*ctx);
                *ctx = NULL;
            }
            return 0;
        }
    } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
        uint8_t trigger;

        if (!count)
            return -EINVAL;

        trigger = *(uint8_t *)data;
        if (trigger && *ctx)
            eventfd_signal(*ctx, 1);

        return 0;
    } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
        int32_t fd;

        if (!count)
            return -EINVAL;

        fd = *(int32_t *)data;
        if (fd == -1) {
            if (*ctx)
                eventfd_ctx_put(*ctx);
            *ctx = NULL;
        } else if (fd >= 0) {
            struct eventfd_ctx *efdctx;

            efdctx = eventfd_ctx_fdget(fd);
            if (IS_ERR(efdctx))
                return PTR_ERR(efdctx);

            if (*ctx)
                eventfd_ctx_put(*ctx);

            *ctx = efdctx;
        }
        return 0;
    }

    return -EINVAL;
}

static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
                    unsigned index, unsigned start,
                    unsigned count, uint32_t flags, void *data)
{
    if (index != VFIO_PCI_ERR_IRQ_INDEX || start != 0 || count > 1)
        return -EINVAL;

    return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
                           count, flags, data);
}

static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
                    unsigned index, unsigned start,
                    unsigned count, uint32_t flags, void *data)
{
    if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count > 1)
        return -EINVAL;

    return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
                           count, flags, data);
}

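/*
 * Entry point from the VFIO_DEVICE_SET_IRQS ioctl.  An illustrative
 * userspace sketch of binding an eventfd to MSI vector 0 (the names
 * "device" and "efd" are assumptions for the example, not from this
 * file; "device" is an open vfio device fd, "efd" from eventfd(2)):
 *
 *     struct { struct vfio_irq_set set; int32_t fd; } s = {
 *         .set = {
 *             .argsz = sizeof(s),
 *             .flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *                  VFIO_IRQ_SET_ACTION_TRIGGER,
 *             .index = VFIO_PCI_MSI_IRQ_INDEX,
 *             .start = 0,
 *             .count = 1,
 *         },
 *         .fd = efd,
 *     };
 *     ioctl(device, VFIO_DEVICE_SET_IRQS, &s);
 */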
int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
                unsigned index, unsigned start, unsigned count,
                void *data)
{
    int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
            unsigned start, unsigned count, uint32_t flags,
            void *data) = NULL;

    switch (index) {
    case VFIO_PCI_INTX_IRQ_INDEX:
        switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
        case VFIO_IRQ_SET_ACTION_MASK:
            func = vfio_pci_set_intx_mask;
            break;
        case VFIO_IRQ_SET_ACTION_UNMASK:
            func = vfio_pci_set_intx_unmask;
            break;
        case VFIO_IRQ_SET_ACTION_TRIGGER:
            func = vfio_pci_set_intx_trigger;
            break;
        }
        break;
    case VFIO_PCI_MSI_IRQ_INDEX:
    case VFIO_PCI_MSIX_IRQ_INDEX:
        switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
        case VFIO_IRQ_SET_ACTION_MASK:
        case VFIO_IRQ_SET_ACTION_UNMASK:
            /* XXX Need masking support exported */
            break;
        case VFIO_IRQ_SET_ACTION_TRIGGER:
            func = vfio_pci_set_msi_trigger;
            break;
        }
        break;
    case VFIO_PCI_ERR_IRQ_INDEX:
        switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
        case VFIO_IRQ_SET_ACTION_TRIGGER:
            if (pci_is_pcie(vdev->pdev))
                func = vfio_pci_set_err_trigger;
            break;
        }
        break;
    case VFIO_PCI_REQ_IRQ_INDEX:
        switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
        case VFIO_IRQ_SET_ACTION_TRIGGER:
            func = vfio_pci_set_req_trigger;
            break;
        }
        break;
    }

    if (!func)
        return -ENOTTY;

    return func(vdev, index, start, count, flags, data);
}