// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO generic eventfd code for IRQFD support.
 * Derived from drivers/vfio/pci/vfio_pci_intrs.c
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/vfio.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/slab.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "IRQFD support for VFIO bus drivers"

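/*
 * Release of a virqfd is deferred to the vfio_irqfd_cleanup_wq
 * workqueue, and virqfd_lock serializes updates to the caller-owned
 * struct virqfd pointer so that a virqfd is only ever queued for
 * release once.
 */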
static struct workqueue_struct *vfio_irqfd_cleanup_wq;
static DEFINE_SPINLOCK(virqfd_lock);

static int __init vfio_virqfd_init(void)
{
        vfio_irqfd_cleanup_wq =
                create_singlethread_workqueue("vfio-irqfd-cleanup");
        if (!vfio_irqfd_cleanup_wq)
                return -ENOMEM;

        return 0;
}

static void __exit vfio_virqfd_exit(void)
{
        destroy_workqueue(vfio_irqfd_cleanup_wq);
}

static void virqfd_deactivate(struct virqfd *virqfd)
{
        queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
}

static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode,
                         int sync, void *key)
{
        struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
        __poll_t flags = key_to_poll(key);

        if (flags & EPOLLIN) {
                u64 cnt;
                eventfd_ctx_do_read(virqfd->eventfd, &cnt);

                /* An event has been signaled, call the handler */
                if ((!virqfd->handler ||
                     virqfd->handler(virqfd->opaque, virqfd->data)) &&
                    virqfd->thread)
                        schedule_work(&virqfd->inject);
        }

        if (flags & EPOLLHUP) {
                unsigned long flags;
                spin_lock_irqsave(&virqfd_lock, flags);

                /*
                 * The eventfd is closing.  If this virqfd has not already
                 * been detached (the caller's pointer still refers to it),
                 * clear the pointer and queue the virqfd for shutdown.
                 * virqfd_lock resolves the race against a concurrent
                 * vfio_virqfd_disable().
                 */
                if (*(virqfd->pvirqfd) == virqfd) {
                        *(virqfd->pvirqfd) = NULL;
                        virqfd_deactivate(virqfd);
                }

                spin_unlock_irqrestore(&virqfd_lock, flags);
        }

        return 0;
}

static void virqfd_ptable_queue_proc(struct file *file,
                                     wait_queue_head_t *wqh, poll_table *pt)
{
        struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
        add_wait_queue(wqh, &virqfd->wait);
}

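/*
 * Runs on vfio_irqfd_cleanup_wq: detach from the eventfd wait queue,
 * wait for any in-flight injection work, then drop the eventfd
 * reference and free the virqfd.
 */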
static void virqfd_shutdown(struct work_struct *work)
{
        struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
        u64 cnt;

        eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
        flush_work(&virqfd->inject);
        eventfd_ctx_put(virqfd->eventfd);

        kfree(virqfd);
}

static void virqfd_inject(struct work_struct *work)
{
        struct virqfd *virqfd = container_of(work, struct virqfd, inject);
        if (virqfd->thread)
                virqfd->thread(virqfd->opaque, virqfd->data);
}

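/**
 * vfio_virqfd_enable - link an eventfd to handler callbacks
 * @opaque:  caller context passed back to @handler and @thread
 * @handler: optional callback run directly from the eventfd wakeup path;
 *           a non-zero return (or a NULL @handler) defers to @thread
 * @thread:  optional callback run from workqueue context
 * @data:    caller data passed back to @handler and @thread
 * @pvirqfd: caller-owned pointer tracking the active virqfd; cleared
 *           when the virqfd is released
 * @fd:      eventfd file descriptor to watch
 *
 * Returns 0 on success, or -ENOMEM, -EBADF, -EBUSY, or an eventfd
 * lookup error on failure.  Illustrative caller sketch (the names
 * vdev, my_handler, my_thread and my_virqfd are hypothetical):
 *
 *      ret = vfio_virqfd_enable(vdev, my_handler, my_thread,
 *                               NULL, &vdev->my_virqfd, fd);
 */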
int vfio_virqfd_enable(void *opaque,
                       int (*handler)(void *, void *),
                       void (*thread)(void *, void *),
                       void *data, struct virqfd **pvirqfd, int fd)
{
        struct fd irqfd;
        struct eventfd_ctx *ctx;
        struct virqfd *virqfd;
        int ret = 0;
        __poll_t events;

        virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
        if (!virqfd)
                return -ENOMEM;

        virqfd->pvirqfd = pvirqfd;
        virqfd->opaque = opaque;
        virqfd->handler = handler;
        virqfd->thread = thread;
        virqfd->data = data;

        INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
        INIT_WORK(&virqfd->inject, virqfd_inject);

        irqfd = fdget(fd);
        if (!irqfd.file) {
                ret = -EBADF;
                goto err_fd;
        }

        ctx = eventfd_ctx_fileget(irqfd.file);
        if (IS_ERR(ctx)) {
                ret = PTR_ERR(ctx);
                goto err_ctx;
        }

        virqfd->eventfd = ctx;

        /*
         * A virqfd can be released either when its eventfd is closed
         * or via vfio_virqfd_disable().  Both paths queue the release
         * on the cleanup workqueue, so update the caller's pointer
         * under virqfd_lock to avoid queuing more than one release
         * job for the same virqfd.
         */
        spin_lock_irq(&virqfd_lock);

        if (*pvirqfd) {
                spin_unlock_irq(&virqfd_lock);
                ret = -EBUSY;
                goto err_busy;
        }
        *pvirqfd = virqfd;

        spin_unlock_irq(&virqfd_lock);

        /*
         * Install our own wake-up handling so we are notified via a
         * callback whenever someone signals the underlying eventfd.
         */
        init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
        init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);

        events = vfs_poll(irqfd.file, &virqfd->pt);

        /*
         * Check if there was an event already pending on the eventfd
         * before we registered and trigger it as if we didn't miss it.
         */
        if (events & EPOLLIN) {
                if ((!handler || handler(opaque, data)) && thread)
                        schedule_work(&virqfd->inject);
        }

        /*
         * Do not drop the file until the virqfd is fully initialized,
         * otherwise we might race against the EPOLLHUP.
         */
        fdput(irqfd);

        return 0;
err_busy:
        eventfd_ctx_put(ctx);
err_ctx:
        fdput(irqfd);
err_fd:
        kfree(virqfd);

        return ret;
}
EXPORT_SYMBOL_GPL(vfio_virqfd_enable);

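/**
 * vfio_virqfd_disable - tear down a virqfd set up by vfio_virqfd_enable()
 * @pvirqfd: the caller-owned pointer passed to vfio_virqfd_enable()
 *
 * Safe to call even if the virqfd has already been released via an
 * eventfd close; returns only after any queued shutdown work has
 * completed.
 */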
void vfio_virqfd_disable(struct virqfd **pvirqfd)
{
        unsigned long flags;

        spin_lock_irqsave(&virqfd_lock, flags);

        if (*pvirqfd) {
                virqfd_deactivate(*pvirqfd);
                *pvirqfd = NULL;
        }

        spin_unlock_irqrestore(&virqfd_lock, flags);

        /*
         * Block until we know all outstanding shutdown jobs have
         * completed.  Even if we didn't queue one above, flush the
         * workqueue to be sure any previously queued release has
         * finished.
         */
        flush_workqueue(vfio_irqfd_cleanup_wq);
}
EXPORT_SYMBOL_GPL(vfio_virqfd_disable);

module_init(vfio_virqfd_init);
module_exit(vfio_virqfd_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);