// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN HSM eventfd - use eventfd objects to signal expected I/O requests
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *  Shuo Liu <shuo.a.liu@intel.com>
 *  Yakui Zhao <yakui.zhao@intel.com>
 */

#include <linux/eventfd.h>
#include <linux/slab.h>

#include "acrn_drv.h"

/**
 * struct hsm_ioeventfd - Properties of HSM ioeventfd
 * @list:   Entry within the &acrn_vm.ioeventfds list of a VM's ioeventfds
 * @eventfd:    Eventfd of the HSM ioeventfd
 * @addr:   Address of I/O range
 * @data:   Data for matching
 * @length: Length of I/O range
 * @type:   Type of I/O range (ACRN_IOREQ_TYPE_MMIO/ACRN_IOREQ_TYPE_PORTIO)
 * @wildcard:   Whether data matching is skipped (any written value matches)
 */
struct hsm_ioeventfd {
    struct list_head    list;
    struct eventfd_ctx  *eventfd;
    u64         addr;
    u64         data;
    int         length;
    int         type;
    bool            wildcard;
};
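
/*
 * Each hsm_ioeventfd is built from a userspace-supplied &struct acrn_ioeventfd
 * in acrn_ioeventfd_assign() below: @addr, @length and @data come from the
 * corresponding UAPI fields, @type is derived from ACRN_IOEVENTFD_FLAG_PIO,
 * and @wildcard is set when ACRN_IOEVENTFD_FLAG_DATAMATCH is absent.
 */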

static inline int ioreq_type_from_flags(int flags)
{
    return flags & ACRN_IOEVENTFD_FLAG_PIO ?
               ACRN_IOREQ_TYPE_PORTIO : ACRN_IOREQ_TYPE_MMIO;
}

static void acrn_ioeventfd_shutdown(struct acrn_vm *vm, struct hsm_ioeventfd *p)
{
    lockdep_assert_held(&vm->ioeventfds_lock);

    eventfd_ctx_put(p->eventfd);
    list_del(&p->list);
    kfree(p);
}

static bool hsm_ioeventfd_is_conflict(struct acrn_vm *vm,
                      struct hsm_ioeventfd *ioeventfd)
{
    struct hsm_ioeventfd *p;

    lockdep_assert_held(&vm->ioeventfds_lock);

    /* If either one is a wildcard, the data matching is skipped. */
    list_for_each_entry(p, &vm->ioeventfds, list)
        if (p->eventfd == ioeventfd->eventfd &&
            p->addr == ioeventfd->addr &&
            p->type == ioeventfd->type &&
            (p->wildcard || ioeventfd->wildcard ||
            p->data == ioeventfd->data))
            return true;

    return false;
}
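
/*
 * For example (hypothetical values): if an ioeventfd is already registered
 * for PIO address 0x2000 with wildcard matching, registering the same eventfd
 * at 0x2000 again conflicts even if the new request carries a specific data
 * value, since the existing wildcard makes the data comparison irrelevant.
 */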

/*
 * Assign an eventfd to a VM and create an HSM ioeventfd associated with the
 * eventfd. The properties of the HSM ioeventfd are built from a &struct
 * acrn_ioeventfd.
 */
static int acrn_ioeventfd_assign(struct acrn_vm *vm,
                 struct acrn_ioeventfd *args)
{
    struct eventfd_ctx *eventfd;
    struct hsm_ioeventfd *p;
    int ret;

    /* Check for range overflow */
    if (args->addr + args->len < args->addr)
        return -EINVAL;

    /*
     * Currently, acrn_ioeventfd is used to support vhost; accesses of
     * 1, 2, 4 or 8 bytes cover vhost's requirements.
     */
    if (!(args->len == 1 || args->len == 2 ||
          args->len == 4 || args->len == 8))
        return -EINVAL;

    eventfd = eventfd_ctx_fdget(args->fd);
    if (IS_ERR(eventfd))
        return PTR_ERR(eventfd);

    p = kzalloc(sizeof(*p), GFP_KERNEL);
    if (!p) {
        ret = -ENOMEM;
        goto fail;
    }

    INIT_LIST_HEAD(&p->list);
    p->addr = args->addr;
    p->length = args->len;
    p->eventfd = eventfd;
    p->type = ioreq_type_from_flags(args->flags);

    /*
     * ACRN_IOEVENTFD_FLAG_DATAMATCH requests that only writes of @data
     * trigger the notification. Without it (e.g. virtio 1.0, where a write
     * to a virtqueue's own notification register triggers the notification
     * regardless of the value written), there is no data matching
     * requirement and any write to the range matches.
     */
    if (args->flags & ACRN_IOEVENTFD_FLAG_DATAMATCH)
        p->data = args->data;
    else
        p->wildcard = true;

    mutex_lock(&vm->ioeventfds_lock);

    if (hsm_ioeventfd_is_conflict(vm, p)) {
        ret = -EEXIST;
        goto unlock_fail;
    }

    /* Register the I/O range with the ioreq client. */
    ret = acrn_ioreq_range_add(vm->ioeventfd_client, p->type,
                   p->addr, p->addr + p->length - 1);
    if (ret < 0)
        goto unlock_fail;

    list_add_tail(&p->list, &vm->ioeventfds);
    mutex_unlock(&vm->ioeventfds_lock);

    return 0;

unlock_fail:
    mutex_unlock(&vm->ioeventfds_lock);
    kfree(p);
fail:
    eventfd_ctx_put(eventfd);
    return ret;
}
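
/*
 * Illustrative only - two ways a VMM might fill &struct acrn_ioeventfd for
 * the assignment above (field names as used by the code above; addresses and
 * values are hypothetical):
 *
 *   Wildcard, e.g. a virtio 1.0 per-queue notify register:
 *       .fd = efd, .addr = 0xfe003000, .len = 2, .flags = 0
 *
 *   Data matching, e.g. a shared legacy notify register keyed by queue index:
 *       .fd = efd, .addr = 0xc040, .len = 2, .data = 3,
 *       .flags = ACRN_IOEVENTFD_FLAG_PIO | ACRN_IOEVENTFD_FLAG_DATAMATCH
 */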

static int acrn_ioeventfd_deassign(struct acrn_vm *vm,
                   struct acrn_ioeventfd *args)
{
    struct hsm_ioeventfd *p;
    struct eventfd_ctx *eventfd;

    eventfd = eventfd_ctx_fdget(args->fd);
    if (IS_ERR(eventfd))
        return PTR_ERR(eventfd);

    mutex_lock(&vm->ioeventfds_lock);
    list_for_each_entry(p, &vm->ioeventfds, list) {
        if (p->eventfd != eventfd)
            continue;

        acrn_ioreq_range_del(vm->ioeventfd_client, p->type,
                     p->addr, p->addr + p->length - 1);
        acrn_ioeventfd_shutdown(vm, p);
        break;
    }
    mutex_unlock(&vm->ioeventfds_lock);

    eventfd_ctx_put(eventfd);
    return 0;
}

static struct hsm_ioeventfd *hsm_ioeventfd_match(struct acrn_vm *vm, u64 addr,
                         u64 data, int len, int type)
{
    struct hsm_ioeventfd *p = NULL;

    lockdep_assert_held(&vm->ioeventfds_lock);

    list_for_each_entry(p, &vm->ioeventfds, list) {
        if (p->type == type && p->addr == addr && p->length >= len &&
            (p->wildcard || p->data == data))
            return p;
    }

    return NULL;
}

static int acrn_ioeventfd_handler(struct acrn_ioreq_client *client,
                  struct acrn_io_request *req)
{
    struct hsm_ioeventfd *p;
    u64 addr, val;
    int size;

    if (req->type == ACRN_IOREQ_TYPE_MMIO) {
        /*
         * I/O requests are dispatched by range check only, so an
         * acrn_ioreq_client needs to process both READ and WRITE
         * accesses to the same range. READ accesses can safely be
         * ignored here because virtio PCI devices write the notify
         * registers for notification.
         */
        if (req->reqs.mmio_request.direction == ACRN_IOREQ_DIR_READ) {
            /* A read does nothing; report a value of 0. */
            req->reqs.mmio_request.value = 0;
            return 0;
        }
        addr = req->reqs.mmio_request.address;
        size = req->reqs.mmio_request.size;
        val = req->reqs.mmio_request.value;
    } else {
        if (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_READ) {
            /* A read does nothing; report a value of 0. */
            req->reqs.pio_request.value = 0;
            return 0;
        }
        addr = req->reqs.pio_request.address;
        size = req->reqs.pio_request.size;
        val = req->reqs.pio_request.value;
    }

    mutex_lock(&client->vm->ioeventfds_lock);
    p = hsm_ioeventfd_match(client->vm, addr, val, size, req->type);
    if (p)
        eventfd_signal(p->eventfd, 1);
    mutex_unlock(&client->vm->ioeventfds_lock);

    return 0;
}
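
/*
 * Summary of the fast path: a guest write to a registered range is trapped by
 * the hypervisor and delivered to the VM's ioeventfd_client as an I/O
 * request; the handler above extracts the address, size and value, looks up a
 * matching hsm_ioeventfd and signals its eventfd so that the consumer (e.g. a
 * vhost worker) can be woken directly from the kernel.
 */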

int acrn_ioeventfd_config(struct acrn_vm *vm, struct acrn_ioeventfd *args)
{
    int ret;

    if (args->flags & ACRN_IOEVENTFD_FLAG_DEASSIGN)
        ret = acrn_ioeventfd_deassign(vm, args);
    else
        ret = acrn_ioeventfd_assign(vm, args);

    return ret;
}
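
/*
 * A minimal userspace sketch of driving acrn_ioeventfd_config(), assuming the
 * ACRN_IOCTL_IOEVENTFD ioctl exposed through the HSM character device; the
 * guest address is hypothetical:
 *
 *   int efd = eventfd(0, EFD_CLOEXEC);
 *   struct acrn_ioeventfd args = {
 *       .fd    = efd,
 *       .addr  = 0xfe003000,    // hypothetical notify register GPA
 *       .len   = 2,
 *       .flags = 0,             // wildcard: any written value signals efd
 *   };
 *   ioctl(hsm_fd, ACRN_IOCTL_IOEVENTFD, &args);
 *
 * Passing ACRN_IOEVENTFD_FLAG_DEASSIGN in @flags with the same @fd removes a
 * registration associated with that eventfd via acrn_ioeventfd_deassign().
 */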

int acrn_ioeventfd_init(struct acrn_vm *vm)
{
    char name[ACRN_NAME_LEN];

    mutex_init(&vm->ioeventfds_lock);
    INIT_LIST_HEAD(&vm->ioeventfds);
    snprintf(name, sizeof(name), "ioeventfd-%u", vm->vmid);
    vm->ioeventfd_client = acrn_ioreq_client_create(vm,
                            acrn_ioeventfd_handler,
                            NULL, false, name);
    if (!vm->ioeventfd_client) {
        dev_err(acrn_dev.this_device, "Failed to create ioeventfd ioreq client!\n");
        return -EINVAL;
    }

    dev_dbg(acrn_dev.this_device, "VM %u ioeventfd init.\n", vm->vmid);
    return 0;
}

void acrn_ioeventfd_deinit(struct acrn_vm *vm)
{
    struct hsm_ioeventfd *p, *next;

    dev_dbg(acrn_dev.this_device, "VM %u ioeventfd deinit.\n", vm->vmid);
    acrn_ioreq_client_destroy(vm->ioeventfd_client);
    mutex_lock(&vm->ioeventfds_lock);
    list_for_each_entry_safe(p, next, &vm->ioeventfds, list)
        acrn_ioeventfd_shutdown(vm, p);
    mutex_unlock(&vm->ioeventfds_lock);
}