// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN_HSM: Handle I/O requests
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *  Jason Chen CJ <jason.cj.chen@intel.com>
 *  Fengwei Yin <fengwei.yin@intel.com>
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/acrn.h>

#include "acrn_drv.h"

static void ioreq_pause(void);
static void ioreq_resume(void);

static void ioreq_dispatcher(struct work_struct *work);
static struct workqueue_struct *ioreq_wq;
static DECLARE_WORK(ioreq_work, ioreq_dispatcher);

static inline bool has_pending_request(struct acrn_ioreq_client *client)
{
    return !bitmap_empty(client->ioreqs_map, ACRN_IO_REQUEST_MAX);
}

static inline bool is_destroying(struct acrn_ioreq_client *client)
{
    return test_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
}

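/*
 * Mark an I/O request as complete and, when completion polling is not in
 * use, notify the hypervisor via hypercall that the request has finished.
 */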
static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu,
                  struct acrn_io_request *acrn_req)
{
    bool polling_mode;
    int ret = 0;

    polling_mode = acrn_req->completion_polling;
    /* Add barrier() to make sure the writes are done before completion */
    smp_store_release(&acrn_req->processed, ACRN_IOREQ_STATE_COMPLETE);

    /*
     * To meet real-time requirements in several industry scenarios, such
     * as automotive, ACRN can run in partition mode, in which User VMs
     * and the Service VM are bound to dedicated CPU cores. Polling mode
     * of handling I/O requests is introduced to achieve faster I/O
     * request handling. In polling mode, the hypervisor polls for the
     * completion of an I/O request. Once the request is marked as
     * ACRN_IOREQ_STATE_COMPLETE, the hypervisor resumes from the polling
     * point to continue the I/O request flow, so the completion
     * notification from the HSM is not needed. Note that
     * completion_polling must be read before the I/O request is marked as
     * ACRN_IOREQ_STATE_COMPLETE to avoid racing with the hypervisor.
     */
    if (!polling_mode) {
        ret = hcall_notify_req_finish(vm->vmid, vcpu);
        if (ret < 0)
            dev_err(acrn_dev.this_device,
                "Notify I/O request finished failed!\n");
    }

    return ret;
}

static int acrn_ioreq_complete_request(struct acrn_ioreq_client *client,
                       u16 vcpu,
                       struct acrn_io_request *acrn_req)
{
    int ret;

    if (vcpu >= client->vm->vcpu_num)
        return -EINVAL;

    clear_bit(vcpu, client->ioreqs_map);
    if (!acrn_req) {
        acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;
        acrn_req += vcpu;
    }

    ret = ioreq_complete_request(client->vm, vcpu, acrn_req);

    return ret;
}

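/*
 * Complete an I/O request on behalf of the default client of a VM, if the
 * default client has been created.
 */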
int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu)
{
    int ret = 0;

    spin_lock_bh(&vm->ioreq_clients_lock);
    if (vm->default_client)
        ret = acrn_ioreq_complete_request(vm->default_client,
                          vcpu, NULL);
    spin_unlock_bh(&vm->ioreq_clients_lock);

    return ret;
}

/**
 * acrn_ioreq_range_add() - Add an iorange monitored by an ioreq client
 * @client: The ioreq client
 * @type:   Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
 * @start:  Start address of iorange
 * @end:    End address of iorange
 *
 * Return: 0 on success, <0 on error
 */
int acrn_ioreq_range_add(struct acrn_ioreq_client *client,
             u32 type, u64 start, u64 end)
{
    struct acrn_ioreq_range *range;

    if (end < start) {
        dev_err(acrn_dev.this_device,
            "Invalid IO range [0x%llx,0x%llx]\n", start, end);
        return -EINVAL;
    }

    range = kzalloc(sizeof(*range), GFP_KERNEL);
    if (!range)
        return -ENOMEM;

    range->type = type;
    range->start = start;
    range->end = end;

    write_lock_bh(&client->range_lock);
    list_add(&range->list, &client->range_list);
    write_unlock_bh(&client->range_lock);

    return 0;
}

/**
 * acrn_ioreq_range_del() - Del an iorange monitored by an ioreq client
 * @client: The ioreq client
 * @type:   Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
 * @start:  Start address of iorange
 * @end:    End address of iorange
 */
void acrn_ioreq_range_del(struct acrn_ioreq_client *client,
              u32 type, u64 start, u64 end)
{
    struct acrn_ioreq_range *range;

    write_lock_bh(&client->range_lock);
    list_for_each_entry(range, &client->range_list, list) {
        if (type == range->type &&
            start == range->start &&
            end == range->end) {
            list_del(&range->list);
            kfree(range);
            break;
        }
    }
    write_unlock_bh(&client->range_lock);
}

/*
 * ioreq_task() is the execution entity of the handler thread of an I/O
 * client. The handler callback of the I/O client is called within the
 * handler thread.
 */
static int ioreq_task(void *data)
{
    struct acrn_ioreq_client *client = data;
    struct acrn_io_request *req;
    unsigned long *ioreqs_map;
    int vcpu, ret;

    /*
     * Lockless access to ioreqs_map is safe, because
     * 1) set_bit() and clear_bit() are atomic operations.
     * 2) I/O requests arrive serialized. The access flow of ioreqs_map is:
     *  set_bit() - in ioreq_work handler
     *  Handler callback handles corresponding I/O request
     *  clear_bit() - in handler thread (including ACRN userspace)
     *  Mark corresponding I/O request completed
     *  Loop again if a new I/O request occurs
     */
    ioreqs_map = client->ioreqs_map;
    while (!kthread_should_stop()) {
        acrn_ioreq_client_wait(client);
        while (has_pending_request(client)) {
            vcpu = find_first_bit(ioreqs_map, client->vm->vcpu_num);
            req = client->vm->ioreq_buf->req_slot + vcpu;
            ret = client->handler(client, req);
            if (ret < 0) {
                dev_err(acrn_dev.this_device,
                    "IO handle failure: %d\n", ret);
                break;
            }
            acrn_ioreq_complete_request(client, vcpu, req);
        }
    }

    return 0;
}

/*
 * For the non-default I/O clients, give them a chance to complete the
 * current I/O requests if there are any. For the default I/O client, it is
 * safe to clear all pending I/O requests because the clearing request comes
 * from ACRN userspace.
 */
void acrn_ioreq_request_clear(struct acrn_vm *vm)
{
    struct acrn_ioreq_client *client;
    bool has_pending = false;
    unsigned long vcpu;
    int retry = 10;

    /*
     * IO requests of this VM will be completed directly in
     * acrn_ioreq_dispatch if the ACRN_VM_FLAG_CLEARING_IOREQ flag is set.
     */
    set_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);

    /*
     * acrn_ioreq_request_clear() is only called in the VM reset case.
     * Simply wait 100ms in total for the IO requests' completion.
     */
    do {
        spin_lock_bh(&vm->ioreq_clients_lock);
        list_for_each_entry(client, &vm->ioreq_clients, list) {
            has_pending = has_pending_request(client);
            if (has_pending)
                break;
        }
        spin_unlock_bh(&vm->ioreq_clients_lock);

        if (has_pending)
            schedule_timeout_interruptible(HZ / 100);
    } while (has_pending && --retry > 0);
    if (retry == 0)
        dev_warn(acrn_dev.this_device,
             "%s cannot flush pending request!\n", client->name);

    /* Clear all ioreqs belonging to the default client */
    spin_lock_bh(&vm->ioreq_clients_lock);
    client = vm->default_client;
    if (client) {
        for_each_set_bit(vcpu, client->ioreqs_map, ACRN_IO_REQUEST_MAX)
            acrn_ioreq_complete_request(client, vcpu, NULL);
    }
    spin_unlock_bh(&vm->ioreq_clients_lock);

    /* Clear the ACRN_VM_FLAG_CLEARING_IOREQ flag after the clearing */
    clear_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);
}

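/*
 * Wait until the client has pending I/O requests or is being torn down. The
 * default client is driven by a user space thread, so it checks the
 * destroying flag; kernel clients run in a kthread and check
 * kthread_should_stop() instead.
 */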
int acrn_ioreq_client_wait(struct acrn_ioreq_client *client)
{
    if (client->is_default) {
        /*
         * In the default client, a user space thread waits on the
         * waitqueue. The is_destroying() check is used to notify user
         * space the client is going to be destroyed.
         */
        wait_event_interruptible(client->wq,
                     has_pending_request(client) ||
                     is_destroying(client));
        if (is_destroying(client))
            return -ENODEV;
    } else {
        wait_event_interruptible(client->wq,
                     has_pending_request(client) ||
                     kthread_should_stop());
    }

    return 0;
}

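/*
 * PCI configuration space mechanism #1 uses port 0xCF8 as the address
 * register and ports 0xCFC-0xCFF as the data window. The two helpers below
 * identify port I/O requests that target these registers.
 */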
static bool is_cfg_addr(struct acrn_io_request *req)
{
    return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
        (req->reqs.pio_request.address == 0xcf8));
}

static bool is_cfg_data(struct acrn_io_request *req)
{
    return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
        ((req->reqs.pio_request.address >= 0xcfc) &&
         (req->reqs.pio_request.address < (0xcfc + 4))));
}

/* The low 8 bits of a supported pci_reg address */
#define PCI_LOWREG_MASK  0xFC
/* The high 4 bits of a supported pci_reg address */
#define PCI_HIGHREG_MASK 0xF00
/* Max number of supported functions */
#define PCI_FUNCMAX 7
/* Max number of supported slots */
#define PCI_SLOTMAX 31
/* Max number of supported buses */
#define PCI_BUSMAX  255
#define CONF1_ENABLE    0x80000000UL
/*
 * A PCI configuration space access via PIO 0xCF8 and 0xCFC normally has the
 * following two steps:
 *   1) write the address into port 0xCF8
 *   2) access the data in/from port 0xCFC
 * This function combines such paired PCI configuration space I/O requests
 * into one ACRN_IOREQ_TYPE_PCICFG type I/O request and continues the
 * processing.
 */
static bool handle_cf8cfc(struct acrn_vm *vm,
              struct acrn_io_request *req, u16 vcpu)
{
    int offset, pci_cfg_addr, pci_reg;
    bool is_handled = false;

    if (is_cfg_addr(req)) {
        WARN_ON(req->reqs.pio_request.size != 4);
        if (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_WRITE)
            vm->pci_conf_addr = req->reqs.pio_request.value;
        else
            req->reqs.pio_request.value = vm->pci_conf_addr;
        is_handled = true;
    } else if (is_cfg_data(req)) {
        if (!(vm->pci_conf_addr & CONF1_ENABLE)) {
            if (req->reqs.pio_request.direction ==
                    ACRN_IOREQ_DIR_READ)
                req->reqs.pio_request.value = 0xffffffff;
            is_handled = true;
        } else {
            offset = req->reqs.pio_request.address - 0xcfc;

            req->type = ACRN_IOREQ_TYPE_PCICFG;
            pci_cfg_addr = vm->pci_conf_addr;
            req->reqs.pci_request.bus =
                    (pci_cfg_addr >> 16) & PCI_BUSMAX;
            req->reqs.pci_request.dev =
                    (pci_cfg_addr >> 11) & PCI_SLOTMAX;
            req->reqs.pci_request.func =
                    (pci_cfg_addr >> 8) & PCI_FUNCMAX;
            pci_reg = (pci_cfg_addr & PCI_LOWREG_MASK) +
                   ((pci_cfg_addr >> 16) & PCI_HIGHREG_MASK);
            req->reqs.pci_request.reg = pci_reg + offset;
        }
    }

    if (is_handled)
        ioreq_complete_request(vm, vcpu, req);

    return is_handled;
}

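/*
 * Check whether an MMIO or port I/O request falls entirely within the
 * address range registered by an ioreq client.
 */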
static bool in_range(struct acrn_ioreq_range *range,
             struct acrn_io_request *req)
{
    bool ret = false;

    if (range->type == req->type) {
        switch (req->type) {
        case ACRN_IOREQ_TYPE_MMIO:
            if (req->reqs.mmio_request.address >= range->start &&
                (req->reqs.mmio_request.address +
                 req->reqs.mmio_request.size - 1) <= range->end)
                ret = true;
            break;
        case ACRN_IOREQ_TYPE_PORTIO:
            if (req->reqs.pio_request.address >= range->start &&
                (req->reqs.pio_request.address +
                 req->reqs.pio_request.size - 1) <= range->end)
                ret = true;
            break;
        default:
            break;
        }
    }

    return ret;
}

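/*
 * Find the ioreq client whose registered ranges cover the given I/O request.
 * Fall back to the default client when no registered range matches.
 */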
static struct acrn_ioreq_client *find_ioreq_client(struct acrn_vm *vm,
                           struct acrn_io_request *req)
{
    struct acrn_ioreq_client *client, *found = NULL;
    struct acrn_ioreq_range *range;

    lockdep_assert_held(&vm->ioreq_clients_lock);

    list_for_each_entry(client, &vm->ioreq_clients, list) {
        read_lock_bh(&client->range_lock);
        list_for_each_entry(range, &client->range_list, list) {
            if (in_range(range, req)) {
                found = client;
                break;
            }
        }
        read_unlock_bh(&client->range_lock);
        if (found)
            break;
    }
    return found ? found : vm->default_client;
}

/**
 * acrn_ioreq_client_create() - Create an ioreq client
 * @vm:     The VM that this client belongs to
 * @handler:    The ioreq_handler of the ioreq client. If a handler is
 *      provided, acrn_hsm creates a kernel thread that calls it to
 *      handle I/O requests.
 * @priv:   Private data for the handler
 * @is_default: If it is the default client
 * @name:   The name of the ioreq client
 *
 * Return: acrn_ioreq_client pointer on success, NULL on error
 */
struct acrn_ioreq_client *acrn_ioreq_client_create(struct acrn_vm *vm,
                           ioreq_handler_t handler,
                           void *priv, bool is_default,
                           const char *name)
{
    struct acrn_ioreq_client *client;

    if (!handler && !is_default) {
        dev_dbg(acrn_dev.this_device,
            "Cannot create non-default client w/o handler!\n");
        return NULL;
    }
    client = kzalloc(sizeof(*client), GFP_KERNEL);
    if (!client)
        return NULL;

    client->handler = handler;
    client->vm = vm;
    client->priv = priv;
    client->is_default = is_default;
    if (name)
        strncpy(client->name, name, sizeof(client->name) - 1);
    rwlock_init(&client->range_lock);
    INIT_LIST_HEAD(&client->range_list);
    init_waitqueue_head(&client->wq);

    if (client->handler) {
        client->thread = kthread_run(ioreq_task, client, "VM%u-%s",
                         client->vm->vmid, client->name);
        if (IS_ERR(client->thread)) {
            kfree(client);
            return NULL;
        }
    }

    spin_lock_bh(&vm->ioreq_clients_lock);
    if (is_default)
        vm->default_client = client;
    else
        list_add(&client->list, &vm->ioreq_clients);
    spin_unlock_bh(&vm->ioreq_clients_lock);

    dev_dbg(acrn_dev.this_device, "Created ioreq client %s.\n", name);
    return client;
}

/**
 * acrn_ioreq_client_destroy() - Destroy an ioreq client
 * @client: The ioreq client
 */
void acrn_ioreq_client_destroy(struct acrn_ioreq_client *client)
{
    struct acrn_ioreq_range *range, *next;
    struct acrn_vm *vm = client->vm;

    dev_dbg(acrn_dev.this_device,
        "Destroy ioreq client %s.\n", client->name);
    ioreq_pause();
    set_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
    if (client->is_default)
        wake_up_interruptible(&client->wq);
    else
        kthread_stop(client->thread);

    spin_lock_bh(&vm->ioreq_clients_lock);
    if (client->is_default)
        vm->default_client = NULL;
    else
        list_del(&client->list);
    spin_unlock_bh(&vm->ioreq_clients_lock);

    write_lock_bh(&client->range_lock);
    list_for_each_entry_safe(range, next, &client->range_list, list) {
        list_del(&range->list);
        kfree(range);
    }
    write_unlock_bh(&client->range_lock);
    kfree(client);

    ioreq_resume();
}

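/*
 * Walk the per-vCPU request slots of the shared I/O request buffer and hand
 * each pending request to the matching ioreq client: set the corresponding
 * bit in the client's ioreqs_map and wake up its handler.
 */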
static int acrn_ioreq_dispatch(struct acrn_vm *vm)
{
    struct acrn_ioreq_client *client;
    struct acrn_io_request *req;
    int i;

    for (i = 0; i < vm->vcpu_num; i++) {
        req = vm->ioreq_buf->req_slot + i;

        /* barrier the read of processed of acrn_io_request */
        if (smp_load_acquire(&req->processed) ==
                     ACRN_IOREQ_STATE_PENDING) {
            /* Complete the IO request directly in clearing stage */
            if (test_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags)) {
                ioreq_complete_request(vm, i, req);
                continue;
            }
            if (handle_cf8cfc(vm, req, i))
                continue;

            spin_lock_bh(&vm->ioreq_clients_lock);
            client = find_ioreq_client(vm, req);
            if (!client) {
                dev_err(acrn_dev.this_device,
                    "Failed to find ioreq client!\n");
                spin_unlock_bh(&vm->ioreq_clients_lock);
                return -EINVAL;
            }
            if (!client->is_default)
                req->kernel_handled = 1;
            else
                req->kernel_handled = 0;
            /*
             * Add barrier() to make sure the writes are done
             * before setting ACRN_IOREQ_STATE_PROCESSING
             */
            smp_store_release(&req->processed,
                      ACRN_IOREQ_STATE_PROCESSING);
            set_bit(i, client->ioreqs_map);
            wake_up_interruptible(&client->wq);
            spin_unlock_bh(&vm->ioreq_clients_lock);
        }
    }

    return 0;
}

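/*
 * Work handler that dispatches pending I/O requests for every VM on the
 * global VM list. It is queued from ioreq_intr_handler().
 */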
static void ioreq_dispatcher(struct work_struct *work)
{
    struct acrn_vm *vm;

    read_lock(&acrn_vm_list_lock);
    list_for_each_entry(vm, &acrn_vm_list, list) {
        if (!vm->ioreq_buf)
            break;
        acrn_ioreq_dispatch(vm);
    }
    read_unlock(&acrn_vm_list_lock);
}

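/* Interrupt handler callback: queue the I/O request dispatch work */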
static void ioreq_intr_handler(void)
{
    queue_work(ioreq_wq, &ioreq_work);
}

static void ioreq_pause(void)
{
    /* Flush and unarm the handler to ensure no I/O requests pending */
    acrn_remove_intr_handler();
    drain_workqueue(ioreq_wq);
}

static void ioreq_resume(void)
{
    /* Schedule after enabling in case other clients miss the interrupt */
    acrn_setup_intr_handler(ioreq_intr_handler);
    queue_work(ioreq_wq, &ioreq_work);
}

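/*
 * Register the ACRN interrupt handler and create the dedicated workqueue
 * used to dispatch I/O requests.
 */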
int acrn_ioreq_intr_setup(void)
{
    acrn_setup_intr_handler(ioreq_intr_handler);
    ioreq_wq = alloc_workqueue("ioreq_wq",
                   WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
    if (!ioreq_wq) {
        dev_err(acrn_dev.this_device, "Failed to alloc workqueue!\n");
        acrn_remove_intr_handler();
        return -ENOMEM;
    }
    return 0;
}

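/* Tear down the I/O request workqueue and unregister the interrupt handler */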
void acrn_ioreq_intr_remove(void)
{
    if (ioreq_wq)
        destroy_workqueue(ioreq_wq);
    acrn_remove_intr_handler();
}

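/*
 * Set up the I/O request buffer shared with the hypervisor: pin the user
 * page at buf_vma and pass its physical address to the hypervisor via
 * hcall_set_ioreq_buffer().
 */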
int acrn_ioreq_init(struct acrn_vm *vm, u64 buf_vma)
{
    struct acrn_ioreq_buffer *set_buffer;
    struct page *page;
    int ret;

    if (vm->ioreq_buf)
        return -EEXIST;

    set_buffer = kzalloc(sizeof(*set_buffer), GFP_KERNEL);
    if (!set_buffer)
        return -ENOMEM;

    ret = pin_user_pages_fast(buf_vma, 1,
                  FOLL_WRITE | FOLL_LONGTERM, &page);
    if (unlikely(ret != 1) || !page) {
        dev_err(acrn_dev.this_device, "Failed to pin ioreq page!\n");
        ret = -EFAULT;
        goto free_buf;
    }

    vm->ioreq_buf = page_address(page);
    vm->ioreq_page = page;
    set_buffer->ioreq_buf = page_to_phys(page);
    ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(set_buffer));
    if (ret < 0) {
        dev_err(acrn_dev.this_device, "Failed to init ioreq buffer!\n");
        unpin_user_page(page);
        vm->ioreq_buf = NULL;
        goto free_buf;
    }

    dev_dbg(acrn_dev.this_device,
        "Init ioreq buffer %pK!\n", vm->ioreq_buf);
    ret = 0;
free_buf:
    kfree(set_buffer);
    return ret;
}

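/*
 * Destroy all ioreq clients of the VM and unpin the shared I/O request
 * buffer page.
 */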
void acrn_ioreq_deinit(struct acrn_vm *vm)
{
    struct acrn_ioreq_client *client, *next;

    dev_dbg(acrn_dev.this_device,
        "Deinit ioreq buffer %pK!\n", vm->ioreq_buf);
    /* Destroy all clients belonging to this VM */
    list_for_each_entry_safe(client, next, &vm->ioreq_clients, list)
        acrn_ioreq_client_destroy(client);
    if (vm->default_client)
        acrn_ioreq_client_destroy(vm->default_client);

    if (vm->ioreq_buf && vm->ioreq_page) {
        unpin_user_page(vm->ioreq_page);
        vm->ioreq_buf = NULL;
    }
}