/*
 * VMware VMCI driver - queue pair implementation.
 *
 * Guest queue pair endpoints and the host-side queue pair broker used
 * by VMCI clients (for example the vSockets VMCI transport).
 */
0008 #include <linux/vmw_vmci_defs.h>
0009 #include <linux/vmw_vmci_api.h>
0010 #include <linux/highmem.h>
0011 #include <linux/kernel.h>
0012 #include <linux/mm.h>
0013 #include <linux/module.h>
0014 #include <linux/mutex.h>
0015 #include <linux/pagemap.h>
0016 #include <linux/pci.h>
0017 #include <linux/sched.h>
0018 #include <linux/slab.h>
0019 #include <linux/uio.h>
0020 #include <linux/wait.h>
0021 #include <linux/vmalloc.h>
0022 #include <linux/skbuff.h>
0023
0024 #include "vmci_handle_array.h"
0025 #include "vmci_queue_pair.h"
0026 #include "vmci_datagram.h"
0027 #include "vmci_resource.h"
0028 #include "vmci_context.h"
0029 #include "vmci_driver.h"
0030 #include "vmci_event.h"
0031 #include "vmci_route.h"
0032
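/*
 * A queue pair is a bidirectional shared-memory channel made up of two
 * queues: each endpoint writes to its produce queue and reads from its
 * consume queue, and the peer sees the same memory with the roles
 * reversed.  Every queue consists of a header page, holding the
 * producer tail and consumer head pointers, followed by the data pages.
 *
 * Guest endpoints (struct qp_guest_endpoint, kept in
 * qp_guest_endpoints) allocate the backing pages themselves and
 * announce them to the hypervisor with a VMCI_QUEUEPAIR_ALLOC
 * hypercall; VMCI_QUEUEPAIR_DETACH tears the association down again.
 *
 * On the host side every queue pair is tracked by the queue pair
 * broker (struct qp_broker_entry, kept in qp_broker_list), which pairs
 * the creating context with the attaching context and records whether
 * the guest memory backing the queues is currently available.  The
 * broker state (enum qp_broker_state) therefore moves between the
 * *_NO_MEM and *_MEM variants of CREATED, ATTACHED and SHUTDOWN as the
 * guest supplies, maps, unmaps or withdraws its memory, for example
 * across suspend and resume.
 */
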
0125 struct vmci_queue_kern_if {
0126 struct mutex __mutex;
0127 struct mutex *mutex;
0128 size_t num_pages;
0129 bool host;
0130 union {
0131 struct {
0132 dma_addr_t *pas;
0133 void **vas;
0134 } g;
0135 struct {
0136 struct page **page;
0137 struct page **header_page;
0138 } h;
0139 } u;
0140 };
0141
0142
0143
0144
0145 struct vmci_qp {
0146 struct vmci_handle handle;
0147 struct vmci_queue *produce_q;
0148 struct vmci_queue *consume_q;
0149 u64 produce_q_size;
0150 u64 consume_q_size;
0151 u32 peer;
0152 u32 flags;
0153 u32 priv_flags;
0154 bool guest_endpoint;
0155 unsigned int blocked;
0156 unsigned int generation;
0157 wait_queue_head_t event;
0158 };
0159
0160 enum qp_broker_state {
0161 VMCIQPB_NEW,
0162 VMCIQPB_CREATED_NO_MEM,
0163 VMCIQPB_CREATED_MEM,
0164 VMCIQPB_ATTACHED_NO_MEM,
0165 VMCIQPB_ATTACHED_MEM,
0166 VMCIQPB_SHUTDOWN_NO_MEM,
0167 VMCIQPB_SHUTDOWN_MEM,
0168 VMCIQPB_GONE
0169 };
0170
0171 #define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
0172 _qpb->state == VMCIQPB_ATTACHED_MEM || \
0173 _qpb->state == VMCIQPB_SHUTDOWN_MEM)
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184 struct qp_entry {
0185 struct list_head list_item;
0186 struct vmci_handle handle;
0187 u32 peer;
0188 u32 flags;
0189 u64 produce_size;
0190 u64 consume_size;
0191 u32 ref_count;
0192 };
0193
0194 struct qp_broker_entry {
0195 struct vmci_resource resource;
0196 struct qp_entry qp;
0197 u32 create_id;
0198 u32 attach_id;
0199 enum qp_broker_state state;
0200 bool require_trusted_attach;
0201 bool created_by_trusted;
0202 bool vmci_page_files;
0203 struct vmci_queue *produce_q;
0204 struct vmci_queue *consume_q;
0205 struct vmci_queue_header saved_produce_q;
0206 struct vmci_queue_header saved_consume_q;
0207 vmci_event_release_cb wakeup_cb;
0208 void *client_data;
0209 void *local_mem;
0210 };
0211
0212 struct qp_guest_endpoint {
0213 struct vmci_resource resource;
0214 struct qp_entry qp;
0215 u64 num_ppns;
0216 void *produce_q;
0217 void *consume_q;
0218 struct ppn_set ppn_set;
0219 };
0220
0221 struct qp_list {
0222 struct list_head head;
0223 struct mutex mutex;
0224 };
0225
0226 static struct qp_list qp_broker_list = {
0227 .head = LIST_HEAD_INIT(qp_broker_list.head),
0228 .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
0229 };
0230
0231 static struct qp_list qp_guest_endpoints = {
0232 .head = LIST_HEAD_INIT(qp_guest_endpoints.head),
0233 .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
0234 };
0235
0236 #define INVALID_VMCI_GUEST_MEM_ID 0
0237 #define QPE_NUM_PAGES(_QPE) ((u32) \
0238 (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
0239 DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
0240 #define QP_SIZES_ARE_VALID(_prod_qsize, _cons_qsize) \
0241 ((_prod_qsize) + (_cons_qsize) >= max(_prod_qsize, _cons_qsize) && \
0242 (_prod_qsize) + (_cons_qsize) <= VMCI_MAX_GUEST_QP_MEMORY)
0243
0244
0245
0246
0247
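/*
 * Frees the coherent DMA pages backing a guest queue (the header page
 * plus the data pages) and the queue structure itself.
 */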
0248 static void qp_free_queue(void *q, u64 size)
0249 {
0250 struct vmci_queue *queue = q;
0251
0252 if (queue) {
0253 u64 i;
0254
0255
0256 for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
0257 dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
0258 queue->kernel_if->u.g.vas[i],
0259 queue->kernel_if->u.g.pas[i]);
0260 }
0261
0262 vfree(queue);
0263 }
0264 }
0265
0266
0267
0268
0269
0270
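/*
 * Allocates kernel memory for a guest queue: the vmci_queue structure,
 * the per-page PA/VA arrays, and one coherent DMA page per queue page
 * (header page plus data pages).  Returns NULL on overflow or
 * allocation failure.
 */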
0271 static void *qp_alloc_queue(u64 size, u32 flags)
0272 {
0273 u64 i;
0274 struct vmci_queue *queue;
0275 size_t pas_size;
0276 size_t vas_size;
0277 size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
0278 u64 num_pages;
0279
0280 if (size > SIZE_MAX - PAGE_SIZE)
0281 return NULL;
0282 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
0283 if (num_pages >
0284 (SIZE_MAX - queue_size) /
0285 (sizeof(*queue->kernel_if->u.g.pas) +
0286 sizeof(*queue->kernel_if->u.g.vas)))
0287 return NULL;
0288
0289 pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
0290 vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
0291 queue_size += pas_size + vas_size;
0292
0293 queue = vmalloc(queue_size);
0294 if (!queue)
0295 return NULL;
0296
0297 queue->q_header = NULL;
0298 queue->saved_header = NULL;
0299 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
0300 queue->kernel_if->mutex = NULL;
0301 queue->kernel_if->num_pages = num_pages;
0302 queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
0303 queue->kernel_if->u.g.vas =
0304 (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
0305 queue->kernel_if->host = false;
0306
0307 for (i = 0; i < num_pages; i++) {
0308 queue->kernel_if->u.g.vas[i] =
0309 dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
0310 &queue->kernel_if->u.g.pas[i],
0311 GFP_KERNEL);
0312 if (!queue->kernel_if->u.g.vas[i]) {
0313
0314 qp_free_queue(queue, i * PAGE_SIZE);
0315 return NULL;
0316 }
0317 }
0318
0319
0320 queue->q_header = queue->kernel_if->u.g.vas[0];
0321
0322 return queue;
0323 }
0324
0325
0326
0327
0328
0329
0330
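/*
 * Copies from an iov_iter into the queue's data pages starting at
 * queue_offset.  Host-side queues are backed by pinned user pages that
 * are kmap()ed one at a time; guest-side queues use the kernel VAs set
 * up in qp_alloc_queue() (page_index + 1 skips the header page).
 */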
0331 static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
0332 u64 queue_offset,
0333 struct iov_iter *from,
0334 size_t size)
0335 {
0336 struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
0337 size_t bytes_copied = 0;
0338
0339 while (bytes_copied < size) {
0340 const u64 page_index =
0341 (queue_offset + bytes_copied) / PAGE_SIZE;
0342 const size_t page_offset =
0343 (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
0344 void *va;
0345 size_t to_copy;
0346
0347 if (kernel_if->host)
0348 va = kmap(kernel_if->u.h.page[page_index]);
0349 else
0350 va = kernel_if->u.g.vas[page_index + 1];
0351
0352
0353 if (size - bytes_copied > PAGE_SIZE - page_offset)
0354
0355 to_copy = PAGE_SIZE - page_offset;
0356 else
0357 to_copy = size - bytes_copied;
0358
0359 if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
0360 from)) {
0361 if (kernel_if->host)
0362 kunmap(kernel_if->u.h.page[page_index]);
0363 return VMCI_ERROR_INVALID_ARGS;
0364 }
0365 bytes_copied += to_copy;
0366 if (kernel_if->host)
0367 kunmap(kernel_if->u.h.page[page_index]);
0368 }
0369
0370 return VMCI_SUCCESS;
0371 }
0372
0373
0374
0375
0376
0377
0378
0379 static int qp_memcpy_from_queue_iter(struct iov_iter *to,
0380 const struct vmci_queue *queue,
0381 u64 queue_offset, size_t size)
0382 {
0383 struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
0384 size_t bytes_copied = 0;
0385
0386 while (bytes_copied < size) {
0387 const u64 page_index =
0388 (queue_offset + bytes_copied) / PAGE_SIZE;
0389 const size_t page_offset =
0390 (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
0391 void *va;
0392 size_t to_copy;
0393 int err;
0394
0395 if (kernel_if->host)
0396 va = kmap(kernel_if->u.h.page[page_index]);
0397 else
0398 va = kernel_if->u.g.vas[page_index + 1];
0399
0400
0401 if (size - bytes_copied > PAGE_SIZE - page_offset)
0402
0403 to_copy = PAGE_SIZE - page_offset;
0404 else
0405 to_copy = size - bytes_copied;
0406
0407 err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
0408 if (err != to_copy) {
0409 if (kernel_if->host)
0410 kunmap(kernel_if->u.h.page[page_index]);
0411 return VMCI_ERROR_INVALID_ARGS;
0412 }
0413 bytes_copied += to_copy;
0414 if (kernel_if->host)
0415 kunmap(kernel_if->u.h.page[page_index]);
0416 }
0417
0418 return VMCI_SUCCESS;
0419 }
0420
0421
0422
0423
0424
0425
0426
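/*
 * Builds the sets of physical page numbers (PPNs) for the produce and
 * consume queues, to be handed to the hypervisor in the queue pair
 * allocation hypercall.
 */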
0427 static int qp_alloc_ppn_set(void *prod_q,
0428 u64 num_produce_pages,
0429 void *cons_q,
0430 u64 num_consume_pages, struct ppn_set *ppn_set)
0431 {
0432 u64 *produce_ppns;
0433 u64 *consume_ppns;
0434 struct vmci_queue *produce_q = prod_q;
0435 struct vmci_queue *consume_q = cons_q;
0436 u64 i;
0437
0438 if (!produce_q || !num_produce_pages || !consume_q ||
0439 !num_consume_pages || !ppn_set)
0440 return VMCI_ERROR_INVALID_ARGS;
0441
0442 if (ppn_set->initialized)
0443 return VMCI_ERROR_ALREADY_EXISTS;
0444
0445 produce_ppns =
0446 kmalloc_array(num_produce_pages, sizeof(*produce_ppns),
0447 GFP_KERNEL);
0448 if (!produce_ppns)
0449 return VMCI_ERROR_NO_MEM;
0450
0451 consume_ppns =
0452 kmalloc_array(num_consume_pages, sizeof(*consume_ppns),
0453 GFP_KERNEL);
0454 if (!consume_ppns) {
0455 kfree(produce_ppns);
0456 return VMCI_ERROR_NO_MEM;
0457 }
0458
0459 for (i = 0; i < num_produce_pages; i++)
0460 produce_ppns[i] =
0461 produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
0462
0463 for (i = 0; i < num_consume_pages; i++)
0464 consume_ppns[i] =
0465 consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
0466
0467 ppn_set->num_produce_pages = num_produce_pages;
0468 ppn_set->num_consume_pages = num_consume_pages;
0469 ppn_set->produce_ppns = produce_ppns;
0470 ppn_set->consume_ppns = consume_ppns;
0471 ppn_set->initialized = true;
0472 return VMCI_SUCCESS;
0473 }
0474
0475
0476
0477
0478 static void qp_free_ppn_set(struct ppn_set *ppn_set)
0479 {
0480 if (ppn_set->initialized) {
0481
0482 kfree(ppn_set->produce_ppns);
0483 kfree(ppn_set->consume_ppns);
0484 }
0485 memset(ppn_set, 0, sizeof(*ppn_set));
0486 }
0487
0488
0489
0490
0491
0492 static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
0493 {
0494 if (vmci_use_ppn64()) {
0495 memcpy(call_buf, ppn_set->produce_ppns,
0496 ppn_set->num_produce_pages *
0497 sizeof(*ppn_set->produce_ppns));
0498 memcpy(call_buf +
0499 ppn_set->num_produce_pages *
0500 sizeof(*ppn_set->produce_ppns),
0501 ppn_set->consume_ppns,
0502 ppn_set->num_consume_pages *
0503 sizeof(*ppn_set->consume_ppns));
0504 } else {
0505 int i;
0506 u32 *ppns = (u32 *) call_buf;
0507
0508 for (i = 0; i < ppn_set->num_produce_pages; i++)
0509 ppns[i] = (u32) ppn_set->produce_ppns[i];
0510
0511 ppns = &ppns[ppn_set->num_produce_pages];
0512
0513 for (i = 0; i < ppn_set->num_consume_pages; i++)
0514 ppns[i] = (u32) ppn_set->consume_ppns[i];
0515 }
0516
0517 return VMCI_SUCCESS;
0518 }
0519
0520
0521
0522
0523
0524
0525
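/*
 * Allocates a host-side queue structure together with the array of
 * page pointers that will later reference the guest-supplied memory.
 * The pages themselves are not allocated or pinned here.
 */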
0526 static struct vmci_queue *qp_host_alloc_queue(u64 size)
0527 {
0528 struct vmci_queue *queue;
0529 size_t queue_page_size;
0530 u64 num_pages;
0531 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
0532
0533 if (size > min_t(size_t, VMCI_MAX_GUEST_QP_MEMORY, SIZE_MAX - PAGE_SIZE))
0534 return NULL;
0535 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
0536 if (num_pages > (SIZE_MAX - queue_size) /
0537 sizeof(*queue->kernel_if->u.h.page))
0538 return NULL;
0539
0540 queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
0541
0542 if (queue_size + queue_page_size > KMALLOC_MAX_SIZE)
0543 return NULL;
0544
0545 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
0546 if (queue) {
0547 queue->q_header = NULL;
0548 queue->saved_header = NULL;
0549 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
0550 queue->kernel_if->host = true;
0551 queue->kernel_if->mutex = NULL;
0552 queue->kernel_if->num_pages = num_pages;
0553 queue->kernel_if->u.h.header_page =
0554 (struct page **)((u8 *)queue + queue_size);
0555 queue->kernel_if->u.h.page =
0556 &queue->kernel_if->u.h.header_page[1];
0557 }
0558
0559 return queue;
0560 }
0561
0562
0563
0564
0565
0566 static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
0567 {
0568 kfree(queue);
0569 }
0570
0571
0572
0573
0574
0575
0576
0577
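/*
 * On the host side the produce and consume queues of a pair share one
 * mutex (stored in the produce queue's kernel_if) that serializes
 * access while the backing memory is mapped or unmapped.  Guest
 * endpoints do not need the mutex.
 */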
0578 static void qp_init_queue_mutex(struct vmci_queue *produce_q,
0579 struct vmci_queue *consume_q)
0580 {
0581
0582
0583
0584
0585
0586 if (produce_q->kernel_if->host) {
0587 produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
0588 consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
0589 mutex_init(produce_q->kernel_if->mutex);
0590 }
0591 }
0592
0593
0594
0595
0596 static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
0597 struct vmci_queue *consume_q)
0598 {
0599 if (produce_q->kernel_if->host) {
0600 produce_q->kernel_if->mutex = NULL;
0601 consume_q->kernel_if->mutex = NULL;
0602 }
0603 }
0604
0605
0606
0607
0608
0609
0610 static void qp_acquire_queue_mutex(struct vmci_queue *queue)
0611 {
0612 if (queue->kernel_if->host)
0613 mutex_lock(queue->kernel_if->mutex);
0614 }
0615
0616
0617
0618
0619
0620
0621 static void qp_release_queue_mutex(struct vmci_queue *queue)
0622 {
0623 if (queue->kernel_if->host)
0624 mutex_unlock(queue->kernel_if->mutex);
0625 }
0626
0627
0628
0629
0630
0631 static void qp_release_pages(struct page **pages,
0632 u64 num_pages, bool dirty)
0633 {
0634 int i;
0635
0636 for (i = 0; i < num_pages; i++) {
0637 if (dirty)
0638 set_page_dirty_lock(pages[i]);
0639
0640 put_page(pages[i]);
0641 pages[i] = NULL;
0642 }
0643 }
0644
0645
0646
0647
0648
0649
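/*
 * Pins the user pages backing both queues with get_user_pages_fast().
 * On partial failure the pages pinned so far are released and
 * VMCI_ERROR_NO_MEM is returned.
 */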
0650 static int qp_host_get_user_memory(u64 produce_uva,
0651 u64 consume_uva,
0652 struct vmci_queue *produce_q,
0653 struct vmci_queue *consume_q)
0654 {
0655 int retval;
0656 int err = VMCI_SUCCESS;
0657
0658 retval = get_user_pages_fast((uintptr_t) produce_uva,
0659 produce_q->kernel_if->num_pages,
0660 FOLL_WRITE,
0661 produce_q->kernel_if->u.h.header_page);
0662 if (retval < (int)produce_q->kernel_if->num_pages) {
0663 pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
0664 retval);
0665 if (retval > 0)
0666 qp_release_pages(produce_q->kernel_if->u.h.header_page,
0667 retval, false);
0668 err = VMCI_ERROR_NO_MEM;
0669 goto out;
0670 }
0671
0672 retval = get_user_pages_fast((uintptr_t) consume_uva,
0673 consume_q->kernel_if->num_pages,
0674 FOLL_WRITE,
0675 consume_q->kernel_if->u.h.header_page);
0676 if (retval < (int)consume_q->kernel_if->num_pages) {
0677 pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
0678 retval);
0679 if (retval > 0)
0680 qp_release_pages(consume_q->kernel_if->u.h.header_page,
0681 retval, false);
0682 qp_release_pages(produce_q->kernel_if->u.h.header_page,
0683 produce_q->kernel_if->num_pages, false);
0684 err = VMCI_ERROR_NO_MEM;
0685 }
0686
0687 out:
0688 return err;
0689 }
0690
0691
0692
0693
0694
0695
0696 static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
0697 struct vmci_queue *produce_q,
0698 struct vmci_queue *consume_q)
0699 {
0700 u64 produce_uva;
0701 u64 consume_uva;
0702
0703
0704
0705
0706
0707
0708 produce_uva = page_store->pages;
0709 consume_uva = page_store->pages +
0710 produce_q->kernel_if->num_pages * PAGE_SIZE;
0711 return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
0712 consume_q);
0713 }
0714
0715
0716
0717
0718
0719
0720 static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
0721 struct vmci_queue *consume_q)
0722 {
0723 qp_release_pages(produce_q->kernel_if->u.h.header_page,
0724 produce_q->kernel_if->num_pages, true);
0725 memset(produce_q->kernel_if->u.h.header_page, 0,
0726 sizeof(*produce_q->kernel_if->u.h.header_page) *
0727 produce_q->kernel_if->num_pages);
0728 qp_release_pages(consume_q->kernel_if->u.h.header_page,
0729 consume_q->kernel_if->num_pages, true);
0730 memset(consume_q->kernel_if->u.h.header_page, 0,
0731 sizeof(*consume_q->kernel_if->u.h.header_page) *
0732 consume_q->kernel_if->num_pages);
0733 }
0734
0735
0736
0737
0738
0739
0740
0741
0742
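/*
 * Maps the produce and consume queue header pages into one contiguous
 * kernel virtual range with vmap(), so the pair of headers can be
 * addressed together.  Fails if the backing pages have not been
 * supplied yet.
 */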
0743 static int qp_host_map_queues(struct vmci_queue *produce_q,
0744 struct vmci_queue *consume_q)
0745 {
0746 int result;
0747
0748 if (!produce_q->q_header || !consume_q->q_header) {
0749 struct page *headers[2];
0750
0751 if (produce_q->q_header != consume_q->q_header)
0752 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
0753
0754 if (produce_q->kernel_if->u.h.header_page == NULL ||
0755 *produce_q->kernel_if->u.h.header_page == NULL)
0756 return VMCI_ERROR_UNAVAILABLE;
0757
0758 headers[0] = *produce_q->kernel_if->u.h.header_page;
0759 headers[1] = *consume_q->kernel_if->u.h.header_page;
0760
0761 produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
0762 if (produce_q->q_header != NULL) {
0763 consume_q->q_header =
0764 (struct vmci_queue_header *)((u8 *)
0765 produce_q->q_header +
0766 PAGE_SIZE);
0767 result = VMCI_SUCCESS;
0768 } else {
0769 pr_warn("vmap failed\n");
0770 result = VMCI_ERROR_NO_MEM;
0771 }
0772 } else {
0773 result = VMCI_SUCCESS;
0774 }
0775
0776 return result;
0777 }
0778
0779
0780
0781
0782
0783 static int qp_host_unmap_queues(u32 gid,
0784 struct vmci_queue *produce_q,
0785 struct vmci_queue *consume_q)
0786 {
0787 if (produce_q->q_header) {
0788 if (produce_q->q_header < consume_q->q_header)
0789 vunmap(produce_q->q_header);
0790 else
0791 vunmap(consume_q->q_header);
0792
0793 produce_q->q_header = NULL;
0794 consume_q->q_header = NULL;
0795 }
0796
0797 return VMCI_SUCCESS;
0798 }
0799
0800
0801
0802
0803
0804 static struct qp_entry *qp_list_find(struct qp_list *qp_list,
0805 struct vmci_handle handle)
0806 {
0807 struct qp_entry *entry;
0808
0809 if (vmci_handle_is_invalid(handle))
0810 return NULL;
0811
0812 list_for_each_entry(entry, &qp_list->head, list_item) {
0813 if (vmci_handle_is_equal(entry->handle, handle))
0814 return entry;
0815 }
0816
0817 return NULL;
0818 }
0819
0820
0821
0822
0823 static struct qp_guest_endpoint *
0824 qp_guest_handle_to_entry(struct vmci_handle handle)
0825 {
0826 struct qp_guest_endpoint *entry;
0827 struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);
0828
0829 entry = qp ? container_of(
0830 qp, struct qp_guest_endpoint, qp) : NULL;
0831 return entry;
0832 }
0833
0834
0835
0836
0837 static struct qp_broker_entry *
0838 qp_broker_handle_to_entry(struct vmci_handle handle)
0839 {
0840 struct qp_broker_entry *entry;
0841 struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);
0842
0843 entry = qp ? container_of(
0844 qp, struct qp_broker_entry, qp) : NULL;
0845 return entry;
0846 }
0847
0848
0849
0850
0851
0852 static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
0853 {
0854 u32 context_id = vmci_get_context_id();
0855 struct vmci_event_qp ev;
0856
0857 ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
0858 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
0859 VMCI_CONTEXT_RESOURCE_ID);
0860 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
0861 ev.msg.event_data.event =
0862 attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
0863 ev.payload.peer_id = context_id;
0864 ev.payload.handle = handle;
0865
0866 return vmci_event_dispatch(&ev.msg.hdr);
0867 }
0868
0869
0870
0871
0872
0873
0874
0875
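/*
 * Allocates and initializes a guest endpoint entry for a queue pair
 * and registers it as a VMCI resource.  If the caller passed an
 * invalid handle, a fresh one is generated for the current context.
 */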
0876 static struct qp_guest_endpoint *
0877 qp_guest_endpoint_create(struct vmci_handle handle,
0878 u32 peer,
0879 u32 flags,
0880 u64 produce_size,
0881 u64 consume_size,
0882 void *produce_q,
0883 void *consume_q)
0884 {
0885 int result;
0886 struct qp_guest_endpoint *entry;
0887
0888 const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
0889 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
0890
0891 if (vmci_handle_is_invalid(handle)) {
0892 u32 context_id = vmci_get_context_id();
0893
0894 handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
0895 }
0896
0897 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
0898 if (entry) {
0899 entry->qp.peer = peer;
0900 entry->qp.flags = flags;
0901 entry->qp.produce_size = produce_size;
0902 entry->qp.consume_size = consume_size;
0903 entry->qp.ref_count = 0;
0904 entry->num_ppns = num_ppns;
0905 entry->produce_q = produce_q;
0906 entry->consume_q = consume_q;
0907 INIT_LIST_HEAD(&entry->qp.list_item);
0908
0909
0910 result = vmci_resource_add(&entry->resource,
0911 VMCI_RESOURCE_TYPE_QPAIR_GUEST,
0912 handle);
0913 entry->qp.handle = vmci_resource_handle(&entry->resource);
0914 if ((result != VMCI_SUCCESS) ||
0915 qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
0916 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
0917 handle.context, handle.resource, result);
0918 kfree(entry);
0919 entry = NULL;
0920 }
0921 }
0922 return entry;
0923 }
0924
0925
0926
0927
0928 static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
0929 {
0930 qp_free_ppn_set(&entry->ppn_set);
0931 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
0932 qp_free_queue(entry->produce_q, entry->qp.produce_size);
0933 qp_free_queue(entry->consume_q, entry->qp.consume_size);
0934
0935 vmci_resource_remove(&entry->resource);
0936
0937 kfree(entry);
0938 }
0939
0940
0941
0942
0943
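/*
 * Sends the VMCI_QUEUEPAIR_ALLOC datagram to the hypervisor, carrying
 * the queue pair parameters followed by the PPNs of all backing pages.
 */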
0944 static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
0945 {
0946 struct vmci_qp_alloc_msg *alloc_msg;
0947 size_t msg_size;
0948 size_t ppn_size;
0949 int result;
0950
0951 if (!entry || entry->num_ppns <= 2)
0952 return VMCI_ERROR_INVALID_ARGS;
0953
0954 ppn_size = vmci_use_ppn64() ? sizeof(u64) : sizeof(u32);
0955 msg_size = sizeof(*alloc_msg) +
0956 (size_t) entry->num_ppns * ppn_size;
0957 alloc_msg = kmalloc(msg_size, GFP_KERNEL);
0958 if (!alloc_msg)
0959 return VMCI_ERROR_NO_MEM;
0960
0961 alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
0962 VMCI_QUEUEPAIR_ALLOC);
0963 alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
0964 alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
0965 alloc_msg->handle = entry->qp.handle;
0966 alloc_msg->peer = entry->qp.peer;
0967 alloc_msg->flags = entry->qp.flags;
0968 alloc_msg->produce_size = entry->qp.produce_size;
0969 alloc_msg->consume_size = entry->qp.consume_size;
0970 alloc_msg->num_ppns = entry->num_ppns;
0971
0972 result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
0973 &entry->ppn_set);
0974 if (result == VMCI_SUCCESS)
0975 result = vmci_send_datagram(&alloc_msg->hdr);
0976
0977 kfree(alloc_msg);
0978
0979 return result;
0980 }
0981
0982
0983
0984
0985
0986 static int qp_detatch_hypercall(struct vmci_handle handle)
0987 {
0988 struct vmci_qp_detach_msg detach_msg;
0989
0990 detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
0991 VMCI_QUEUEPAIR_DETACH);
0992 detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
0993 detach_msg.hdr.payload_size = sizeof(handle);
0994 detach_msg.handle = handle;
0995
0996 return vmci_send_datagram(&detach_msg.hdr);
0997 }
0998
0999
1000
1001
1002 static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
1003 {
1004 if (entry)
1005 list_add(&entry->list_item, &qp_list->head);
1006 }
1007
1008
1009
1010
1011 static void qp_list_remove_entry(struct qp_list *qp_list,
1012 struct qp_entry *entry)
1013 {
1014 if (entry)
1015 list_del(&entry->list_item);
1016 }
1017
1018
1019
1020
1021
1022 static int qp_detatch_guest_work(struct vmci_handle handle)
1023 {
1024 int result;
1025 struct qp_guest_endpoint *entry;
1026 u32 ref_count = ~0;
1027
1028 mutex_lock(&qp_guest_endpoints.mutex);
1029
1030 entry = qp_guest_handle_to_entry(handle);
1031 if (!entry) {
1032 mutex_unlock(&qp_guest_endpoints.mutex);
1033 return VMCI_ERROR_NOT_FOUND;
1034 }
1035
1036 if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1037 result = VMCI_SUCCESS;
1038
1039 if (entry->qp.ref_count > 1) {
1040 result = qp_notify_peer_local(false, handle);
1041
1042
1043
1044
1045
1046
1047 }
1048 } else {
1049 result = qp_detatch_hypercall(handle);
1050 if (result < VMCI_SUCCESS) {
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061 mutex_unlock(&qp_guest_endpoints.mutex);
1062 return result;
1063 }
1064 }
1065
1066
1067
1068
1069
1070
1071 entry->qp.ref_count--;
1072 if (entry->qp.ref_count == 0)
1073 qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);
1074
1075
1076 if (entry)
1077 ref_count = entry->qp.ref_count;
1078
1079 mutex_unlock(&qp_guest_endpoints.mutex);
1080
1081 if (ref_count == 0)
1082 qp_guest_endpoint_destroy(entry);
1083
1084 return result;
1085 }
1086
1087
1088
1089
1090
1091
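/*
 * Allocates (or, for local queue pairs, attaches to) a queue pair from
 * a guest endpoint.  For a local pair the second caller simply reuses
 * the existing queues with produce and consume swapped; otherwise
 * fresh queues are allocated, a PPN set is built, and the allocation
 * is announced to the hypervisor.
 */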
1092 static int qp_alloc_guest_work(struct vmci_handle *handle,
1093 struct vmci_queue **produce_q,
1094 u64 produce_size,
1095 struct vmci_queue **consume_q,
1096 u64 consume_size,
1097 u32 peer,
1098 u32 flags,
1099 u32 priv_flags)
1100 {
1101 const u64 num_produce_pages =
1102 DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
1103 const u64 num_consume_pages =
1104 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
1105 void *my_produce_q = NULL;
1106 void *my_consume_q = NULL;
1107 int result;
1108 struct qp_guest_endpoint *queue_pair_entry = NULL;
1109
1110 if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
1111 return VMCI_ERROR_NO_ACCESS;
1112
1113 mutex_lock(&qp_guest_endpoints.mutex);
1114
1115 queue_pair_entry = qp_guest_handle_to_entry(*handle);
1116 if (queue_pair_entry) {
1117 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1118
1119 if (queue_pair_entry->qp.ref_count > 1) {
1120 pr_devel("Error attempting to attach more than once\n");
1121 result = VMCI_ERROR_UNAVAILABLE;
1122 goto error_keep_entry;
1123 }
1124
1125 if (queue_pair_entry->qp.produce_size != consume_size ||
1126 queue_pair_entry->qp.consume_size !=
1127 produce_size ||
1128 queue_pair_entry->qp.flags !=
1129 (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
1130 pr_devel("Error mismatched queue pair in local attach\n");
1131 result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
1132 goto error_keep_entry;
1133 }
1134
1135
1136
1137
1138
1139
1140 result = qp_notify_peer_local(true, *handle);
1141 if (result < VMCI_SUCCESS)
1142 goto error_keep_entry;
1143
1144 my_produce_q = queue_pair_entry->consume_q;
1145 my_consume_q = queue_pair_entry->produce_q;
1146 goto out;
1147 }
1148
1149 result = VMCI_ERROR_ALREADY_EXISTS;
1150 goto error_keep_entry;
1151 }
1152
1153 my_produce_q = qp_alloc_queue(produce_size, flags);
1154 if (!my_produce_q) {
1155 pr_warn("Error allocating pages for produce queue\n");
1156 result = VMCI_ERROR_NO_MEM;
1157 goto error;
1158 }
1159
1160 my_consume_q = qp_alloc_queue(consume_size, flags);
1161 if (!my_consume_q) {
1162 pr_warn("Error allocating pages for consume queue\n");
1163 result = VMCI_ERROR_NO_MEM;
1164 goto error;
1165 }
1166
1167 queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
1168 produce_size, consume_size,
1169 my_produce_q, my_consume_q);
1170 if (!queue_pair_entry) {
1171 pr_warn("Error allocating memory in %s\n", __func__);
1172 result = VMCI_ERROR_NO_MEM;
1173 goto error;
1174 }
1175
1176 result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
1177 num_consume_pages,
1178 &queue_pair_entry->ppn_set);
1179 if (result < VMCI_SUCCESS) {
1180 pr_warn("qp_alloc_ppn_set failed\n");
1181 goto error;
1182 }
1183
1184
1185
1186
1187
1188 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
1189
1190 u32 context_id = vmci_get_context_id();
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201 if (queue_pair_entry->qp.handle.context != context_id ||
1202 (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
1203 queue_pair_entry->qp.peer != context_id)) {
1204 result = VMCI_ERROR_NO_ACCESS;
1205 goto error;
1206 }
1207
1208 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
1209 result = VMCI_ERROR_NOT_FOUND;
1210 goto error;
1211 }
1212 } else {
1213 result = qp_alloc_hypercall(queue_pair_entry);
1214 if (result < VMCI_SUCCESS) {
1215 pr_devel("qp_alloc_hypercall result = %d\n", result);
1216 goto error;
1217 }
1218 }
1219
1220 qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
1221 (struct vmci_queue *)my_consume_q);
1222
1223 qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);
1224
1225 out:
1226 queue_pair_entry->qp.ref_count++;
1227 *handle = queue_pair_entry->qp.handle;
1228 *produce_q = (struct vmci_queue *)my_produce_q;
1229 *consume_q = (struct vmci_queue *)my_consume_q;
1230
1231
1232
1233
1234
1235
1236 if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
1237 queue_pair_entry->qp.ref_count == 1) {
1238 vmci_q_header_init((*produce_q)->q_header, *handle);
1239 vmci_q_header_init((*consume_q)->q_header, *handle);
1240 }
1241
1242 mutex_unlock(&qp_guest_endpoints.mutex);
1243
1244 return VMCI_SUCCESS;
1245
1246 error:
1247 mutex_unlock(&qp_guest_endpoints.mutex);
1248 if (queue_pair_entry) {
1249
1250 qp_guest_endpoint_destroy(queue_pair_entry);
1251 } else {
1252 qp_free_queue(my_produce_q, produce_size);
1253 qp_free_queue(my_consume_q, consume_size);
1254 }
1255 return result;
1256
1257 error_keep_entry:
1258
1259 mutex_unlock(&qp_guest_endpoints.mutex);
1260 return result;
1261 }
1262
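/*
 * The first endpoint issuing a queue pair allocation creates the
 * broker entry for the pair.  Host-side queue structures are allocated
 * here, but for non-local pairs the entry stays in a *_NO_MEM state
 * until the guest provides the backing memory, either through a page
 * store passed in here or via a later vmci_qp_broker_set_page_store()
 * or vmci_qp_broker_map() call.
 */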
1281 static int qp_broker_create(struct vmci_handle handle,
1282 u32 peer,
1283 u32 flags,
1284 u32 priv_flags,
1285 u64 produce_size,
1286 u64 consume_size,
1287 struct vmci_qp_page_store *page_store,
1288 struct vmci_ctx *context,
1289 vmci_event_release_cb wakeup_cb,
1290 void *client_data, struct qp_broker_entry **ent)
1291 {
1292 struct qp_broker_entry *entry = NULL;
1293 const u32 context_id = vmci_ctx_get_id(context);
1294 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1295 int result;
1296 u64 guest_produce_size;
1297 u64 guest_consume_size;
1298
1299
1300 if (flags & VMCI_QPFLAG_ATTACH_ONLY)
1301 return VMCI_ERROR_NOT_FOUND;
1302
1303
1304
1305
1306
1307 if (handle.context != context_id && handle.context != peer)
1308 return VMCI_ERROR_NO_ACCESS;
1309
1310 if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
1311 return VMCI_ERROR_DST_UNREACHABLE;
1312
1313
1314
1315
1316
1317 if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
1318 return VMCI_ERROR_NO_ACCESS;
1319
1320 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1321 if (!entry)
1322 return VMCI_ERROR_NO_MEM;
1323
1324 if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
1325
1326
1327
1328
1329
1330
1331
1332
1333 guest_produce_size = consume_size;
1334 guest_consume_size = produce_size;
1335 } else {
1336 guest_produce_size = produce_size;
1337 guest_consume_size = consume_size;
1338 }
1339
1340 entry->qp.handle = handle;
1341 entry->qp.peer = peer;
1342 entry->qp.flags = flags;
1343 entry->qp.produce_size = guest_produce_size;
1344 entry->qp.consume_size = guest_consume_size;
1345 entry->qp.ref_count = 1;
1346 entry->create_id = context_id;
1347 entry->attach_id = VMCI_INVALID_ID;
1348 entry->state = VMCIQPB_NEW;
1349 entry->require_trusted_attach =
1350 !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
1351 entry->created_by_trusted =
1352 !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
1353 entry->vmci_page_files = false;
1354 entry->wakeup_cb = wakeup_cb;
1355 entry->client_data = client_data;
1356 entry->produce_q = qp_host_alloc_queue(guest_produce_size);
1357 if (entry->produce_q == NULL) {
1358 result = VMCI_ERROR_NO_MEM;
1359 goto error;
1360 }
1361 entry->consume_q = qp_host_alloc_queue(guest_consume_size);
1362 if (entry->consume_q == NULL) {
1363 result = VMCI_ERROR_NO_MEM;
1364 goto error;
1365 }
1366
1367 qp_init_queue_mutex(entry->produce_q, entry->consume_q);
1368
1369 INIT_LIST_HEAD(&entry->qp.list_item);
1370
1371 if (is_local) {
1372 u8 *tmp;
1373
1374 entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
1375 PAGE_SIZE, GFP_KERNEL);
1376 if (entry->local_mem == NULL) {
1377 result = VMCI_ERROR_NO_MEM;
1378 goto error;
1379 }
1380 entry->state = VMCIQPB_CREATED_MEM;
1381 entry->produce_q->q_header = entry->local_mem;
1382 tmp = (u8 *)entry->local_mem + PAGE_SIZE *
1383 (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
1384 entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
1385 } else if (page_store) {
1386
1387
1388
1389
1390 result = qp_host_register_user_memory(page_store,
1391 entry->produce_q,
1392 entry->consume_q);
1393 if (result < VMCI_SUCCESS)
1394 goto error;
1395
1396 entry->state = VMCIQPB_CREATED_MEM;
1397 } else {
1398
1399
1400
1401
1402
1403
1404
1405 entry->state = VMCIQPB_CREATED_NO_MEM;
1406 }
1407
1408 qp_list_add_entry(&qp_broker_list, &entry->qp);
1409 if (ent != NULL)
1410 *ent = entry;
1411
1412
1413 result = vmci_resource_add(&entry->resource,
1414 VMCI_RESOURCE_TYPE_QPAIR_HOST,
1415 handle);
1416 if (result != VMCI_SUCCESS) {
1417 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
1418 handle.context, handle.resource, result);
1419 goto error;
1420 }
1421
1422 entry->qp.handle = vmci_resource_handle(&entry->resource);
1423 if (is_local) {
1424 vmci_q_header_init(entry->produce_q->q_header,
1425 entry->qp.handle);
1426 vmci_q_header_init(entry->consume_q->q_header,
1427 entry->qp.handle);
1428 }
1429
1430 vmci_ctx_qp_create(context, entry->qp.handle);
1431
1432 return VMCI_SUCCESS;
1433
1434 error:
1435 if (entry != NULL) {
1436 qp_host_free_queue(entry->produce_q, guest_produce_size);
1437 qp_host_free_queue(entry->consume_q, guest_consume_size);
1438 kfree(entry);
1439 }
1440
1441 return result;
1442 }
1443
1444
1445
1446
1447
1448
1449
1450 static int qp_notify_peer(bool attach,
1451 struct vmci_handle handle,
1452 u32 my_id,
1453 u32 peer_id)
1454 {
1455 int rv;
1456 struct vmci_event_qp ev;
1457
1458 if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
1459 peer_id == VMCI_INVALID_ID)
1460 return VMCI_ERROR_INVALID_ARGS;
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470 ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
1471 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
1472 VMCI_CONTEXT_RESOURCE_ID);
1473 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
1474 ev.msg.event_data.event = attach ?
1475 VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
1476 ev.payload.handle = handle;
1477 ev.payload.peer_id = my_id;
1478
1479 rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
1480 &ev.msg.hdr, false);
1481 if (rv < VMCI_SUCCESS)
1482 pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
1483 attach ? "ATTACH" : "DETACH", peer_id);
1484
1485 return rv;
1486 }
1487
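/*
 * The second endpoint issuing a queue pair allocation attaches to the
 * existing broker entry.  This validates that peer, flags, sizes and
 * privileges are compatible with the creator, registers the attacher's
 * memory if it supplies a page store, moves the entry to an ATTACHED
 * state, and notifies the creator once the backing memory is in place.
 */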
1509 static int qp_broker_attach(struct qp_broker_entry *entry,
1510 u32 peer,
1511 u32 flags,
1512 u32 priv_flags,
1513 u64 produce_size,
1514 u64 consume_size,
1515 struct vmci_qp_page_store *page_store,
1516 struct vmci_ctx *context,
1517 vmci_event_release_cb wakeup_cb,
1518 void *client_data,
1519 struct qp_broker_entry **ent)
1520 {
1521 const u32 context_id = vmci_ctx_get_id(context);
1522 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1523 int result;
1524
1525 if (entry->state != VMCIQPB_CREATED_NO_MEM &&
1526 entry->state != VMCIQPB_CREATED_MEM)
1527 return VMCI_ERROR_UNAVAILABLE;
1528
1529 if (is_local) {
1530 if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
1531 context_id != entry->create_id) {
1532 return VMCI_ERROR_INVALID_ARGS;
1533 }
1534 } else if (context_id == entry->create_id ||
1535 context_id == entry->attach_id) {
1536 return VMCI_ERROR_ALREADY_EXISTS;
1537 }
1538
1539 if (VMCI_CONTEXT_IS_VM(context_id) &&
1540 VMCI_CONTEXT_IS_VM(entry->create_id))
1541 return VMCI_ERROR_DST_UNREACHABLE;
1542
1543
1544
1545
1546
1547 if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
1548 !entry->created_by_trusted)
1549 return VMCI_ERROR_NO_ACCESS;
1550
1551
1552
1553
1554
1555 if (entry->require_trusted_attach &&
1556 (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
1557 return VMCI_ERROR_NO_ACCESS;
1558
1559
1560
1561
1562
1563 if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
1564 return VMCI_ERROR_NO_ACCESS;
1565
1566 if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
1567
1568
1569
1570
1571
1572 if (!vmci_ctx_supports_host_qp(context))
1573 return VMCI_ERROR_INVALID_RESOURCE;
1574
1575 } else if (context_id == VMCI_HOST_CONTEXT_ID) {
1576 struct vmci_ctx *create_context;
1577 bool supports_host_qp;
1578
1579
1580
1581
1582
1583
1584 create_context = vmci_ctx_get(entry->create_id);
1585 supports_host_qp = vmci_ctx_supports_host_qp(create_context);
1586 vmci_ctx_put(create_context);
1587
1588 if (!supports_host_qp)
1589 return VMCI_ERROR_INVALID_RESOURCE;
1590 }
1591
1592 if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
1593 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1594
1595 if (context_id != VMCI_HOST_CONTEXT_ID) {
1596
1597
1598
1599
1600
1601
1602 if (entry->qp.produce_size != produce_size ||
1603 entry->qp.consume_size != consume_size) {
1604 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1605 }
1606 } else if (entry->qp.produce_size != consume_size ||
1607 entry->qp.consume_size != produce_size) {
1608 return VMCI_ERROR_QUEUEPAIR_MISMATCH;
1609 }
1610
1611 if (context_id != VMCI_HOST_CONTEXT_ID) {
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625 if (entry->state != VMCIQPB_CREATED_NO_MEM)
1626 return VMCI_ERROR_INVALID_ARGS;
1627
1628 if (page_store != NULL) {
1629
1630
1631
1632
1633
1634
1635
1636 result = qp_host_register_user_memory(page_store,
1637 entry->produce_q,
1638 entry->consume_q);
1639 if (result < VMCI_SUCCESS)
1640 return result;
1641
1642 entry->state = VMCIQPB_ATTACHED_MEM;
1643 } else {
1644 entry->state = VMCIQPB_ATTACHED_NO_MEM;
1645 }
1646 } else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
1647
1648
1649
1650
1651
1652
1653
1654 return VMCI_ERROR_UNAVAILABLE;
1655 } else {
1656
1657 entry->state = VMCIQPB_ATTACHED_MEM;
1658 }
1659
1660 if (entry->state == VMCIQPB_ATTACHED_MEM) {
1661 result =
1662 qp_notify_peer(true, entry->qp.handle, context_id,
1663 entry->create_id);
1664 if (result < VMCI_SUCCESS)
1665 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
1666 entry->create_id, entry->qp.handle.context,
1667 entry->qp.handle.resource);
1668 }
1669
1670 entry->attach_id = context_id;
1671 entry->qp.ref_count++;
1672 if (wakeup_cb) {
1673 entry->wakeup_cb = wakeup_cb;
1674 entry->client_data = client_data;
1675 }
1676
1677
1678
1679
1680
1681 if (!is_local)
1682 vmci_ctx_qp_create(context, entry->qp.handle);
1683
1684 if (ent != NULL)
1685 *ent = entry;
1686
1687 return VMCI_SUCCESS;
1688 }
1689
1690
1691
1692
1693
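/*
 * Entry point for the broker side of queue pair allocation: under the
 * broker list lock, either creates a new broker entry or attaches to
 * an existing one.  *swap tells a host caller whether its produce and
 * consume queues must be exchanged relative to the stored entry.
 */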
1694 static int qp_broker_alloc(struct vmci_handle handle,
1695 u32 peer,
1696 u32 flags,
1697 u32 priv_flags,
1698 u64 produce_size,
1699 u64 consume_size,
1700 struct vmci_qp_page_store *page_store,
1701 struct vmci_ctx *context,
1702 vmci_event_release_cb wakeup_cb,
1703 void *client_data,
1704 struct qp_broker_entry **ent,
1705 bool *swap)
1706 {
1707 const u32 context_id = vmci_ctx_get_id(context);
1708 bool create;
1709 struct qp_broker_entry *entry = NULL;
1710 bool is_local = flags & VMCI_QPFLAG_LOCAL;
1711 int result;
1712
1713 if (vmci_handle_is_invalid(handle) ||
1714 (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
1715 !(produce_size || consume_size) ||
1716 !context || context_id == VMCI_INVALID_ID ||
1717 handle.context == VMCI_INVALID_ID) {
1718 return VMCI_ERROR_INVALID_ARGS;
1719 }
1720
1721 if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
1722 return VMCI_ERROR_INVALID_ARGS;
1723
1724
1725
1726
1727
1728
1729 mutex_lock(&qp_broker_list.mutex);
1730
1731 if (!is_local && vmci_ctx_qp_exists(context, handle)) {
1732 pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
1733 context_id, handle.context, handle.resource);
1734 mutex_unlock(&qp_broker_list.mutex);
1735 return VMCI_ERROR_ALREADY_EXISTS;
1736 }
1737
1738 if (handle.resource != VMCI_INVALID_ID)
1739 entry = qp_broker_handle_to_entry(handle);
1740
1741 if (!entry) {
1742 create = true;
1743 result =
1744 qp_broker_create(handle, peer, flags, priv_flags,
1745 produce_size, consume_size, page_store,
1746 context, wakeup_cb, client_data, ent);
1747 } else {
1748 create = false;
1749 result =
1750 qp_broker_attach(entry, peer, flags, priv_flags,
1751 produce_size, consume_size, page_store,
1752 context, wakeup_cb, client_data, ent);
1753 }
1754
1755 mutex_unlock(&qp_broker_list.mutex);
1756
1757 if (swap)
1758 *swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
1759 !(create && is_local);
1760
1761 return result;
1762 }
1763
1764
1765
1766
1767
1768 static int qp_alloc_host_work(struct vmci_handle *handle,
1769 struct vmci_queue **produce_q,
1770 u64 produce_size,
1771 struct vmci_queue **consume_q,
1772 u64 consume_size,
1773 u32 peer,
1774 u32 flags,
1775 u32 priv_flags,
1776 vmci_event_release_cb wakeup_cb,
1777 void *client_data)
1778 {
1779 struct vmci_handle new_handle;
1780 struct vmci_ctx *context;
1781 struct qp_broker_entry *entry;
1782 int result;
1783 bool swap;
1784
1785 if (vmci_handle_is_invalid(*handle)) {
1786 new_handle = vmci_make_handle(
1787 VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
1788 } else
1789 new_handle = *handle;
1790
1791 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1792 entry = NULL;
1793 result =
1794 qp_broker_alloc(new_handle, peer, flags, priv_flags,
1795 produce_size, consume_size, NULL, context,
1796 wakeup_cb, client_data, &entry, &swap);
1797 if (result == VMCI_SUCCESS) {
1798 if (swap) {
1799
1800
1801
1802
1803
1804
1805 *produce_q = entry->consume_q;
1806 *consume_q = entry->produce_q;
1807 } else {
1808 *produce_q = entry->produce_q;
1809 *consume_q = entry->consume_q;
1810 }
1811
1812 *handle = vmci_resource_handle(&entry->resource);
1813 } else {
1814 *handle = VMCI_INVALID_HANDLE;
1815 pr_devel("queue pair broker failed to alloc (result=%d)\n",
1816 result);
1817 }
1818 vmci_ctx_put(context);
1819 return result;
1820 }
1821
1822
1823
1824
1825
1826
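/*
 * Allocates a queue pair endpoint, dispatching to the guest or the
 * host implementation depending on guest_endpoint.
 *
 * A minimal usage sketch (hypothetical caller; sizes, peer and flags
 * are illustrative, error handling and the matching detach are
 * omitted):
 *
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	struct vmci_queue *produce_q, *consume_q;
 *	int rv;
 *
 *	rv = vmci_qp_alloc(&handle, &produce_q, 4096, &consume_q, 4096,
 *			   VMCI_INVALID_ID, 0, VMCI_NO_PRIVILEGE_FLAGS,
 *			   true, NULL, NULL);
 */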
1827 int vmci_qp_alloc(struct vmci_handle *handle,
1828 struct vmci_queue **produce_q,
1829 u64 produce_size,
1830 struct vmci_queue **consume_q,
1831 u64 consume_size,
1832 u32 peer,
1833 u32 flags,
1834 u32 priv_flags,
1835 bool guest_endpoint,
1836 vmci_event_release_cb wakeup_cb,
1837 void *client_data)
1838 {
1839 if (!handle || !produce_q || !consume_q ||
1840 (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
1841 return VMCI_ERROR_INVALID_ARGS;
1842
1843 if (guest_endpoint) {
1844 return qp_alloc_guest_work(handle, produce_q,
1845 produce_size, consume_q,
1846 consume_size, peer,
1847 flags, priv_flags);
1848 } else {
1849 return qp_alloc_host_work(handle, produce_q,
1850 produce_size, consume_q,
1851 consume_size, peer, flags,
1852 priv_flags, wakeup_cb, client_data);
1853 }
1854 }
1855
1856
1857
1858
1859
1860 static int qp_detatch_host_work(struct vmci_handle handle)
1861 {
1862 int result;
1863 struct vmci_ctx *context;
1864
1865 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1866
1867 result = vmci_qp_broker_detach(handle, context);
1868
1869 vmci_ctx_put(context);
1870 return result;
1871 }
1872
1873
1874
1875
1876
1877 static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
1878 {
1879 if (vmci_handle_is_invalid(handle))
1880 return VMCI_ERROR_INVALID_ARGS;
1881
1882 if (guest_endpoint)
1883 return qp_detatch_guest_work(handle);
1884 else
1885 return qp_detatch_host_work(handle);
1886 }
1887
1888
1889
1890
1891
1892 static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
1893 {
1894 if (!list_empty(&qp_list->head)) {
1895 struct qp_entry *entry =
1896 list_first_entry(&qp_list->head, struct qp_entry,
1897 list_item);
1898 return entry;
1899 }
1900
1901 return NULL;
1902 }
1903
1904 void vmci_qp_broker_exit(void)
1905 {
1906 struct qp_entry *entry;
1907 struct qp_broker_entry *be;
1908
1909 mutex_lock(&qp_broker_list.mutex);
1910
1911 while ((entry = qp_list_get_head(&qp_broker_list))) {
1912 be = (struct qp_broker_entry *)entry;
1913
1914 qp_list_remove_entry(&qp_broker_list, entry);
1915 kfree(be);
1916 }
1917
1918 mutex_unlock(&qp_broker_list.mutex);
1919 }
1920
1921
1922
1923
1924
1925
1926
1927
1928 int vmci_qp_broker_alloc(struct vmci_handle handle,
1929 u32 peer,
1930 u32 flags,
1931 u32 priv_flags,
1932 u64 produce_size,
1933 u64 consume_size,
1934 struct vmci_qp_page_store *page_store,
1935 struct vmci_ctx *context)
1936 {
1937 if (!QP_SIZES_ARE_VALID(produce_size, consume_size))
1938 return VMCI_ERROR_NO_RESOURCES;
1939
1940 return qp_broker_alloc(handle, peer, flags, priv_flags,
1941 produce_size, consume_size,
1942 page_store, context, NULL, NULL, NULL, NULL);
1943 }
1944
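/*
 * Supplies the user-space addresses of the produce and consume queue
 * memory for a queue pair whose broker entry is still in a *_NO_MEM
 * state: the pages are pinned and mapped, the state moves to the
 * corresponding *_MEM state, and the peer is notified if this
 * completes an attach.  Only the creating context, or the attached
 * side of a host-created pair, may supply the memory.
 */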
1961 int vmci_qp_broker_set_page_store(struct vmci_handle handle,
1962 u64 produce_uva,
1963 u64 consume_uva,
1964 struct vmci_ctx *context)
1965 {
1966 struct qp_broker_entry *entry;
1967 int result;
1968 const u32 context_id = vmci_ctx_get_id(context);
1969
1970 if (vmci_handle_is_invalid(handle) || !context ||
1971 context_id == VMCI_INVALID_ID)
1972 return VMCI_ERROR_INVALID_ARGS;
1973
1974
1975
1976
1977
1978
1979 if (produce_uva == 0 || consume_uva == 0)
1980 return VMCI_ERROR_INVALID_ARGS;
1981
1982 mutex_lock(&qp_broker_list.mutex);
1983
1984 if (!vmci_ctx_qp_exists(context, handle)) {
1985 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
1986 context_id, handle.context, handle.resource);
1987 result = VMCI_ERROR_NOT_FOUND;
1988 goto out;
1989 }
1990
1991 entry = qp_broker_handle_to_entry(handle);
1992 if (!entry) {
1993 result = VMCI_ERROR_NOT_FOUND;
1994 goto out;
1995 }
1996
1997
1998
1999
2000
2001
2002
2003 if (entry->create_id != context_id &&
2004 (entry->create_id != VMCI_HOST_CONTEXT_ID ||
2005 entry->attach_id != context_id)) {
2006 result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
2007 goto out;
2008 }
2009
2010 if (entry->state != VMCIQPB_CREATED_NO_MEM &&
2011 entry->state != VMCIQPB_ATTACHED_NO_MEM) {
2012 result = VMCI_ERROR_UNAVAILABLE;
2013 goto out;
2014 }
2015
2016 result = qp_host_get_user_memory(produce_uva, consume_uva,
2017 entry->produce_q, entry->consume_q);
2018 if (result < VMCI_SUCCESS)
2019 goto out;
2020
2021 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2022 if (result < VMCI_SUCCESS) {
2023 qp_host_unregister_user_memory(entry->produce_q,
2024 entry->consume_q);
2025 goto out;
2026 }
2027
2028 if (entry->state == VMCIQPB_CREATED_NO_MEM)
2029 entry->state = VMCIQPB_CREATED_MEM;
2030 else
2031 entry->state = VMCIQPB_ATTACHED_MEM;
2032
2033 entry->vmci_page_files = true;
2034
2035 if (entry->state == VMCIQPB_ATTACHED_MEM) {
2036 result =
2037 qp_notify_peer(true, handle, context_id, entry->create_id);
2038 if (result < VMCI_SUCCESS) {
2039 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
2040 entry->create_id, entry->qp.handle.context,
2041 entry->qp.handle.resource);
2042 }
2043 }
2044
2045 result = VMCI_SUCCESS;
2046 out:
2047 mutex_unlock(&qp_broker_list.mutex);
2048 return result;
2049 }
2050
2051
2052
2053
2054
2055
2056 static void qp_reset_saved_headers(struct qp_broker_entry *entry)
2057 {
2058 entry->produce_q->saved_header = NULL;
2059 entry->consume_q->saved_header = NULL;
2060 }
2061
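/*
 * Detaches the calling context from a queue pair.  If this drops the
 * last reference the entry is torn down completely; otherwise the peer
 * is notified and the entry moves to a SHUTDOWN state.  For a guest
 * detach the pinned guest memory is released and the queue headers are
 * unmapped first.
 */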
2080 int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
2081 {
2082 struct qp_broker_entry *entry;
2083 const u32 context_id = vmci_ctx_get_id(context);
2084 u32 peer_id;
2085 bool is_local = false;
2086 int result;
2087
2088 if (vmci_handle_is_invalid(handle) || !context ||
2089 context_id == VMCI_INVALID_ID) {
2090 return VMCI_ERROR_INVALID_ARGS;
2091 }
2092
2093 mutex_lock(&qp_broker_list.mutex);
2094
2095 if (!vmci_ctx_qp_exists(context, handle)) {
2096 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2097 context_id, handle.context, handle.resource);
2098 result = VMCI_ERROR_NOT_FOUND;
2099 goto out;
2100 }
2101
2102 entry = qp_broker_handle_to_entry(handle);
2103 if (!entry) {
2104 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2105 context_id, handle.context, handle.resource);
2106 result = VMCI_ERROR_NOT_FOUND;
2107 goto out;
2108 }
2109
2110 if (context_id != entry->create_id && context_id != entry->attach_id) {
2111 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2112 goto out;
2113 }
2114
2115 if (context_id == entry->create_id) {
2116 peer_id = entry->attach_id;
2117 entry->create_id = VMCI_INVALID_ID;
2118 } else {
2119 peer_id = entry->create_id;
2120 entry->attach_id = VMCI_INVALID_ID;
2121 }
2122 entry->qp.ref_count--;
2123
2124 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2125
2126 if (context_id != VMCI_HOST_CONTEXT_ID) {
2127 bool headers_mapped;
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137 qp_acquire_queue_mutex(entry->produce_q);
2138 headers_mapped = entry->produce_q->q_header ||
2139 entry->consume_q->q_header;
2140 if (QPBROKERSTATE_HAS_MEM(entry)) {
2141 result =
2142 qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
2143 entry->produce_q,
2144 entry->consume_q);
2145 if (result < VMCI_SUCCESS)
2146 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2147 handle.context, handle.resource,
2148 result);
2149
2150 qp_host_unregister_user_memory(entry->produce_q,
2151 entry->consume_q);
2152
2153 }
2154
2155 if (!headers_mapped)
2156 qp_reset_saved_headers(entry);
2157
2158 qp_release_queue_mutex(entry->produce_q);
2159
2160 if (!headers_mapped && entry->wakeup_cb)
2161 entry->wakeup_cb(entry->client_data);
2162
2163 } else {
2164 if (entry->wakeup_cb) {
2165 entry->wakeup_cb = NULL;
2166 entry->client_data = NULL;
2167 }
2168 }
2169
2170 if (entry->qp.ref_count == 0) {
2171 qp_list_remove_entry(&qp_broker_list, &entry->qp);
2172
2173 if (is_local)
2174 kfree(entry->local_mem);
2175
2176 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
2177 qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
2178 qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
2179
2180 vmci_resource_remove(&entry->resource);
2181
2182 kfree(entry);
2183
2184 vmci_ctx_qp_destroy(context, handle);
2185 } else {
2186 qp_notify_peer(false, handle, context_id, peer_id);
2187 if (context_id == VMCI_HOST_CONTEXT_ID &&
2188 QPBROKERSTATE_HAS_MEM(entry)) {
2189 entry->state = VMCIQPB_SHUTDOWN_MEM;
2190 } else {
2191 entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
2192 }
2193
2194 if (!is_local)
2195 vmci_ctx_qp_destroy(context, handle);
2196
2197 }
2198 result = VMCI_SUCCESS;
2199 out:
2200 mutex_unlock(&qp_broker_list.mutex);
2201 return result;
2202 }
2203
2204
2205
2206
2207
2208
2209
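/*
 * Called when a guest (re)establishes the memory backing a queue pair,
 * for example after resume: the supplied guest memory is registered as
 * the page store, the broker state advances from *_NO_MEM to *_MEM,
 * and any host-side waiter is woken through the wakeup callback.
 */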
2210 int vmci_qp_broker_map(struct vmci_handle handle,
2211 struct vmci_ctx *context,
2212 u64 guest_mem)
2213 {
2214 struct qp_broker_entry *entry;
2215 const u32 context_id = vmci_ctx_get_id(context);
2216 int result;
2217
2218 if (vmci_handle_is_invalid(handle) || !context ||
2219 context_id == VMCI_INVALID_ID)
2220 return VMCI_ERROR_INVALID_ARGS;
2221
2222 mutex_lock(&qp_broker_list.mutex);
2223
2224 if (!vmci_ctx_qp_exists(context, handle)) {
2225 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2226 context_id, handle.context, handle.resource);
2227 result = VMCI_ERROR_NOT_FOUND;
2228 goto out;
2229 }
2230
2231 entry = qp_broker_handle_to_entry(handle);
2232 if (!entry) {
2233 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2234 context_id, handle.context, handle.resource);
2235 result = VMCI_ERROR_NOT_FOUND;
2236 goto out;
2237 }
2238
2239 if (context_id != entry->create_id && context_id != entry->attach_id) {
2240 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2241 goto out;
2242 }
2243
2244 result = VMCI_SUCCESS;
2245
2246 if (context_id != VMCI_HOST_CONTEXT_ID &&
2247 !QPBROKERSTATE_HAS_MEM(entry)) {
2248 struct vmci_qp_page_store page_store;
2249
2250 page_store.pages = guest_mem;
2251 page_store.len = QPE_NUM_PAGES(entry->qp);
2252
2253 qp_acquire_queue_mutex(entry->produce_q);
2254 qp_reset_saved_headers(entry);
2255 result =
2256 qp_host_register_user_memory(&page_store,
2257 entry->produce_q,
2258 entry->consume_q);
2259 qp_release_queue_mutex(entry->produce_q);
2260 if (result == VMCI_SUCCESS) {
2261
2262
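/* Move the state from *_NO_MEM to the corresponding *_MEM state. */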
2263 entry->state++;
2264
2265 if (entry->wakeup_cb)
2266 entry->wakeup_cb(entry->client_data);
2267 }
2268 }
2269
2270 out:
2271 mutex_unlock(&qp_broker_list.mutex);
2272 return result;
2273 }
2274
2275
2276
2277
2278
2279
2280
2281
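/*
 * Saves a snapshot of both queue headers in the broker entry so the
 * queue state can still be inspected after the guest memory backing
 * the headers has been unmapped.
 */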
2282 static int qp_save_headers(struct qp_broker_entry *entry)
2283 {
2284 int result;
2285
2286 if (entry->produce_q->saved_header != NULL &&
2287 entry->consume_q->saved_header != NULL) {
2288
2289
2290
2291
2292
2293
2294 return VMCI_SUCCESS;
2295 }
2296
2297 if (NULL == entry->produce_q->q_header ||
2298 NULL == entry->consume_q->q_header) {
2299 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2300 if (result < VMCI_SUCCESS)
2301 return result;
2302 }
2303
2304 memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
2305 sizeof(entry->saved_produce_q));
2306 entry->produce_q->saved_header = &entry->saved_produce_q;
2307 memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
2308 sizeof(entry->saved_consume_q));
2309 entry->consume_q->saved_header = &entry->saved_consume_q;
2310
2311 return VMCI_SUCCESS;
2312 }
2313
2314
2315
2316
2317
2318
2319
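/*
 * Called when a guest is about to revoke the memory backing a queue
 * pair, for example before suspend: the queue headers are saved, the
 * kernel mappings are torn down, the pinned pages are released, and
 * the broker state drops back from *_MEM to *_NO_MEM.
 */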
2320 int vmci_qp_broker_unmap(struct vmci_handle handle,
2321 struct vmci_ctx *context,
2322 u32 gid)
2323 {
2324 struct qp_broker_entry *entry;
2325 const u32 context_id = vmci_ctx_get_id(context);
2326 int result;
2327
2328 if (vmci_handle_is_invalid(handle) || !context ||
2329 context_id == VMCI_INVALID_ID)
2330 return VMCI_ERROR_INVALID_ARGS;
2331
2332 mutex_lock(&qp_broker_list.mutex);
2333
2334 if (!vmci_ctx_qp_exists(context, handle)) {
2335 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2336 context_id, handle.context, handle.resource);
2337 result = VMCI_ERROR_NOT_FOUND;
2338 goto out;
2339 }
2340
2341 entry = qp_broker_handle_to_entry(handle);
2342 if (!entry) {
2343 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2344 context_id, handle.context, handle.resource);
2345 result = VMCI_ERROR_NOT_FOUND;
2346 goto out;
2347 }
2348
2349 if (context_id != entry->create_id && context_id != entry->attach_id) {
2350 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2351 goto out;
2352 }
2353
2354 if (context_id != VMCI_HOST_CONTEXT_ID &&
2355 QPBROKERSTATE_HAS_MEM(entry)) {
2356 qp_acquire_queue_mutex(entry->produce_q);
2357 result = qp_save_headers(entry);
2358 if (result < VMCI_SUCCESS)
2359 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2360 handle.context, handle.resource, result);
2361
2362 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);
2363
2364
2365
2366
2367
2368
2369
2370
2371 qp_host_unregister_user_memory(entry->produce_q,
2372 entry->consume_q);
2373
2374
2375
2376
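/* Move the state back from *_MEM to the corresponding *_NO_MEM state. */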
2377 entry->state--;
2378
2379 qp_release_queue_mutex(entry->produce_q);
2380 }
2381
2382 result = VMCI_SUCCESS;
2383
2384 out:
2385 mutex_unlock(&qp_broker_list.mutex);
2386 return result;
2387 }
2388
2389
2390
2391
2392
2393
2394
2395 void vmci_qp_guest_endpoints_exit(void)
2396 {
2397 struct qp_entry *entry;
2398 struct qp_guest_endpoint *ep;
2399
2400 mutex_lock(&qp_guest_endpoints.mutex);
2401
2402 while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
2403 ep = (struct qp_guest_endpoint *)entry;
2404
2405
2406 if (!(entry->flags & VMCI_QPFLAG_LOCAL))
2407 qp_detatch_hypercall(entry->handle);
2408
2409
2410 entry->ref_count = 0;
2411 qp_list_remove_entry(&qp_guest_endpoints, entry);
2412
2413 qp_guest_endpoint_destroy(ep);
2414 }
2415
2416 mutex_unlock(&qp_guest_endpoints.mutex);
2417 }
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427 static void qp_lock(const struct vmci_qp *qpair)
2428 {
2429 qp_acquire_queue_mutex(qpair->produce_q);
2430 }
2431
2432
2433
2434
2435
2436 static void qp_unlock(const struct vmci_qp *qpair)
2437 {
2438 qp_release_queue_mutex(qpair->produce_q);
2439 }
2440
2441
2442
2443
2444
2445 static int qp_map_queue_headers(struct vmci_queue *produce_q,
2446 struct vmci_queue *consume_q)
2447 {
2448 int result;
2449
2450 if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
2451 result = qp_host_map_queues(produce_q, consume_q);
2452 if (result < VMCI_SUCCESS)
2453 return (produce_q->saved_header &&
2454 consume_q->saved_header) ?
2455 VMCI_ERROR_QUEUEPAIR_NOT_READY :
2456 VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2457 }
2458
2459 return VMCI_SUCCESS;
2460 }
2461
2462
2463
2464
2465
2466
2467
2468 static int qp_get_queue_headers(const struct vmci_qp *qpair,
2469 struct vmci_queue_header **produce_q_header,
2470 struct vmci_queue_header **consume_q_header)
2471 {
2472 int result;
2473
2474 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
2475 if (result == VMCI_SUCCESS) {
2476 *produce_q_header = qpair->produce_q->q_header;
2477 *consume_q_header = qpair->consume_q->q_header;
2478 } else if (qpair->produce_q->saved_header &&
2479 qpair->consume_q->saved_header) {
2480 *produce_q_header = qpair->produce_q->saved_header;
2481 *consume_q_header = qpair->consume_q->saved_header;
2482 result = VMCI_SUCCESS;
2483 }
2484
2485 return result;
2486 }
2487
/*
 * Callback from the VMCI queue pair broker indicating that a queue
 * pair that was previously not ready has now either become ready or
 * gone away for good.
 */
2493 static int qp_wakeup_cb(void *client_data)
2494 {
2495 struct vmci_qp *qpair = (struct vmci_qp *)client_data;
2496
2497 qp_lock(qpair);
2498 while (qpair->blocked > 0) {
2499 qpair->blocked--;
2500 qpair->generation++;
2501 wake_up(&qpair->event);
2502 }
2503 qp_unlock(qpair);
2504
2505 return VMCI_SUCCESS;
2506 }
2507
/*
 * Makes the calling thread wait for the queue pair to become ready for
 * host side access.  Returns true when the thread is woken up after a
 * queue pair state change, false otherwise.
 */
2513 static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
2514 {
2515 unsigned int generation;
2516
2517 qpair->blocked++;
2518 generation = qpair->generation;
2519 qp_unlock(qpair);
2520 wait_event(qpair->event, generation != qpair->generation);
2521 qp_lock(qpair);
2522
2523 return true;
2524 }
2525
/*
 * Enqueues a given buffer to the produce queue.  As many bytes as
 * possible (space available in the queue) are enqueued.  Assumes the
 * queue->mutex has been acquired.  Returns
 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
 * data, VMCI_ERROR_QUEUEPAIR_NOT_READY or
 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED if the queue pair memory cannot be
 * mapped, and a negative error if copying from the iterator fails.
 * Otherwise, the number of bytes written to the queue is returned and
 * the tail pointer of the produce queue is updated.
 */
2538 static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
2539 struct vmci_queue *consume_q,
2540 const u64 produce_q_size,
2541 struct iov_iter *from)
2542 {
2543 s64 free_space;
2544 u64 tail;
2545 size_t buf_size = iov_iter_count(from);
2546 size_t written;
2547 ssize_t result;
2548
2549 result = qp_map_queue_headers(produce_q, consume_q);
2550 if (unlikely(result != VMCI_SUCCESS))
2551 return result;
2552
2553 free_space = vmci_q_header_free_space(produce_q->q_header,
2554 consume_q->q_header,
2555 produce_q_size);
2556 if (free_space == 0)
2557 return VMCI_ERROR_QUEUEPAIR_NOSPACE;
2558
2559 if (free_space < VMCI_SUCCESS)
2560 return (ssize_t) free_space;
2561
2562 written = (size_t) (free_space > buf_size ? buf_size : free_space);
2563 tail = vmci_q_header_producer_tail(produce_q->q_header);
2564 if (likely(tail + written < produce_q_size)) {
2565 result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
2566 } else {
/* Tail pointer wraps around. */
2568
2569 const size_t tmp = (size_t) (produce_q_size - tail);
2570
2571 result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
2572 if (result >= VMCI_SUCCESS)
2573 result = qp_memcpy_to_queue_iter(produce_q, 0, from,
2574 written - tmp);
2575 }
2576
2577 if (result < VMCI_SUCCESS)
2578 return result;
2579
/*
 * The virt_wmb() ensures that the data written to the queue is
 * observable by the peer before the updated producer tail is.
 */
2584 virt_wmb();
2585
2586 vmci_q_header_add_producer_tail(produce_q->q_header, written,
2587 produce_q_size);
2588 return written;
2589 }
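
/*
 * Worked example for the wrap-around path in qp_enqueue_locked() above
 * (illustrative only, not part of the original source): with
 * produce_q_size = 8, tail = 6 and written = 5, tail + written = 11
 * does not fit before the end of the queue, so tmp = 8 - 6 = 2 bytes
 * are copied at offset 6 and the remaining written - tmp = 3 bytes at
 * offset 0.  vmci_q_header_add_producer_tail() then advances the tail
 * to (6 + 5) % 8 = 3.
 */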
2590
/*
 * Dequeues data (if available) from the given consume queue and writes
 * it to the user provided iterator.  Assumes the queue->mutex has been
 * acquired.
 * Results:
 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
 * VMCI_ERROR_QUEUEPAIR_NOT_READY or VMCI_ERROR_QUEUEPAIR_NOTATTACHED
 * if the queue pair memory cannot be mapped.
 * A negative error if copying to the iterator fails.
 * Otherwise the number of bytes dequeued is returned.
 * Side effects:
 * Updates the head pointer of the consume queue when update_consumer
 * is true (dequeue); leaves it untouched otherwise (peek).
 */
2604 static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
2605 struct vmci_queue *consume_q,
2606 const u64 consume_q_size,
2607 struct iov_iter *to,
2608 bool update_consumer)
2609 {
2610 size_t buf_size = iov_iter_count(to);
2611 s64 buf_ready;
2612 u64 head;
2613 size_t read;
2614 ssize_t result;
2615
2616 result = qp_map_queue_headers(produce_q, consume_q);
2617 if (unlikely(result != VMCI_SUCCESS))
2618 return result;
2619
2620 buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
2621 produce_q->q_header,
2622 consume_q_size);
2623 if (buf_ready == 0)
2624 return VMCI_ERROR_QUEUEPAIR_NODATA;
2625
2626 if (buf_ready < VMCI_SUCCESS)
2627 return (ssize_t) buf_ready;
2628
/*
 * The virt_rmb() ensures that data from the queue is read only after
 * we have determined how much is ready to be consumed.
 */
2633 virt_rmb();
2634
2635 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
2636 head = vmci_q_header_consumer_head(produce_q->q_header);
2637 if (likely(head + read < consume_q_size)) {
2638 result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
2639 } else {
/* Head pointer wraps around. */
2641
2642 const size_t tmp = (size_t) (consume_q_size - head);
2643
2644 result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
2645 if (result >= VMCI_SUCCESS)
2646 result = qp_memcpy_from_queue_iter(to, consume_q, 0,
2647 read - tmp);
2648
2649 }
2650
2651 if (result < VMCI_SUCCESS)
2652 return result;
2653
2654 if (update_consumer)
2655 vmci_q_header_add_consumer_head(produce_q->q_header,
2656 read, consume_q_size);
2657
2658 return read;
2659 }
2660
/*
 * vmci_qpair_alloc() - Allocates a queue pair.
 * @qpair:      Pointer for the new vmci_qp struct.
 * @handle:     Handle to track the resource.
 * @produce_qsize:      Desired size of the producer queue.
 * @consume_qsize:      Desired size of the consumer queue.
 * @peer:       ContextID of the peer.
 * @flags:      VMCI flags.
 * @priv_flags: VMCI privilege flags.
 *
 * This is the client interface for allocating the memory for a
 * vmci_qp structure and then attaching to the underlying queue.  If an
 * error occurs allocating the memory for the vmci_qp structure no
 * attempt is made to attach.  If an error occurs attaching, the
 * structure is freed.
 */
2677 int vmci_qpair_alloc(struct vmci_qp **qpair,
2678 struct vmci_handle *handle,
2679 u64 produce_qsize,
2680 u64 consume_qsize,
2681 u32 peer,
2682 u32 flags,
2683 u32 priv_flags)
2684 {
2685 struct vmci_qp *my_qpair;
2686 int retval;
2687 struct vmci_handle src = VMCI_INVALID_HANDLE;
2688 struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
2689 enum vmci_route route;
2690 vmci_event_release_cb wakeup_cb;
2691 void *client_data;
2692
/*
 * Restrict the size of a single queue pair.  The device already
 * enforces a limit on the total amount of memory that can be allocated
 * to queue pairs for a guest, but that check only happens at the
 * allocation hypercall, after the backing pages have been allocated
 * here.  Capping the size of an individual queue pair up front avoids
 * thrashing the guest while it tries to allocate an unreasonably
 * large pair.  The device reports this condition as NO_RESOURCES, so
 * the same error is used here.
 */
2710 if (!QP_SIZES_ARE_VALID(produce_qsize, consume_qsize))
2711 return VMCI_ERROR_NO_RESOURCES;
2712
2713 retval = vmci_route(&src, &dst, false, &route);
2714 if (retval < VMCI_SUCCESS)
2715 route = vmci_guest_code_active() ?
2716 VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
2717
2718 if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
pr_devel("NONBLOCK OR PINNED set\n");
2720 return VMCI_ERROR_INVALID_ARGS;
2721 }
2722
2723 my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
2724 if (!my_qpair)
2725 return VMCI_ERROR_NO_MEM;
2726
2727 my_qpair->produce_q_size = produce_qsize;
2728 my_qpair->consume_q_size = consume_qsize;
2729 my_qpair->peer = peer;
2730 my_qpair->flags = flags;
2731 my_qpair->priv_flags = priv_flags;
2732
2733 wakeup_cb = NULL;
2734 client_data = NULL;
2735
if (route == VMCI_ROUTE_AS_HOST) {
2737 my_qpair->guest_endpoint = false;
2738 if (!(flags & VMCI_QPFLAG_LOCAL)) {
2739 my_qpair->blocked = 0;
2740 my_qpair->generation = 0;
2741 init_waitqueue_head(&my_qpair->event);
2742 wakeup_cb = qp_wakeup_cb;
2743 client_data = (void *)my_qpair;
2744 }
2745 } else {
2746 my_qpair->guest_endpoint = true;
2747 }
2748
2749 retval = vmci_qp_alloc(handle,
2750 &my_qpair->produce_q,
2751 my_qpair->produce_q_size,
2752 &my_qpair->consume_q,
2753 my_qpair->consume_q_size,
2754 my_qpair->peer,
2755 my_qpair->flags,
2756 my_qpair->priv_flags,
2757 my_qpair->guest_endpoint,
2758 wakeup_cb, client_data);
2759
2760 if (retval < VMCI_SUCCESS) {
2761 kfree(my_qpair);
2762 return retval;
2763 }
2764
2765 *qpair = my_qpair;
2766 my_qpair->handle = *handle;
2767
2768 return retval;
2769 }
2770 EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
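
/*
 * Minimal usage sketch (illustrative only, not part of this file): a
 * client that knows the context ID of its peer could allocate an
 * attachable queue pair roughly as follows.  The 64 KiB sizes and the
 * example_attach()/peer_cid names are arbitrary choices for the sketch.
 *
 *	static int example_attach(u32 peer_cid, struct vmci_qp **qp,
 *				  struct vmci_handle *handle)
 *	{
 *		*handle = VMCI_INVALID_HANDLE;
 *		return vmci_qpair_alloc(qp, handle, 64 * 1024, 64 * 1024,
 *					peer_cid, 0, VMCI_NO_PRIVILEGE_FLAGS);
 *	}
 *
 * On success the returned *qp is later released with vmci_qpair_detach().
 */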
2771
/*
 * vmci_qpair_detach() - Detaches the client from a queue pair.
 * @qpair:      Reference of a pointer to the qpair struct.
 *
 * This is the client interface for detaching from a VMCIQPair.
 * Note that this routine will free the memory allocated for the
 * vmci_qp structure too.
 */
2780 int vmci_qpair_detach(struct vmci_qp **qpair)
2781 {
2782 int result;
2783 struct vmci_qp *old_qpair;
2784
2785 if (!qpair || !(*qpair))
2786 return VMCI_ERROR_INVALID_ARGS;
2787
2788 old_qpair = *qpair;
2789 result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
2790
/*
 * The detach can fail: the guest may fail the hypercall and the host
 * defers part of its cleanup until the context is destroyed.  Either
 * way the vmci_qp structure is released here, since there is little
 * the caller could do with it and we do not want to leak it.  The
 * structure is cleared first so that later use of a stale pointer or
 * handle fails quickly.
 */
2801 memset(old_qpair, 0, sizeof(*old_qpair));
2802 old_qpair->handle = VMCI_INVALID_HANDLE;
2803 old_qpair->peer = VMCI_INVALID_ID;
2804 kfree(old_qpair);
2805 *qpair = NULL;
2806
2807 return result;
2808 }
2809 EXPORT_SYMBOL_GPL(vmci_qpair_detach);
2810
/*
 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
 * @qpair:      Pointer to the queue pair struct.
 * @producer_tail:      Reference used for storing the producer tail index.
 * @consumer_head:      Reference used for storing the consumer head index.
 *
 * This is the client interface for getting the current indexes of the
 * QPair from the point of view of the caller as the producer.
 */
2820 int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
2821 u64 *producer_tail,
2822 u64 *consumer_head)
2823 {
2824 struct vmci_queue_header *produce_q_header;
2825 struct vmci_queue_header *consume_q_header;
2826 int result;
2827
2828 if (!qpair)
2829 return VMCI_ERROR_INVALID_ARGS;
2830
2831 qp_lock(qpair);
2832 result =
2833 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2834 if (result == VMCI_SUCCESS)
2835 vmci_q_header_get_pointers(produce_q_header, consume_q_header,
2836 producer_tail, consumer_head);
2837 qp_unlock(qpair);
2838
2839 if (result == VMCI_SUCCESS &&
2840 ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
2841 (consumer_head && *consumer_head >= qpair->produce_q_size)))
2842 return VMCI_ERROR_INVALID_SIZE;
2843
2844 return result;
2845 }
2846 EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
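
/*
 * Illustrative sketch (not part of the original file): a producer that
 * remembers the produce queue size it passed to vmci_qpair_alloc()
 * (produce_qsize below, a caller-supplied value) can derive the number
 * of bytes currently enqueued but not yet consumed from the two
 * indexes, e.g. for statistics:
 *
 *	u64 tail, head, used;
 *
 *	if (vmci_qpair_get_produce_indexes(qp, &tail, &head) == VMCI_SUCCESS)
 *		used = (tail >= head) ? tail - head :
 *					produce_qsize - (head - tail);
 */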
2847
/*
 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
 * @qpair:      Pointer to the queue pair struct.
 * @consumer_tail:      Reference used for storing the consumer tail index.
 * @producer_head:      Reference used for storing the producer head index.
 *
 * This is the client interface for getting the current indexes of the
 * QPair from the point of view of the caller as the consumer.
 */
2857 int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
2858 u64 *consumer_tail,
2859 u64 *producer_head)
2860 {
2861 struct vmci_queue_header *produce_q_header;
2862 struct vmci_queue_header *consume_q_header;
2863 int result;
2864
2865 if (!qpair)
2866 return VMCI_ERROR_INVALID_ARGS;
2867
2868 qp_lock(qpair);
2869 result =
2870 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2871 if (result == VMCI_SUCCESS)
2872 vmci_q_header_get_pointers(consume_q_header, produce_q_header,
2873 consumer_tail, producer_head);
2874 qp_unlock(qpair);
2875
2876 if (result == VMCI_SUCCESS &&
2877 ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
2878 (producer_head && *producer_head >= qpair->consume_q_size)))
2879 return VMCI_ERROR_INVALID_SIZE;
2880
2881 return result;
2882 }
2883 EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
2884
/*
 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of free space in
 * the QPair from the point of view of the caller as the producer, which
 * is the common case.  Returns < 0 if an error occurred, otherwise the
 * number of bytes into which data can still be enqueued.
 */
2894 s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
2895 {
2896 struct vmci_queue_header *produce_q_header;
2897 struct vmci_queue_header *consume_q_header;
2898 s64 result;
2899
2900 if (!qpair)
2901 return VMCI_ERROR_INVALID_ARGS;
2902
2903 qp_lock(qpair);
2904 result =
2905 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2906 if (result == VMCI_SUCCESS)
2907 result = vmci_q_header_free_space(produce_q_header,
2908 consume_q_header,
2909 qpair->produce_q_size);
2910 else
2911 result = 0;
2912
2913 qp_unlock(qpair);
2914
2915 return result;
2916 }
2917 EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
2918
/*
 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of free space in
 * the QPair from the point of view of the caller as the consumer, which
 * is not the common case.  Returns < 0 if an error occurred, otherwise
 * the number of free bytes in the consume queue.
 */
2928 s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
2929 {
2930 struct vmci_queue_header *produce_q_header;
2931 struct vmci_queue_header *consume_q_header;
2932 s64 result;
2933
2934 if (!qpair)
2935 return VMCI_ERROR_INVALID_ARGS;
2936
2937 qp_lock(qpair);
2938 result =
2939 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2940 if (result == VMCI_SUCCESS)
2941 result = vmci_q_header_free_space(consume_q_header,
2942 produce_q_header,
2943 qpair->consume_q_size);
2944 else
2945 result = 0;
2946
2947 qp_unlock(qpair);
2948
2949 return result;
2950 }
2951 EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
2952
/*
 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
 * the producer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of enqueued data
 * in the QPair from the point of view of the caller as the producer,
 * which is not the common case.  Returns < 0 if an error occurred,
 * otherwise the number of bytes ready to be consumed by the peer.
 */
2963 s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
2964 {
2965 struct vmci_queue_header *produce_q_header;
2966 struct vmci_queue_header *consume_q_header;
2967 s64 result;
2968
2969 if (!qpair)
2970 return VMCI_ERROR_INVALID_ARGS;
2971
2972 qp_lock(qpair);
2973 result =
2974 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2975 if (result == VMCI_SUCCESS)
2976 result = vmci_q_header_buf_ready(produce_q_header,
2977 consume_q_header,
2978 qpair->produce_q_size);
2979 else
2980 result = 0;
2981
2982 qp_unlock(qpair);
2983
2984 return result;
2985 }
2986 EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
2987
/*
 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
 * the consumer queue.
 * @qpair:      Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of enqueued data
 * in the QPair from the point of view of the caller as the consumer,
 * which is the normal case.  Returns < 0 if an error occurred,
 * otherwise the number of bytes ready to be dequeued.
 */
2998 s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
2999 {
3000 struct vmci_queue_header *produce_q_header;
3001 struct vmci_queue_header *consume_q_header;
3002 s64 result;
3003
3004 if (!qpair)
3005 return VMCI_ERROR_INVALID_ARGS;
3006
3007 qp_lock(qpair);
3008 result =
3009 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
3010 if (result == VMCI_SUCCESS)
3011 result = vmci_q_header_buf_ready(consume_q_header,
3012 produce_q_header,
3013 qpair->consume_q_size);
3014 else
3015 result = 0;
3016
3017 qp_unlock(qpair);
3018
3019 return result;
3020 }
3021 EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
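
/*
 * Illustrative sketch (not from the original source): a reader may poll
 * the amount of data its peer has produced before dequeueing.  qp, buf
 * and buf_len are assumed to be provided by the caller:
 *
 *	s64 ready = vmci_qpair_consume_buf_ready(qp);
 *
 *	if (ready > 0)
 *		nread = vmci_qpair_dequeue(qp, buf,
 *					   min_t(size_t, buf_len, ready), 0);
 *
 * A return of 0 simply means the peer has not produced anything yet; a
 * negative value is a VMCI error code.
 */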
3022
/*
 * vmci_qpair_enqueue() - Throw data on the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to buffer containing data.
 * @buf_size:   Length of buffer.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for enqueueing data into the queue.
 * Returns the number of bytes enqueued or < 0 on error.
 */
3033 ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
3034 const void *buf,
3035 size_t buf_size,
3036 int buf_type)
3037 {
3038 ssize_t result;
3039 struct iov_iter from;
3040 struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};
3041
3042 if (!qpair || !buf)
3043 return VMCI_ERROR_INVALID_ARGS;
3044
3045 iov_iter_kvec(&from, WRITE, &v, 1, buf_size);
3046
3047 qp_lock(qpair);
3048
3049 do {
3050 result = qp_enqueue_locked(qpair->produce_q,
3051 qpair->consume_q,
3052 qpair->produce_q_size,
3053 &from);
3054
3055 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3056 !qp_wait_for_ready_queue(qpair))
3057 result = VMCI_ERROR_WOULD_BLOCK;
3058
3059 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3060
3061 qp_unlock(qpair);
3062
3063 return result;
3064 }
3065 EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
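
/*
 * Minimal usage sketch (illustrative, not part of this file): enqueue a
 * small message and handle the "queue full" case.  qp is assumed to be
 * an attached pair; buf_type is unused by this implementation, so 0 is
 * passed, and the -EAGAIN/-EIO mapping is just an example policy.
 *
 *	const char msg[] = "ping";
 *	ssize_t n;
 *
 *	n = vmci_qpair_enqueue(qp, msg, sizeof(msg), 0);
 *	if (n == VMCI_ERROR_QUEUEPAIR_NOSPACE)
 *		return -EAGAIN;
 *	if (n < 0)
 *		return -EIO;
 *
 * On success n is the number of bytes actually copied into the queue,
 * which may be less than sizeof(msg) if only partial space was free.
 */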
3066
/*
 * vmci_qpair_dequeue() - Get data from the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to buffer for the data.
 * @buf_size:   Length of buffer.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for dequeueing data from the queue.
 * Returns the number of bytes dequeued or < 0 on error.
 */
3077 ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
3078 void *buf,
3079 size_t buf_size,
3080 int buf_type)
3081 {
3082 ssize_t result;
3083 struct iov_iter to;
3084 struct kvec v = {.iov_base = buf, .iov_len = buf_size};
3085
3086 if (!qpair || !buf)
3087 return VMCI_ERROR_INVALID_ARGS;
3088
3089 iov_iter_kvec(&to, READ, &v, 1, buf_size);
3090
3091 qp_lock(qpair);
3092
3093 do {
3094 result = qp_dequeue_locked(qpair->produce_q,
3095 qpair->consume_q,
3096 qpair->consume_q_size,
3097 &to, true);
3098
3099 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3100 !qp_wait_for_ready_queue(qpair))
3101 result = VMCI_ERROR_WOULD_BLOCK;
3102
3103 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3104
3105 qp_unlock(qpair);
3106
3107 return result;
3108 }
3109 EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
3110
/*
 * vmci_qpair_peek() - Peek at the data in the queue.
 * @qpair:      Pointer to the queue pair struct.
 * @buf:        Pointer to buffer for the data.
 * @buf_size:   Length of buffer.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for peeking into a queue.  (I.e., the
 * data is copied out of the queue but the head index is not updated,
 * so the data remains available for a subsequent dequeue.)
 * Returns the number of bytes peeked or < 0 on error.
 */
3122 ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
3123 void *buf,
3124 size_t buf_size,
3125 int buf_type)
3126 {
3127 struct iov_iter to;
3128 struct kvec v = {.iov_base = buf, .iov_len = buf_size};
3129 ssize_t result;
3130
3131 if (!qpair || !buf)
3132 return VMCI_ERROR_INVALID_ARGS;
3133
3134 iov_iter_kvec(&to, READ, &v, 1, buf_size);
3135
3136 qp_lock(qpair);
3137
3138 do {
3139 result = qp_dequeue_locked(qpair->produce_q,
3140 qpair->consume_q,
3141 qpair->consume_q_size,
3142 &to, false);
3143
3144 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3145 !qp_wait_for_ready_queue(qpair))
3146 result = VMCI_ERROR_WOULD_BLOCK;
3147
3148 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3149
3150 qp_unlock(qpair);
3151
3152 return result;
3153 }
3154 EXPORT_SYMBOL_GPL(vmci_qpair_peek);
3155
/*
 * vmci_qpair_enquev() - Throw data on the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to the msghdr whose iterator holds the data.
 * @iov_size:   Length of the iov data.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for enqueueing data into the queue.
 * This function uses an iovec-backed iterator instead of a linear
 * buffer.  Returns the number of bytes enqueued or < 0 on error.
 */
3167 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
3168 struct msghdr *msg,
3169 size_t iov_size,
3170 int buf_type)
3171 {
3172 ssize_t result;
3173
3174 if (!qpair)
3175 return VMCI_ERROR_INVALID_ARGS;
3176
3177 qp_lock(qpair);
3178
3179 do {
3180 result = qp_enqueue_locked(qpair->produce_q,
3181 qpair->consume_q,
3182 qpair->produce_q_size,
3183 &msg->msg_iter);
3184
3185 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3186 !qp_wait_for_ready_queue(qpair))
3187 result = VMCI_ERROR_WOULD_BLOCK;
3188
3189 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3190
3191 qp_unlock(qpair);
3192
3193 return result;
3194 }
3195 EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
3196
/*
 * vmci_qpair_dequev() - Get data from the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to the msghdr whose iterator receives the data.
 * @iov_size:   Length of the iov data.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for dequeueing data from the queue.
 * This function uses an iovec-backed iterator instead of a linear
 * buffer.  Returns the number of bytes dequeued or < 0 on error.
 */
3208 ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
3209 struct msghdr *msg,
3210 size_t iov_size,
3211 int buf_type)
3212 {
3213 ssize_t result;
3214
3215 if (!qpair)
3216 return VMCI_ERROR_INVALID_ARGS;
3217
3218 qp_lock(qpair);
3219
3220 do {
3221 result = qp_dequeue_locked(qpair->produce_q,
3222 qpair->consume_q,
3223 qpair->consume_q_size,
3224 &msg->msg_iter, true);
3225
3226 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3227 !qp_wait_for_ready_queue(qpair))
3228 result = VMCI_ERROR_WOULD_BLOCK;
3229
3230 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3231
3232 qp_unlock(qpair);
3233
3234 return result;
3235 }
3236 EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
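
/*
 * Illustrative sketch (not from the original source): kernel callers
 * without a pre-built msghdr can wrap a plain buffer in a kvec-backed
 * iterator and hand it to the *v variants, mirroring what
 * vmci_qpair_dequeue() does internally.  buf, len and qp are assumed
 * to be provided by the caller:
 *
 *	struct kvec v = { .iov_base = buf, .iov_len = len };
 *	struct msghdr msg = { };
 *	ssize_t n;
 *
 *	iov_iter_kvec(&msg.msg_iter, READ, &v, 1, len);
 *	n = vmci_qpair_dequev(qp, &msg, len, 0);
 */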
3237
/*
 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
 * @qpair:      Pointer to the queue pair struct.
 * @msg:        Pointer to the msghdr whose iterator receives the data.
 * @iov_size:   Length of the iov data.
 * @buf_type:   Buffer type (unused).
 *
 * This is the client interface for peeking into a queue.  (I.e., the
 * data is copied out of the queue but the head index is not updated,
 * so the data remains available for a subsequent dequeue.)
 * This function uses an iovec-backed iterator instead of a linear
 * buffer.  Returns the number of bytes peeked or < 0 on error.
 */
3250 ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
3251 struct msghdr *msg,
3252 size_t iov_size,
3253 int buf_type)
3254 {
3255 ssize_t result;
3256
3257 if (!qpair)
3258 return VMCI_ERROR_INVALID_ARGS;
3259
3260 qp_lock(qpair);
3261
3262 do {
3263 result = qp_dequeue_locked(qpair->produce_q,
3264 qpair->consume_q,
3265 qpair->consume_q_size,
3266 &msg->msg_iter, false);
3267
3268 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3269 !qp_wait_for_ready_queue(qpair))
3270 result = VMCI_ERROR_WOULD_BLOCK;
3271
3272 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3273
3274 qp_unlock(qpair);
3275 return result;
3276 }
3277 EXPORT_SYMBOL_GPL(vmci_qpair_peekv);