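/*
 * VMCI host personality: implements the /dev/vmci misc character device
 * through which user level VMX processes create VMCI contexts and exchange
 * datagrams, doorbells and queue pairs with guest endpoints.
 */
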
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/io.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define VMCI_UTIL_NUM_RESOURCES 1

enum {
	VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
	VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
};

enum {
	VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
	VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
	VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
};

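/*
 * Initialization block passed in by the VMX through IOCTL_VMCI_INIT_CONTEXT
 * when it creates its VMCI context.
 */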
struct vmci_init_blk {
	u32 cid;
	u32 flags;
};

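/*
 * Queue pair allocation info layout used by VMX versions older than
 * VMCI_VERSION_NOVMVM (VM-to-VM queue pairs).
 */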
struct vmci_qp_alloc_info_vmvm {
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 produce_page_file;
	u64 consume_page_file;
	u64 produce_page_file_size;
	u64 consume_page_file_size;
	s32 result;
	u32 _pad;
};

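/* Argument block for the IOCTL_VMCI_SET_NOTIFY ioctl. */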
struct vmci_set_notify_info {
	u64 notify_uva;
	s32 result;
	u32 _pad;
};

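/*
 * Per-open-file state for /dev/vmci.  The VMCI context is created lazily
 * by IOCTL_VMCI_INIT_CONTEXT; until then ct_type stays VMCIOBJ_NOT_SET.
 */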
struct vmci_host_dev {
	struct vmci_ctx *context;
	int user_version;
	enum vmci_obj_type ct_type;
	struct mutex lock;
};

static struct vmci_ctx *host_context;
static bool vmci_host_device_initialized;
static atomic_t vmci_host_active_users = ATOMIC_INIT(0);

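/*
 * The host personality is considered active when the host device has been
 * initialized and either no guest personality is active or at least one
 * VMX still holds an open VMCI context.
 */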
bool vmci_host_code_active(void)
{
	return vmci_host_device_initialized &&
	    (!vmci_guest_code_active() ||
	     atomic_read(&vmci_host_active_users) > 0);
}

int vmci_host_users(void)
{
	return atomic_read(&vmci_host_active_users);
}

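/*
 * Called on open of /dev/vmci.  Allocates the per-file state; the actual
 * VMCI context is only created by IOCTL_VMCI_INIT_CONTEXT.
 */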
static int vmci_host_open(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev;

	vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
	if (vmci_host_dev == NULL)
		return -ENOMEM;

	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
	mutex_init(&vmci_host_dev->lock);
	filp->private_data = vmci_host_dev;

	return 0;
}

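/*
 * Called on close of /dev/vmci, most often when the process exits.
 */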
static int vmci_host_close(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;

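		/*
		 * The active user count tracks whether any VMX'en are using
		 * the host personality; it is incremented when a context is
		 * created through IOCTL_VMCI_INIT_CONTEXT and decremented
		 * here when that context goes away.
		 */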
		atomic_dec(&vmci_host_active_users);
	}
	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;

	kfree(vmci_host_dev);
	filp->private_data = NULL;
	return 0;
}

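/*
 * Poll entry point for /dev/vmci: reports EPOLLIN when the context has
 * pending datagrams or doorbell notifications.
 */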
static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	struct vmci_ctx *context = vmci_host_dev->context;
	__poll_t mask = 0;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
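		/* Check for VMCI calls to this VM context. */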
		if (wait)
			poll_wait(filp, &context->host_context.wait_queue,
				  wait);

		spin_lock(&context->lock);
		if (context->pending_datagrams > 0 ||
		    vmci_handle_arr_get_size(
				context->pending_doorbell_array) > 0) {
			mask = EPOLLIN;
		}
		spin_unlock(&context->lock);
	}
	return mask;
}

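/*
 * Copy the handles in a handle array out to a user buffer and return the
 * number of bytes written in *user_buf_size.  The VMCI status reflects the
 * size check only; a failed copy_to_user() is reported through *retval.
 */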
static int drv_cp_harray_to_user(void __user *user_buf_uva,
				 u64 *user_buf_size,
				 struct vmci_handle_arr *handle_array,
				 int *retval)
{
	u32 array_size = 0;
	struct vmci_handle *handles;

	if (handle_array)
		array_size = vmci_handle_arr_get_size(handle_array);

	if (array_size * sizeof(*handles) > *user_buf_size)
		return VMCI_ERROR_MORE_DATA;

	*user_buf_size = array_size * sizeof(*handles);
	if (*user_buf_size)
		*retval = copy_to_user(user_buf_uva,
				       vmci_handle_arr_get_handles
				       (handle_array), *user_buf_size);

	return VMCI_SUCCESS;
}

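/*
 * Sets up a given context for notify to work.  Maps the notify boolean at
 * the given user VA into kernel space.
 */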
static int vmci_host_setup_notify(struct vmci_ctx *context,
				  unsigned long uva)
{
	int retval;

	if (context->notify_page) {
		pr_devel("%s: Notify mechanism is already set up\n", __func__);
		return VMCI_ERROR_DUPLICATE_ENTRY;
	}

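	/*
	 * We use 'bool' internally; be explicit that the flag stored at the
	 * user VA is a single byte.
	 */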
	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));

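	/* Lock the physical page backing the given user VA. */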
	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
	if (retval != 1) {
		context->notify_page = NULL;
		return VMCI_ERROR_GENERIC;
	}

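	/* Map the locked page and set up the notify pointer. */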
	context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
	vmci_ctx_check_signal_notify(context);

	return VMCI_SUCCESS;
}

static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
				 unsigned int cmd, void __user *uptr)
{
	if (cmd == IOCTL_VMCI_VERSION2) {
		int __user *vptr = uptr;

		if (get_user(vmci_host_dev->user_version, vptr))
			return -EFAULT;
	}

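	/*
	 * If the VMX reported a version older than VMCI_VERSION_HOSTQP, echo
	 * that version back so both sides keep speaking the older protocol;
	 * otherwise report our own VMCI_VERSION.
	 */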
	if (vmci_host_dev->user_version > 0 &&
	    vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
		return vmci_host_dev->user_version;
	}

	return VMCI_VERSION;
}

#define vmci_ioctl_err(fmt, ...)	\
	pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)

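/* IOCTL_VMCI_INIT_CONTEXT: create the VMCI context for this file handle. */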
static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
				     const char *ioctl_name,
				     void __user *uptr)
{
	struct vmci_init_blk init_block;
	const struct cred *cred;
	int retval;

	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
		vmci_ioctl_err("error reading init block\n");
		return -EFAULT;
	}

	mutex_lock(&vmci_host_dev->lock);

	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
		vmci_ioctl_err("received VMCI init on initialized handle\n");
		retval = -EINVAL;
		goto out;
	}

	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		vmci_ioctl_err("unsupported VMCI restriction flag\n");
		retval = -EINVAL;
		goto out;
	}

	cred = get_current_cred();
	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
						 init_block.flags, 0,
						 vmci_host_dev->user_version,
						 cred);
	put_cred(cred);
	if (IS_ERR(vmci_host_dev->context)) {
		retval = PTR_ERR(vmci_host_dev->context);
		vmci_ioctl_err("error initializing context\n");
		goto out;
	}

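	/*
	 * Copy the (possibly reassigned) cid back to user level so the VMX
	 * learns which context id it was actually given.
	 */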
	init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
	if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;
		vmci_ioctl_err("error writing init block\n");
		retval = -EFAULT;
		goto out;
	}

	vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
	atomic_inc(&vmci_host_active_users);

	vmci_call_vsock_callback(true);

	retval = 0;

out:
	mutex_unlock(&vmci_host_dev->lock);
	return retval;
}

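/* IOCTL_VMCI_DATAGRAM_SEND: dispatch a datagram on behalf of the context. */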
static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
				      const char *ioctl_name,
				      void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info send_info;
	struct vmci_datagram *dg = NULL;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&send_info, uptr, sizeof(send_info)))
		return -EFAULT;

	if (send_info.len > VMCI_MAX_DG_SIZE) {
		vmci_ioctl_err("datagram is too big (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	if (send_info.len < sizeof(*dg)) {
		vmci_ioctl_err("datagram is too small (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	dg = memdup_user((void __user *)(uintptr_t)send_info.addr,
			 send_info.len);
	if (IS_ERR(dg)) {
		vmci_ioctl_err(
			"cannot allocate memory to dispatch datagram\n");
		return PTR_ERR(dg);
	}

	if (VMCI_DG_SIZE(dg) != send_info.len) {
		vmci_ioctl_err("datagram size mismatch\n");
		kfree(dg);
		return -EINVAL;
	}

	pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
		 dg->dst.context, dg->dst.resource,
		 dg->src.context, dg->src.resource,
		 (unsigned long long)dg->payload_size);

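	/* Get source context id. */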
	cid = vmci_ctx_get_id(vmci_host_dev->context);
	send_info.result = vmci_datagram_dispatch(cid, dg, true);
	kfree(dg);

	return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}

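/* IOCTL_VMCI_DATAGRAM_RECEIVE: dequeue a pending datagram for the context. */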
static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
					 const char *ioctl_name,
					 void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info recv_info;
	struct vmci_datagram *dg = NULL;
	int retval;
	size_t size;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
		return -EFAULT;

	size = recv_info.len;
	recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
						     &size, &dg);

	if (recv_info.result >= VMCI_SUCCESS) {
		void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;

		retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
		kfree(dg);
		if (retval != 0)
			return -EFAULT;
	}

	return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
}

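/*
 * IOCTL_VMCI_QUEUEPAIR_ALLOC: allocate or attach to a queue pair through the
 * queue pair broker.  VMX versions older than VMCI_VERSION_NOVMVM pass the
 * VM-to-VM info layout.
 */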
static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_handle handle;
	int vmci_status;
	int __user *retptr;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		struct vmci_qp_alloc_info_vmvm alloc_info;
		struct vmci_qp_alloc_info_vmvm __user *info = uptr;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   NULL,
						   vmci_host_dev->context);

		if (vmci_status == VMCI_SUCCESS)
			vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
	} else {
		struct vmci_qp_alloc_info alloc_info;
		struct vmci_qp_alloc_info __user *info = uptr;
		struct vmci_qp_page_store page_store;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		page_store.pages = alloc_info.ppn_va;
		page_store.len = alloc_info.num_ppns;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   &page_store,
						   vmci_host_dev->context);
	}

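	/*
	 * If the result cannot be copied back to user space, undo a
	 * successful allocation by detaching from the queue pair again.
	 */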
	if (put_user(vmci_status, retptr)) {
		if (vmci_status >= VMCI_SUCCESS) {
			vmci_status = vmci_qp_broker_detach(handle,
							vmci_host_dev->context);
		}
		return -EFAULT;
	}

	return 0;
}

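/* IOCTL_VMCI_QUEUEPAIR_SETVA: map or unmap a queue pair at a new VMX VA. */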
static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_set_va_info set_va_info;
	struct vmci_qp_set_va_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("is not allowed\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
		return -EFAULT;

	if (set_va_info.va) {
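		/*
		 * VMX is passing down a new VA for the queue pair mapping.
		 */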
		result = vmci_qp_broker_map(set_va_info.handle,
					    vmci_host_dev->context,
					    set_va_info.va);
	} else {
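		/*
		 * The queue pair is about to be unmapped by the VMX.
		 */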
		result = vmci_qp_broker_unmap(set_va_info.handle,
					      vmci_host_dev->context, 0);
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

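/*
 * IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE: hand the VMX-supplied page store for a
 * queue pair to the broker.  Only supported for VMX versions in the
 * [VMCI_VERSION_HOSTQP, VMCI_VERSION_NOVMVM) range.
 */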
static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_page_file_info page_file_info;
	struct vmci_qp_page_file_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
	    vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("not supported on this VMX (version=%d)\n",
			       vmci_host_dev->user_version);
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
		return -EFAULT;

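	/*
	 * Communicate success pre-emptively to the caller: the VMX is not
	 * supposed to look at info.result unless the ioctl itself succeeds,
	 * and reporting success up front avoids having to unwind a
	 * successful set_page_store() just because the final copy to user
	 * space failed.
	 */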
	if (put_user(VMCI_SUCCESS, &info->result)) {
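		/*
		 * We cannot report a result the caller will see, so all we
		 * can do is fail the ioctl.
		 */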
		return -EFAULT;
	}

	result = vmci_qp_broker_set_page_store(page_file_info.handle,
					       page_file_info.produce_va,
					       page_file_info.consume_va,
					       vmci_host_dev->context);
	if (result < VMCI_SUCCESS) {
		if (put_user(result, &info->result)) {
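			/*
			 * The set_page_store() call failed, but we were
			 * unable to communicate its result code to the
			 * caller because the copy_to_user() failed.  The
			 * caller only learns that something went wrong from
			 * the -EFAULT return; losing the ability to write
			 * memory we could write a moment ago usually means
			 * something more serious than the set_page_store()
			 * error itself.
			 */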
			return -EFAULT;
		}
	}

	return 0;
}

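/* IOCTL_VMCI_QUEUEPAIR_DETACH: detach the context from a queue pair. */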
static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
				  const char *ioctl_name,
				  void __user *uptr)
{
	struct vmci_qp_dtch_info detach_info;
	struct vmci_qp_dtch_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
		return -EFAULT;

	result = vmci_qp_broker_detach(detach_info.handle,
				       vmci_host_dev->context);
	if (result == VMCI_SUCCESS &&
	    vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		result = VMCI_SUCCESS_LAST_DETACH;
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

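/*
 * IOCTL_VMCI_CTX_ADD_NOTIFICATION: add a remote context to this context's
 * notification list.
 */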
static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	s32 result;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

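/*
 * IOCTL_VMCI_CTX_REMOVE_NOTIFICATION: drop a remote context from this
 * context's notification list.
 */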
static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	u32 cid;
	int result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_remove_notification(cid,
					      ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

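/* IOCTL_VMCI_CTX_GET_CPT_STATE: read back checkpoint state for the context. */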
static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info get_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&get_info, uptr, sizeof(get_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
						   &get_info.buf_size,
						   &cpt_buf);
	if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
		void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;

		retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
		kfree(cpt_buf);

		if (retval)
			return -EFAULT;
	}

	return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
}

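/* IOCTL_VMCI_CTX_SET_CPT_STATE: restore checkpoint state into the context. */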
static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info set_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_info, uptr, sizeof(set_info)))
		return -EFAULT;

	cpt_buf = memdup_user((void __user *)(uintptr_t)set_info.cpt_buf,
			      set_info.buf_size);
	if (IS_ERR(cpt_buf))
		return PTR_ERR(cpt_buf);

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
						   set_info.buf_size, cpt_buf);

	retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;

	kfree(cpt_buf);
	return retval;
}

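/*
 * IOCTL_VMCI_GET_CONTEXT_ID: the host side always reports
 * VMCI_HOST_CONTEXT_ID.
 */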
static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	u32 __user *u32ptr = uptr;

	return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
}

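/* IOCTL_VMCI_SET_NOTIFY: map or tear down the user space notify flag. */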
static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
				   const char *ioctl_name,
				   void __user *uptr)
{
	struct vmci_set_notify_info notify_info;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
		return -EFAULT;

	if (notify_info.notify_uva) {
		notify_info.result =
			vmci_host_setup_notify(vmci_host_dev->context,
					       notify_info.notify_uva);
	} else {
		vmci_ctx_unset_notify(vmci_host_dev->context);
		notify_info.result = VMCI_SUCCESS;
	}

	return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
		-EFAULT : 0;
}

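/* IOCTL_VMCI_NOTIFY_RESOURCE: create, destroy or signal a doorbell. */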
static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_dbell_notify_resource_info info;
	u32 cid;

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("invalid for current VMX versions\n");
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	switch (info.action) {
	case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
		if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
			u32 flags = VMCI_NO_PRIVILEGE_FLAGS;

			info.result = vmci_ctx_notify_dbell(cid, info.handle,
							    flags);
		} else {
			info.result = VMCI_ERROR_UNAVAILABLE;
		}
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
		info.result = vmci_ctx_dbell_create(cid, info.handle);
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
		info.result = vmci_ctx_dbell_destroy(cid, info.handle);
		break;

	default:
		vmci_ioctl_err("got unknown action (action=%d)\n",
			       info.action);
		info.result = VMCI_ERROR_INVALID_ARGS;
	}

	return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}

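/*
 * IOCTL_VMCI_NOTIFICATIONS_RECEIVE: copy pending doorbell and queue pair
 * handle notifications out to the buffers supplied by the VMX.
 */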
static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
					   const char *ioctl_name,
					   void __user *uptr)
{
	struct vmci_ctx_notify_recv_info info;
	struct vmci_handle_arr *db_handle_array;
	struct vmci_handle_arr *qp_handle_array;
	void __user *ubuf;
	u32 cid;
	int retval = 0;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("not supported for the current vmx version\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
	    (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	info.result = vmci_ctx_rcv_notifications_get(cid,
						     &db_handle_array,
						     &qp_handle_array);
	if (info.result != VMCI_SUCCESS)
		return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;

	ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
	info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
					    db_handle_array, &retval);
	if (info.result == VMCI_SUCCESS && !retval) {
		ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
		info.result = drv_cp_harray_to_user(ubuf,
						    &info.qp_handle_buf_size,
						    qp_handle_array, &retval);
	}

	if (!retval && copy_to_user(uptr, &info, sizeof(info)))
		retval = -EFAULT;

	vmci_ctx_rcv_notifications_release(cid,
					   db_handle_array, qp_handle_array,
					   info.result == VMCI_SUCCESS && !retval);

	return retval;
}

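/*
 * Ioctl dispatcher for /dev/vmci.  Most commands are forwarded to the
 * vmci_host_do_*() helpers above; version queries are handled inline.
 */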
static long vmci_host_unlocked_ioctl(struct file *filp,
				     unsigned int iocmd, unsigned long ioarg)
{
#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {			\
		char *name = "IOCTL_VMCI_" # ioctl_name;		\
		return vmci_host_do_ ## ioctl_fn(			\
			vmci_host_dev, name, uptr);			\
	} while (0)

	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	void __user *uptr = (void __user *)ioarg;

	switch (iocmd) {
	case IOCTL_VMCI_INIT_CONTEXT:
		VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
	case IOCTL_VMCI_DATAGRAM_SEND:
		VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
	case IOCTL_VMCI_DATAGRAM_RECEIVE:
		VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
	case IOCTL_VMCI_QUEUEPAIR_ALLOC:
		VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
	case IOCTL_VMCI_QUEUEPAIR_SETVA:
		VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
	case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
		VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
	case IOCTL_VMCI_QUEUEPAIR_DETACH:
		VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
	case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
	case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
	case IOCTL_VMCI_CTX_GET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
	case IOCTL_VMCI_CTX_SET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
	case IOCTL_VMCI_GET_CONTEXT_ID:
		VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
	case IOCTL_VMCI_SET_NOTIFY:
		VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
	case IOCTL_VMCI_NOTIFY_RESOURCE:
		VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
	case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
		VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);

	case IOCTL_VMCI_VERSION:
	case IOCTL_VMCI_VERSION2:
		return vmci_host_get_version(vmci_host_dev, iocmd, uptr);

	default:
		pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
		return -EINVAL;
	}

#undef VMCI_DO_IOCTL
}

static const struct file_operations vmuser_fops = {
	.owner = THIS_MODULE,
	.open = vmci_host_open,
	.release = vmci_host_close,
	.poll = vmci_host_poll,
	.unlocked_ioctl = vmci_host_unlocked_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

static struct miscdevice vmci_host_miscdev = {
	.name = "vmci",
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &vmuser_fops,
};

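/*
 * Registers the host personality: creates the host context and the
 * /dev/vmci misc device.
 */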
int __init vmci_host_init(void)
{
	int error;

	host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
				       VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
				       -1, VMCI_VERSION, NULL);
	if (IS_ERR(host_context)) {
		error = PTR_ERR(host_context);
		pr_warn("Failed to initialize VMCIContext (error=%d)\n",
			error);
		return error;
	}

	error = misc_register(&vmci_host_miscdev);
	if (error) {
		pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
			vmci_host_miscdev.name,
			MISC_MAJOR, vmci_host_miscdev.minor,
			error);
		pr_warn("Unable to initialize host personality\n");
		vmci_ctx_destroy(host_context);
		return error;
	}

	pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
		vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);

	vmci_host_device_initialized = true;
	return 0;
}

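/*
 * Tears the host personality down again: unregisters /dev/vmci and destroys
 * the host context and queue pair broker state.
 */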
void __exit vmci_host_exit(void)
{
	vmci_host_device_initialized = false;

	misc_deregister(&vmci_host_miscdev);
	vmci_ctx_destroy(host_context);
	vmci_qp_broker_exit();

	pr_debug("VMCI host driver module unloaded\n");
}