#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include "vboxguest_core.h"

/* Get the pointer to the first parameter of a HGCM call request. */
#define VMMDEV_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))

/* The max parameter buffer size for a user request. */
#define VBG_MAX_HGCM_USER_PARM		(24 * SZ_1M)
/* The max parameter buffer size for a kernel request. */
#define VBG_MAX_HGCM_KERNEL_PARM	(16 * SZ_1M)

/* I/O port to write log messages to, for the host to pick up. */
#define VBG_DEBUG_PORT			0x504

static DEFINE_SPINLOCK(vbg_log_lock);
static char vbg_log_buf[128];
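
/*
 * VBG_LOG defines a printf-like logging function which sends each message
 * both to the kernel log, through the given pr_func, and to the host, byte
 * by byte through the VMMDev debug port. vbg_log_lock serializes use of
 * the single shared format buffer vbg_log_buf.
 */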
#define VBG_LOG(name, pr_func) \
void name(const char *fmt, ...) \
{ \
	unsigned long flags; \
	va_list args; \
	int i, count; \
\
	va_start(args, fmt); \
	spin_lock_irqsave(&vbg_log_lock, flags); \
\
	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args); \
	for (i = 0; i < count; i++) \
		outb(vbg_log_buf[i], VBG_DEBUG_PORT); \
\
	pr_func("%s", vbg_log_buf); \
\
	spin_unlock_irqrestore(&vbg_log_lock, flags); \
	va_end(args); \
} \
EXPORT_SYMBOL(name)

VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
VBG_LOG(vbg_err_ratelimited, pr_err_ratelimited);
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif
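
/*
 * Allocate and initialize the header of a request to pass to the host.
 * Requests are handed to the host by physical address through a 32-bit
 * I/O port write, hence the GFP_DMA32 allocation below 4 GiB.
 */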
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
		    u32 requestor)
{
	struct vmmdev_request_header *req;
	int order = get_order(PAGE_ALIGN(len));

	req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
	if (!req)
		return NULL;

	/* Fill the request with 0xaa so uninitialized fields stand out. */
	memset(req, 0xaa, len);

	req->size = len;
	req->version = VMMDEV_REQUEST_HEADER_VERSION;
	req->request_type = req_type;
	req->rc = VERR_GENERAL_FAILURE;
	req->reserved1 = 0;
	req->requestor = requestor;

	return req;
}

void vbg_req_free(void *req, size_t len)
{
	if (!req)
		return;

	free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
}
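
/* Hand a request to the host and return the VBox status code it set. */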
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
	unsigned long phys_req = virt_to_phys(req);

	outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
	/*
	 * The host changes the request as a result of the outl, make sure
	 * the outl and any reads of the req happen in the correct order.
	 */
	mb();

	return ((struct vmmdev_request_header *)req)->rc;
}

static bool hgcm_req_done(struct vbg_dev *gdev,
			  struct vmmdev_hgcmreq_header *header)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	done = header->flags & VMMDEV_HGCM_REQ_DONE;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return done;
}

int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
		     struct vmmdev_hgcm_service_location *loc,
		     u32 *client_id, int *vbox_status)
{
	struct vmmdev_hgcm_connect *hgcm_connect = NULL;
	int rc;

	hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
				     VMMDEVREQ_HGCM_CONNECT, requestor);
	if (!hgcm_connect)
		return -ENOMEM;

	hgcm_connect->header.flags = 0;
	memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
	hgcm_connect->client_id = 0;

	rc = vbg_req_perform(gdev, hgcm_connect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_connect->header));

	if (rc >= 0) {
		*client_id = hgcm_connect->client_id;
		rc = hgcm_connect->header.result;
	}

	vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_connect);
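
/*
 * Example usage, a minimal sketch only: the service name is hypothetical,
 * the vmmdev_hgcm_service_location field names are assumed from the vbox
 * vmmdev types header, and error handling is elided.
 *
 *	struct vmmdev_hgcm_service_location loc = {
 *		.type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING,
 *		.u.localhost.service_name = "SomeHostService",
 *	};
 *	u32 client_id;
 *	int vbox_status, err;
 *
 *	err = vbg_hgcm_connect(gdev, requestor, &loc, &client_id,
 *			       &vbox_status);
 *	if (err == 0 && vbox_status >= 0)
 *		;	// client_id can now be used with vbg_hgcm_call()
 */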

int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
			u32 client_id, int *vbox_status)
{
	struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
	int rc;

	hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
					VMMDEVREQ_HGCM_DISCONNECT,
					requestor);
	if (!hgcm_disconnect)
		return -ENOMEM;

	hgcm_disconnect->header.flags = 0;
	hgcm_disconnect->client_id = client_id;

	rc = vbg_req_perform(gdev, hgcm_disconnect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_disconnect->header));

	if (rc >= 0)
		rc = hgcm_disconnect->header.result;

	vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_disconnect);

/* How many pages does a buffer at this address and length span? */
static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
{
	u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));

	return size >> PAGE_SHIFT;
}

/* Grow *extra by the page-list size needed to describe buf. */
static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
{
	u32 page_count;

	page_count = hgcm_call_buf_size_in_pages(buf, len);
	*extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}

static int hgcm_call_preprocess_linaddr(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	void **bounce_buf_ret, size_t *extra)
{
	void *buf, *bounce_buf;
	bool copy_in;
	u32 len;
	int ret;

	buf = (void *)src_parm->u.pointer.u.linear_addr;
	len = src_parm->u.pointer.size;
	copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;

	if (len > VBG_MAX_HGCM_USER_PARM)
		return -E2BIG;

	bounce_buf = kvmalloc(len, GFP_KERNEL);
	if (!bounce_buf)
		return -ENOMEM;

	/* The caller frees the buffers in *bounce_buf_ret, even on error. */
	*bounce_buf_ret = bounce_buf;

	if (copy_in) {
		ret = copy_from_user(bounce_buf, (void __user *)buf, len);
		if (ret)
			return -EFAULT;
	} else {
		memset(bounce_buf, 0, len);
	}

	hgcm_call_add_pagelist_size(bounce_buf, len, extra);
	return 0;
}
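
/**
 * hgcm_call_preprocess - Validate the call parameters and allocate bounce
 *	buffers for user-space buffers
 * @src_parm:        Pointer to the source function call parameters.
 * @parm_count:      Number of function call parameters.
 * @bounce_bufs_ret: Where to return the allocated bounce-buffer array.
 * @extra:           Gets incremented with the extra request space needed
 *                   for the physical page lists.
 *
 * Return: 0 or a negative errno value.
 */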
static int hgcm_call_preprocess(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
{
	void *buf, **bounce_bufs = NULL;
	u32 i, len;
	int ret;

	for (i = 0; i < parm_count; i++, src_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			if (!bounce_bufs) {
				bounce_bufs = kcalloc(parm_count,
						      sizeof(void *),
						      GFP_KERNEL);
				if (!bounce_bufs)
					return -ENOMEM;

				*bounce_bufs_ret = bounce_bufs;
			}

			ret = hgcm_call_preprocess_linaddr(src_parm,
							   &bounce_bufs[i],
							   extra);
			if (ret)
				return ret;

			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			len = src_parm->u.pointer.size;
			if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
				return -E2BIG;

			hgcm_call_add_pagelist_size(buf, len, extra);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}
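
/* Translate linear-address parameter types to page-list direction flags. */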
static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
	enum vmmdev_hgcm_function_parameter_type type)
{
	switch (type) {
	default:
		WARN_ON(1);
		fallthrough;
	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
		return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
	}
}

/*
 * Initialize a linear-address parameter in the host call request, turning
 * it into a page-list parameter whose physical page list is stored in the
 * extra space after the regular parameters.
 */
static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
	enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
{
	struct vmmdev_hgcm_pagelist *dst_pg_lst;
	struct page *page;
	bool is_vmalloc;
	u32 i, page_count;

	dst_parm->type = type;

	if (len == 0) {
		dst_parm->u.pointer.size = 0;
		dst_parm->u.pointer.u.linear_addr = 0;
		return;
	}

	dst_pg_lst = (void *)call + *off_extra;
	page_count = hgcm_call_buf_size_in_pages(buf, len);
	is_vmalloc = is_vmalloc_addr(buf);

	dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
	dst_parm->u.page_list.size = len;
	dst_parm->u.page_list.offset = *off_extra;
	dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
	dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
	dst_pg_lst->page_count = page_count;

	for (i = 0; i < page_count; i++) {
		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		dst_pg_lst->pages[i] = page_to_phys(page);
		buf += PAGE_SIZE;
	}

	*off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
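
/**
 * hgcm_call_init_call - Initialize the call request that we're sending
 *	to the host
 * @call:        The call to initialize.
 * @client_id:   The client ID of the caller.
 * @function:    The function number of the function to call.
 * @src_parm:    Pointer to the source function call parameters.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bounce-buffer array.
 */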
static void hgcm_call_init_call(
	struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void **bounce_bufs)
{
	struct vmmdev_hgcm_function_parameter *dst_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
	void *buf;

	call->header.flags = 0;
	call->header.result = VINF_SUCCESS;
	call->client_id = client_id;
	call->function = function;
	call->parm_count = parm_count;

	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			hgcm_call_init_linaddr(call, dst_parm, buf,
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		default:
			WARN_ON(1);
			dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
		}
	}
}
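
/**
 * hgcm_cancel_call - Cancel a pending HGCM call
 * @gdev: The VBoxGuest device extension.
 * @call: The call to cancel.
 *
 * Return: VBox status code.
 */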
static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
{
	int rc;

	/*
	 * We use a pre-allocated request for cancellations, which is
	 * protected by cancel_req_mutex. This means that all cancellations
	 * get serialized, which should be fine since they should be rare.
	 */
	mutex_lock(&gdev->cancel_req_mutex);
	gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
	rc = vbg_req_perform(gdev, gdev->cancel_req);
	mutex_unlock(&gdev->cancel_req_mutex);

	if (rc == VERR_NOT_IMPLEMENTED) {
		/* Fall back to the old-style cancel request. */
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
		call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;

		rc = vbg_req_perform(gdev, call);
		if (rc == VERR_INVALID_PARAMETER)
			rc = VERR_NOT_FOUND;
	}

	if (rc >= 0)
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;

	return rc;
}
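
/**
 * vbg_hgcm_do_call - Perform the call and wait for completion
 * @gdev:          The VBoxGuest device extension.
 * @call:          The call to execute.
 * @timeout_ms:    Timeout in milliseconds, U32_MAX means wait forever.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @leak_it:       Set to true when cancelling a timed-out call also
 *                 failed and the request must be leaked rather than freed.
 *
 * Return: 0 or a negative errno value.
 */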
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
			    u32 timeout_ms, bool interruptible, bool *leak_it)
{
	int rc, cancel_rc, ret;
	long timeout;

	*leak_it = false;

	rc = vbg_req_perform(gdev, call);

	/*
	 * If the call failed, then pretend success. Upper layers will
	 * interpret the result code in the packet.
	 */
	if (rc < 0) {
		call->header.result = rc;
		return 0;
	}

	if (rc != VINF_HGCM_ASYNC_EXECUTE)
		return 0;

	/* Host decided to process the request asynchronously, wait for it */
	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	if (interruptible) {
		timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
							   hgcm_req_done(gdev, &call->header),
							   timeout);
	} else {
		timeout = wait_event_timeout(gdev->hgcm_wq,
					     hgcm_req_done(gdev, &call->header),
					     timeout);
	}

	/* timeout > 0 means hgcm_req_done has returned true, so success */
	if (timeout > 0)
		return 0;

	if (timeout == 0)
		ret = -ETIMEDOUT;
	else
		ret = -EINTR;

	/* Cancel the request */
	cancel_rc = hgcm_cancel_call(gdev, call);
	if (cancel_rc >= 0)
		return ret;

	/*
	 * Failed to cancel, this should mean that the cancel has lost the
	 * race with normal completion, wait while the host completes it.
	 */
	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
		timeout = msecs_to_jiffies(500);
	else
		timeout = msecs_to_jiffies(2000);

	timeout = wait_event_timeout(gdev->hgcm_wq,
				     hgcm_req_done(gdev, &call->header),
				     timeout);

	if (WARN_ON(timeout == 0)) {
		/* We really should never get here */
		vbg_err("%s: Call timed out and cancellation failed, leaking the request\n",
			__func__);
		*leak_it = true;
		return ret;
	}

	/* The call has completed normally after all */
	return 0;
}
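
/**
 * hgcm_call_copy_back_result - Copy the result of the call back to the
 *	caller's parameters and user buffers
 * @call:        The HGCM call request.
 * @dst_parm:    Pointer to the function call parameters destination.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bounce-buffer array.
 *
 * Return: 0 or a negative errno value.
 */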
static int hgcm_call_copy_back_result(
	const struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm,
	u32 parm_count, void **bounce_bufs)
{
	const struct vmmdev_hgcm_function_parameter *src_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	void __user *p;
	int ret;
	u32 i;

	/* Copy back parameters. */
	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (dst_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
			dst_parm->u.page_list.size = src_parm->u.page_list.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;

			p = (void __user *)dst_parm->u.pointer.u.linear_addr;
			ret = copy_to_user(p, bounce_bufs[i],
					   min(src_parm->u.pointer.size,
					       dst_parm->u.pointer.size));
			if (ret)
				return -EFAULT;
			break;

		default:
			WARN_ON(1);
			return -EINVAL;
		}
	}

	return 0;
}

int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
		  u32 function, u32 timeout_ms,
		  struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
		  int *vbox_status)
{
	struct vmmdev_hgcm_call *call;
	void **bounce_bufs = NULL;
	bool leak_it;
	size_t size;
	int i, ret;

	size = sizeof(struct vmmdev_hgcm_call) +
	       parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	/*
	 * Validate and buffer the parameters for the call. This also
	 * increases size with the extra space needed for page lists.
	 */
	ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
	if (ret) {
		/* Even on error bounce bufs may still have been allocated */
		goto free_bounce_bufs;
	}

	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
	if (!call) {
		ret = -ENOMEM;
		goto free_bounce_bufs;
	}

	hgcm_call_init_call(call, client_id, function, parms, parm_count,
			    bounce_bufs);

	ret = vbg_hgcm_do_call(gdev, call, timeout_ms,
			       requestor & VMMDEV_REQUESTOR_USERMODE, &leak_it);
	if (ret == 0) {
		*vbox_status = call->header.result;
		ret = hgcm_call_copy_back_result(call, parms, parm_count,
						 bounce_bufs);
	}

	if (!leak_it)
		vbg_req_free(call, size);

free_bounce_bufs:
	if (bounce_bufs) {
		for (i = 0; i < parm_count; i++)
			kvfree(bounce_bufs[i]);
		kfree(bounce_bufs);
	}

	return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call);
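
/*
 * Example usage, a minimal sketch only: the function number, parameter
 * layout and client_id are hypothetical and depend entirely on the HGCM
 * service being called.
 *
 *	struct vmmdev_hgcm_function_parameter parms[2] = {
 *		{ .type = VMMDEV_HGCM_PARM_TYPE_32BIT, .u.value32 = handle },
 *		{
 *			.type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN,
 *			.u.pointer.size = buf_len,
 *			.u.pointer.u.linear_addr = (uintptr_t)buf,
 *		},
 *	};
 *	int vbox_status, err;
 *
 *	err = vbg_hgcm_call(gdev, requestor, client_id, SOME_FUNCTION,
 *			    U32_MAX, parms, 2, &vbox_status);
 *	if (err == 0 && vbox_status < 0)
 *		err = vbg_status_code_to_errno(vbox_status);
 */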

#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
	struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
	u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
	u32 parm_count, int *vbox_status)
{
	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
	u32 i, size;
	int ret = 0;

	/* KISS allocate a temporary parameter array and convert into it. */
	size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	parm64 = kzalloc(size, GFP_KERNEL);
	if (!parm64)
		return -ENOMEM;

	for (i = 0; i < parm_count; i++) {
		switch (parm32[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
			parm64[i].u.value32 = parm32[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
			parm64[i].u.value64 = parm32[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm64[i].type = parm32[i].type;
			parm64[i].u.pointer.size = parm32[i].u.pointer.size;
			parm64[i].u.pointer.u.linear_addr =
			    parm32[i].u.pointer.u.linear_addr;
			break;

		default:
			ret = -EINVAL;
		}
		if (ret < 0)
			goto out_free;
	}

	ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
			    parm64, parm_count, vbox_status);
	if (ret < 0)
		goto out_free;

	/* Copy back the sizes/values the host may have changed. */
	for (i = 0; i < parm_count; i++) {
		switch (parm64[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm32[i].u.value32 = parm64[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm32[i].u.value64 = parm64[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm32[i].u.pointer.size = parm64[i].u.pointer.size;
			break;

		default:
			WARN_ON(1);
			ret = -EINVAL;
		}
	}

out_free:
	kfree(parm64);
	return ret;
}
#endif

/* Translation table from VBox status codes to Linux errno values. */
static const int vbg_status_code_to_errno_table[] = {
	[-VERR_ACCESS_DENIED] = -EPERM,
	[-VERR_FILE_NOT_FOUND] = -ENOENT,
	[-VERR_PROCESS_NOT_FOUND] = -ESRCH,
	[-VERR_INTERRUPTED] = -EINTR,
	[-VERR_DEV_IO_ERROR] = -EIO,
	[-VERR_TOO_MUCH_DATA] = -E2BIG,
	[-VERR_BAD_EXE_FORMAT] = -ENOEXEC,
	[-VERR_INVALID_HANDLE] = -EBADF,
	[-VERR_TRY_AGAIN] = -EAGAIN,
	[-VERR_NO_MEMORY] = -ENOMEM,
	[-VERR_INVALID_POINTER] = -EFAULT,
	[-VERR_RESOURCE_BUSY] = -EBUSY,
	[-VERR_ALREADY_EXISTS] = -EEXIST,
	[-VERR_NOT_SAME_DEVICE] = -EXDEV,
	[-VERR_NOT_A_DIRECTORY] = -ENOTDIR,
	[-VERR_PATH_NOT_FOUND] = -ENOTDIR,
	[-VERR_INVALID_NAME] = -ENOENT,
	[-VERR_IS_A_DIRECTORY] = -EISDIR,
	[-VERR_INVALID_PARAMETER] = -EINVAL,
	[-VERR_TOO_MANY_OPEN_FILES] = -ENFILE,
	[-VERR_INVALID_FUNCTION] = -ENOTTY,
	[-VERR_SHARING_VIOLATION] = -ETXTBSY,
	[-VERR_FILE_TOO_BIG] = -EFBIG,
	[-VERR_DISK_FULL] = -ENOSPC,
	[-VERR_SEEK_ON_DEVICE] = -ESPIPE,
	[-VERR_WRITE_PROTECT] = -EROFS,
	[-VERR_BROKEN_PIPE] = -EPIPE,
	[-VERR_DEADLOCK] = -EDEADLK,
	[-VERR_FILENAME_TOO_LONG] = -ENAMETOOLONG,
	[-VERR_FILE_LOCK_FAILED] = -ENOLCK,
	[-VERR_NOT_IMPLEMENTED] = -ENOSYS,
	[-VERR_NOT_SUPPORTED] = -ENOSYS,
	[-VERR_DIR_NOT_EMPTY] = -ENOTEMPTY,
	[-VERR_TOO_MANY_SYMLINKS] = -ELOOP,
	[-VERR_NO_MORE_FILES] = -ENODATA,
	[-VERR_NO_DATA] = -ENODATA,
	[-VERR_NET_NO_NETWORK] = -ENONET,
	[-VERR_NET_NOT_UNIQUE_NAME] = -ENOTUNIQ,
	[-VERR_NO_TRANSLATION] = -EILSEQ,
	[-VERR_NET_NOT_SOCKET] = -ENOTSOCK,
	[-VERR_NET_DEST_ADDRESS_REQUIRED] = -EDESTADDRREQ,
	[-VERR_NET_MSG_SIZE] = -EMSGSIZE,
	[-VERR_NET_PROTOCOL_TYPE] = -EPROTOTYPE,
	[-VERR_NET_PROTOCOL_NOT_AVAILABLE] = -ENOPROTOOPT,
	[-VERR_NET_PROTOCOL_NOT_SUPPORTED] = -EPROTONOSUPPORT,
	[-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED] = -ESOCKTNOSUPPORT,
	[-VERR_NET_OPERATION_NOT_SUPPORTED] = -EOPNOTSUPP,
	[-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED] = -EPFNOSUPPORT,
	[-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED] = -EAFNOSUPPORT,
	[-VERR_NET_ADDRESS_IN_USE] = -EADDRINUSE,
	[-VERR_NET_ADDRESS_NOT_AVAILABLE] = -EADDRNOTAVAIL,
	[-VERR_NET_DOWN] = -ENETDOWN,
	[-VERR_NET_UNREACHABLE] = -ENETUNREACH,
	[-VERR_NET_CONNECTION_RESET] = -ENETRESET,
	[-VERR_NET_CONNECTION_ABORTED] = -ECONNABORTED,
	[-VERR_NET_CONNECTION_RESET_BY_PEER] = -ECONNRESET,
	[-VERR_NET_NO_BUFFER_SPACE] = -ENOBUFS,
	[-VERR_NET_ALREADY_CONNECTED] = -EISCONN,
	[-VERR_NET_NOT_CONNECTED] = -ENOTCONN,
	[-VERR_NET_SHUTDOWN] = -ESHUTDOWN,
	[-VERR_NET_TOO_MANY_REFERENCES] = -ETOOMANYREFS,
	[-VERR_TIMEOUT] = -ETIMEDOUT,
	[-VERR_NET_CONNECTION_REFUSED] = -ECONNREFUSED,
	[-VERR_NET_HOST_DOWN] = -EHOSTDOWN,
	[-VERR_NET_HOST_UNREACHABLE] = -EHOSTUNREACH,
	[-VERR_NET_ALREADY_IN_PROGRESS] = -EALREADY,
	[-VERR_NET_IN_PROGRESS] = -EINPROGRESS,
	[-VERR_MEDIA_NOT_PRESENT] = -ENOMEDIUM,
	[-VERR_MEDIA_NOT_RECOGNIZED] = -EMEDIUMTYPE,
};

int vbg_status_code_to_errno(int rc)
{
	if (rc >= 0)
		return 0;

	rc = -rc;
	if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
	    vbg_status_code_to_errno_table[rc] == 0) {
		vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
		return -EPROTO;
	}

	return vbg_status_code_to_errno_table[rc];
}
EXPORT_SYMBOL(vbg_status_code_to_errno);