/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp,
 * VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn.
 *
 * Copyright (C) 2006-2016 Oracle Corporation
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include "vboxguest_core.h"
/* Get the pointer to the first parameter of a HGCM call request. */
#define VMMDEV_HGCM_CALL_PARMS(a) \
        ((struct vmmdev_hgcm_function_parameter *)( \
                (u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))

/* The max parameter buffer size for a user request. */
#define VBG_MAX_HGCM_USER_PARM          (24 * SZ_1M)
/* The max parameter buffer size for a kernel request. */
#define VBG_MAX_HGCM_KERNEL_PARM        (16 * SZ_1M)

#define VBG_DEBUG_PORT                  0x504

/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
static DEFINE_SPINLOCK(vbg_log_lock);
static char vbg_log_buf[128];
#define VBG_LOG(name, pr_func) \
void name(const char *fmt, ...)                                         \
{                                                                       \
        unsigned long flags;                                            \
        va_list args;                                                   \
        int i, count;                                                   \
                                                                        \
        va_start(args, fmt);                                            \
        spin_lock_irqsave(&vbg_log_lock, flags);                        \
                                                                        \
        count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
        for (i = 0; i < count; i++)                                     \
                outb(vbg_log_buf[i], VBG_DEBUG_PORT);                   \
                                                                        \
        pr_func("%s", vbg_log_buf);                                     \
                                                                        \
        spin_unlock_irqrestore(&vbg_log_lock, flags);                   \
        va_end(args);                                                   \
}                                                                       \
EXPORT_SYMBOL(name)
VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif
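
/*
 * Usage sketch (illustrative, not part of the original file): the VBG_LOG()
 * expansions above behave like the matching printk helpers but also mirror
 * the formatted message, byte by byte, to the host's debug port:
 *
 *      vbg_info("vboxguest: misc device minor %d\n", minor);
 *      vbg_err("vboxguest: VMMDev request failed, rc: %d\n", rc);
 *
 * vscnprintf() truncates messages longer than sizeof(vbg_log_buf) - 1 bytes.
 */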
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
                    u32 requestor)
{
        struct vmmdev_request_header *req;
        int order = get_order(PAGE_ALIGN(len));

        req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
        if (!req)
                return NULL;

        memset(req, 0xaa, len);

        req->size = len;
        req->version = VMMDEV_REQUEST_HEADER_VERSION;
        req->request_type = req_type;
        req->rc = VERR_GENERAL_FAILURE;
        req->reserved1 = 0;
        req->requestor = requestor;

        return req;
}

void vbg_req_free(void *req, size_t len)
{
        if (!req)
                return;

        free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
}
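
/*
 * Lifecycle sketch (hypothetical; mirrors how hgcm_cancel_call() below uses
 * the pre-allocated cancellation request): a VMMDev request is allocated
 * with vbg_req_alloc(), handed to the host with vbg_req_perform() (below)
 * and released with vbg_req_free():
 *
 *      struct vmmdev_hgcm_cancel2 *req;
 *      int rc;
 *
 *      req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HGCM_CANCEL2, requestor);
 *      if (!req)
 *              return -ENOMEM;
 *      req->phys_req_to_cancel = 0;
 *      rc = vbg_req_perform(gdev, req);   (a VBox status code, not an errno)
 *      vbg_req_free(req, sizeof(*req));
 */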
/* Note this function returns a VBox status code, not a negative errno!! */
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
        unsigned long phys_req = virt_to_phys(req);

        outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
        /*
         * The host changes the request as a result of the outl, make sure
         * the outl and any reads of the req happen in the correct order.
         */
        mb();

        return ((struct vmmdev_request_header *)req)->rc;
}

static bool hgcm_req_done(struct vbg_dev *gdev,
                          struct vmmdev_hgcmreq_header *header)
{
        unsigned long flags;
        bool done;

        spin_lock_irqsave(&gdev->event_spinlock, flags);
        done = header->flags & VMMDEV_HGCM_REQ_DONE;
        spin_unlock_irqrestore(&gdev->event_spinlock, flags);

        return done;
}
int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
                     struct vmmdev_hgcm_service_location *loc,
                     u32 *client_id, int *vbox_status)
{
        struct vmmdev_hgcm_connect *hgcm_connect = NULL;
        int rc;

        hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
                                     VMMDEVREQ_HGCM_CONNECT, requestor);
        if (!hgcm_connect)
                return -ENOMEM;

        hgcm_connect->header.flags = 0;
        memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
        hgcm_connect->client_id = 0;

        rc = vbg_req_perform(gdev, hgcm_connect);

        if (rc == VINF_HGCM_ASYNC_EXECUTE)
                wait_event(gdev->hgcm_wq,
                           hgcm_req_done(gdev, &hgcm_connect->header));

        if (rc >= 0) {
                *client_id = hgcm_connect->client_id;
                rc = hgcm_connect->header.result;
        }

        vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));

        *vbox_status = rc;
        return 0;
}
EXPORT_SYMBOL(vbg_hgcm_connect);

int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
                        u32 client_id, int *vbox_status)
{
        struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
        int rc;

        hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
                                        VMMDEVREQ_HGCM_DISCONNECT,
                                        requestor);
        if (!hgcm_disconnect)
                return -ENOMEM;

        hgcm_disconnect->header.flags = 0;
        hgcm_disconnect->client_id = client_id;

        rc = vbg_req_perform(gdev, hgcm_disconnect);

        if (rc == VINF_HGCM_ASYNC_EXECUTE)
                wait_event(gdev->hgcm_wq,
                           hgcm_req_done(gdev, &hgcm_disconnect->header));

        if (rc >= 0)
                rc = hgcm_disconnect->header.result;

        vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));

        *vbox_status = rc;
        return 0;
}
EXPORT_SYMBOL(vbg_hgcm_disconnect);
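
/*
 * Usage sketch (hypothetical caller; the service name and requestor value
 * are illustrative only): an in-kernel client connects once, issues calls
 * with the returned client_id and disconnects when done. Note the split
 * error domains: the return value reports transport errors as a negative
 * errno, while *vbox_status carries the service's own VBox status code.
 *
 *      struct vmmdev_hgcm_service_location loc = {
 *              .type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING,
 *      };
 *      u32 client_id;
 *      int ret, vbox_status;
 *
 *      strscpy(loc.u.localhost.service_name, "VBoxGuestPropSvc",
 *              sizeof(loc.u.localhost.service_name));
 *      ret = vbg_hgcm_connect(gdev, requestor, &loc, &client_id,
 *                             &vbox_status);
 *      if (ret < 0)
 *              return ret;
 *      if (vbox_status < 0)
 *              return vbg_status_code_to_errno(vbox_status);
 *      ...
 *      vbg_hgcm_disconnect(gdev, requestor, client_id, &vbox_status);
 */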
static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
{
        u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));

        return size >> PAGE_SHIFT;
}
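
/*
 * Worked example (assuming 4 KiB pages): a buffer starting at offset 0x123
 * into its first page with len 0x2000 needs PAGE_ALIGN(0x2000 + 0x123) =
 * 0x3000 bytes worth of pages, i.e. 3 pages, although the length alone
 * would fit in 2.
 */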
static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
{
        u32 page_count;

        page_count = hgcm_call_buf_size_in_pages(buf, len);
        *extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}

static int hgcm_call_preprocess_linaddr(
        const struct vmmdev_hgcm_function_parameter *src_parm,
        void **bounce_buf_ret, size_t *extra)
{
        void *buf, *bounce_buf;
        bool copy_in;
        u32 len;
        int ret;

        buf = (void *)src_parm->u.pointer.u.linear_addr;
        len = src_parm->u.pointer.size;
        copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;

        if (len > VBG_MAX_HGCM_USER_PARM)
                return -E2BIG;

        bounce_buf = kvmalloc(len, GFP_KERNEL);
        if (!bounce_buf)
                return -ENOMEM;

        /*
         * Store the buffer before attempting the copy, so that the caller's
         * cleanup of the bounce-buffer array also frees it when the copy
         * below fails.
         */
        *bounce_buf_ret = bounce_buf;

        if (copy_in) {
                ret = copy_from_user(bounce_buf, (void __user *)buf, len);
                if (ret)
                        return -EFAULT;
        } else {
                memset(bounce_buf, 0, len);
        }

        hgcm_call_add_pagelist_size(bounce_buf, len, extra);
        return 0;
}
/**
 * Preprocesses the HGCM call: validates parameters, allocates bounce buffers
 * and figures out how much extra storage we need for page lists.
 * Return: 0 or negative errno value.
 * @src_parm:         Pointer to source function call parameters
 * @parm_count:       Number of function call parameters.
 * @bounce_bufs_ret:  Where to return the allocated bounce-buffer array
 * @extra:            Where to return the extra request space needed for
 *                    physical page lists.
 */
static int hgcm_call_preprocess(
        const struct vmmdev_hgcm_function_parameter *src_parm,
        u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
{
        void *buf, **bounce_bufs = NULL;
        u32 i, len;
        int ret;

        for (i = 0; i < parm_count; i++, src_parm++) {
                switch (src_parm->type) {
                case VMMDEV_HGCM_PARM_TYPE_32BIT:
                case VMMDEV_HGCM_PARM_TYPE_64BIT:
                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
                        if (!bounce_bufs) {
                                bounce_bufs = kcalloc(parm_count,
                                                      sizeof(void *),
                                                      GFP_KERNEL);
                                if (!bounce_bufs)
                                        return -ENOMEM;

                                *bounce_bufs_ret = bounce_bufs;
                        }

                        ret = hgcm_call_preprocess_linaddr(src_parm,
                                                           &bounce_bufs[i],
                                                           extra);
                        if (ret)
                                return ret;

                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
                        buf = (void *)src_parm->u.pointer.u.linear_addr;
                        len = src_parm->u.pointer.size;
                        if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
                                return -E2BIG;

                        hgcm_call_add_pagelist_size(buf, len, extra);
                        break;

                default:
                        return -EINVAL;
                }
        }

        return 0;
}
/**
 * Translates linear address types to page list direction flags.
 *
 * Return: page list flags.
 * @type:  The type.
 */
static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
        enum vmmdev_hgcm_function_parameter_type type)
{
        switch (type) {
        default:
                WARN_ON(1);
                /* Fall through */
        case VMMDEV_HGCM_PARM_TYPE_LINADDR:
        case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
                return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;

        case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
        case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
                return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;

        case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
        case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
                return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
        }
}
static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
        struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
        enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
{
        struct vmmdev_hgcm_pagelist *dst_pg_lst;
        struct page *page;
        bool is_vmalloc;
        u32 i, page_count;

        dst_parm->type = type;

        if (len == 0) {
                dst_parm->u.pointer.size = 0;
                dst_parm->u.pointer.u.linear_addr = 0;
                return;
        }

        dst_pg_lst = (void *)call + *off_extra;
        page_count = hgcm_call_buf_size_in_pages(buf, len);
        is_vmalloc = is_vmalloc_addr(buf);

        dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
        dst_parm->u.page_list.size = len;
        dst_parm->u.page_list.offset = *off_extra;
        dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
        dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
        dst_pg_lst->page_count = page_count;

        for (i = 0; i < page_count; i++) {
                if (is_vmalloc)
                        page = vmalloc_to_page(buf);
                else
                        page = virt_to_page(buf);

                dst_pg_lst->pages[i] = page_to_phys(page);
                buf += PAGE_SIZE;
        }

        *off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
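
/*
 * Resulting request layout (summary of the two functions above; page-list
 * offsets are relative to the start of the request):
 *
 *      struct vmmdev_hgcm_call header
 *      parm_count * struct vmmdev_hgcm_function_parameter
 *      one struct vmmdev_hgcm_pagelist per buffer parameter, holding that
 *      buffer's page_count physical page addresses
 *
 * off_extra starts directly behind the parameter array (see
 * hgcm_call_init_call() below) and is advanced past each page list as it
 * is filled in.
 */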
/**
 * Initializes the call request that we're sending to the host.
 * @call:            The call to initialize.
 * @client_id:       The client ID of the caller.
 * @function:        The function number of the function to call.
 * @src_parm:        Pointer to source function call parameters.
 * @parm_count:      Number of function call parameters.
 * @bounce_bufs:     The bounce-buffer array.
 */
static void hgcm_call_init_call(
        struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
        const struct vmmdev_hgcm_function_parameter *src_parm,
        u32 parm_count, void **bounce_bufs)
{
        struct vmmdev_hgcm_function_parameter *dst_parm =
                VMMDEV_HGCM_CALL_PARMS(call);
        u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
        void *buf;

        call->header.flags = 0;
        call->header.result = VINF_SUCCESS;
        call->client_id = client_id;
        call->function = function;
        call->parm_count = parm_count;

        for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
                switch (src_parm->type) {
                case VMMDEV_HGCM_PARM_TYPE_32BIT:
                case VMMDEV_HGCM_PARM_TYPE_64BIT:
                        *dst_parm = *src_parm;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
                        hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
                                               src_parm->u.pointer.size,
                                               src_parm->type, &off_extra);
                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
                        buf = (void *)src_parm->u.pointer.u.linear_addr;
                        hgcm_call_init_linaddr(call, dst_parm, buf,
                                               src_parm->u.pointer.size,
                                               src_parm->type, &off_extra);
                        break;

                default:
                        WARN_ON(1);
                        dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
                }
        }
}
/**
 * Tries to cancel a pending HGCM call.
 *
 * Return: VBox status code
 */
static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
{
        int rc;

        /*
         * We use a pre-allocated request for cancellations, which is
         * protected by cancel_req_mutex. This means that all cancellations
         * get serialized, this should be fine since they should be rare.
         */
        mutex_lock(&gdev->cancel_req_mutex);
        gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
        rc = vbg_req_perform(gdev, gdev->cancel_req);
        mutex_unlock(&gdev->cancel_req_mutex);

        if (rc == VERR_NOT_IMPLEMENTED) {
                call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
                call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;

                rc = vbg_req_perform(gdev, call);
                if (rc == VERR_INVALID_PARAMETER)
                        rc = VERR_NOT_FOUND;
        }

        if (rc >= 0)
                call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;

        return rc;
}
/**
 * Performs the call and completion wait.
 * Return: 0 or negative errno value.
 * @gdev:        The VBoxGuest device extension.
 * @call:        The call to execute.
 * @timeout_ms:  Timeout in ms.
 * @leak_it:     Where to return the leak-it indicator; set to true when
 *               cancellation of a timed-out or interrupted call failed and
 *               the request must be leaked instead of freed.
 */
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
                            u32 timeout_ms, bool *leak_it)
{
        int rc, cancel_rc, ret;
        long timeout;

        *leak_it = false;

        rc = vbg_req_perform(gdev, call);

        /*
         * If the call failed, then pretend success. Upper layers will
         * interpret the result code in the packet.
         */
        if (rc < 0) {
                call->header.result = rc;
                return 0;
        }

        if (rc != VINF_HGCM_ASYNC_EXECUTE)
                return 0;

        /* Host decided to process the request asynchronously, wait for it */
        if (timeout_ms == U32_MAX)
                timeout = MAX_SCHEDULE_TIMEOUT;
        else
                timeout = msecs_to_jiffies(timeout_ms);

        timeout = wait_event_interruptible_timeout(
                                        gdev->hgcm_wq,
                                        hgcm_req_done(gdev, &call->header),
                                        timeout);

        /* timeout > 0 means hgcm_req_done has returned true, so success */
        if (timeout > 0)
                return 0;

        if (timeout == 0)
                ret = -ETIMEDOUT;
        else
                ret = -EINTR;

        /* Cancel the request */
        cancel_rc = hgcm_cancel_call(gdev, call);
        if (cancel_rc >= 0)
                return ret;

        /*
         * Failed to cancel, this should mean that the cancel has lost the
         * race with normal completion, wait while the host completes it.
         */
        if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
                timeout = msecs_to_jiffies(500);
        else
                timeout = msecs_to_jiffies(2000);

        timeout = wait_event_timeout(gdev->hgcm_wq,
                                     hgcm_req_done(gdev, &call->header),
                                     timeout);

        if (WARN_ON(timeout == 0)) {
                /* We really should never get here */
                vbg_err("%s: Call timed out and cancellation failed, leaking the request\n",
                        __func__);
                *leak_it = true;
                return ret;
        }

        /* The call has completed normally after all */
        return 0;
}
/**
 * Copies the result of the call back to the caller info structure and user
 * buffers.
 * Return: 0 or negative errno value.
 * @call:            HGCM call request.
 * @dst_parm:        Pointer to function call parameters destination.
 * @parm_count:      Number of function call parameters.
 * @bounce_bufs:     The bounce-buffer array.
 */
static int hgcm_call_copy_back_result(
        const struct vmmdev_hgcm_call *call,
        struct vmmdev_hgcm_function_parameter *dst_parm,
        u32 parm_count, void **bounce_bufs)
{
        const struct vmmdev_hgcm_function_parameter *src_parm =
                VMMDEV_HGCM_CALL_PARMS(call);
        void __user *p;
        int ret;
        u32 i;

        /* Copy back parameters. */
        for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
                switch (dst_parm->type) {
                case VMMDEV_HGCM_PARM_TYPE_32BIT:
                case VMMDEV_HGCM_PARM_TYPE_64BIT:
                        *dst_parm = *src_parm;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
                        dst_parm->u.page_list.size = src_parm->u.page_list.size;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
                        dst_parm->u.pointer.size = src_parm->u.pointer.size;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
                        dst_parm->u.pointer.size = src_parm->u.pointer.size;

                        p = (void __user *)dst_parm->u.pointer.u.linear_addr;
                        ret = copy_to_user(p, bounce_bufs[i],
                                           min(src_parm->u.pointer.size,
                                               dst_parm->u.pointer.size));
                        if (ret)
                                return -EFAULT;
                        break;

                default:
                        WARN_ON(1);
                        return -EINVAL;
                }
        }

        return 0;
}
int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
                  u32 function, u32 timeout_ms,
                  struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
                  int *vbox_status)
{
        struct vmmdev_hgcm_call *call;
        void **bounce_bufs = NULL;
        bool leak_it;
        size_t size;
        int i, ret;

        size = sizeof(struct vmmdev_hgcm_call) +
                   parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
        /*
         * Validate and buffer the parameters for the call. This also
         * increases size with the amount of extra space needed for
         * page lists.
         */
        ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
        if (ret) {
                /* Even on error bounce bufs may still have been allocated */
                goto free_bounce_bufs;
        }

        call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
        if (!call) {
                ret = -ENOMEM;
                goto free_bounce_bufs;
        }

        hgcm_call_init_call(call, client_id, function, parms, parm_count,
                            bounce_bufs);

        ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
        if (ret == 0) {
                *vbox_status = call->header.result;
                ret = hgcm_call_copy_back_result(call, parms, parm_count,
                                                 bounce_bufs);
        }

        if (!leak_it)
                vbg_req_free(call, size);

free_bounce_bufs:
        if (bounce_bufs) {
                for (i = 0; i < parm_count; i++)
                        kvfree(bounce_bufs[i]);
                kfree(bounce_bufs);
        }

        return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call);
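
/*
 * Usage sketch (hypothetical; the function number SOME_FUNCTION_NR and the
 * parameter layout are illustrative only): one 32-bit argument plus one
 * kernel buffer the host writes into. Kernel buffers are passed with the
 * LINADDR_KERNEL types and must stay valid for the whole call; userspace
 * pointers use the plain LINADDR types and are bounce-buffered by
 * hgcm_call_preprocess() above.
 *
 *      struct vmmdev_hgcm_function_parameter parms[2] = {
 *              {
 *                      .type = VMMDEV_HGCM_PARM_TYPE_32BIT,
 *                      .u.value32 = some_id,
 *              }, {
 *                      .type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT,
 *                      .u.pointer.size = buf_len,
 *                      .u.pointer.u.linear_addr = (uintptr_t)buf,
 *              }
 *      };
 *      int ret, vbox_status;
 *
 *      ret = vbg_hgcm_call(gdev, requestor, client_id, SOME_FUNCTION_NR,
 *                          U32_MAX, parms, ARRAY_SIZE(parms), &vbox_status);
 *      if (ret == 0)
 *              ret = vbg_status_code_to_errno(vbox_status);
 *
 * A timeout_ms of U32_MAX means wait without a timeout (see
 * vbg_hgcm_do_call() above).
 */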
#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
        struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
        u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
        u32 parm_count, int *vbox_status)
{
        struct vmmdev_hgcm_function_parameter *parm64 = NULL;
        u32 i, size;
        int ret = 0;

        /* KISS: allocate a temporary request and convert the parameters. */
        size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
        parm64 = kzalloc(size, GFP_KERNEL);
        if (!parm64)
                return -ENOMEM;

        for (i = 0; i < parm_count; i++) {
                switch (parm32[i].type) {
                case VMMDEV_HGCM_PARM_TYPE_32BIT:
                        parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
                        parm64[i].u.value32 = parm32[i].u.value32;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_64BIT:
                        parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
                        parm64[i].u.value64 = parm32[i].u.value64;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
                        parm64[i].type = parm32[i].type;
                        parm64[i].u.pointer.size = parm32[i].u.pointer.size;
                        parm64[i].u.pointer.u.linear_addr =
                                parm32[i].u.pointer.u.linear_addr;
                        break;

                default:
                        ret = -EINVAL;
                }
                if (ret < 0)
                        goto out_free;
        }

        ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
                            parm64, parm_count, vbox_status);
        if (ret < 0)
                goto out_free;

        /* Copy back. */
        for (i = 0; i < parm_count; i++) {
                switch (parm64[i].type) {
                case VMMDEV_HGCM_PARM_TYPE_32BIT:
                        parm32[i].u.value32 = parm64[i].u.value32;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_64BIT:
                        parm32[i].u.value64 = parm64[i].u.value64;
                        break;

                case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR:
                case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
                        parm32[i].u.pointer.size = parm64[i].u.pointer.size;
                        break;

                default:
                        WARN_ON(1);
                        ret = -EINVAL;
                }
        }

out_free:
        kfree(parm64);
        return ret;
}
#endif
static const int vbg_status_code_to_errno_table[] = {
        [-VERR_ACCESS_DENIED] = -EPERM,
        [-VERR_FILE_NOT_FOUND] = -ENOENT,
        [-VERR_PROCESS_NOT_FOUND] = -ESRCH,
        [-VERR_INTERRUPTED] = -EINTR,
        [-VERR_DEV_IO_ERROR] = -EIO,
        [-VERR_TOO_MUCH_DATA] = -E2BIG,
        [-VERR_BAD_EXE_FORMAT] = -ENOEXEC,
        [-VERR_INVALID_HANDLE] = -EBADF,
        [-VERR_TRY_AGAIN] = -EAGAIN,
        [-VERR_NO_MEMORY] = -ENOMEM,
        [-VERR_INVALID_POINTER] = -EFAULT,
        [-VERR_RESOURCE_BUSY] = -EBUSY,
        [-VERR_ALREADY_EXISTS] = -EEXIST,
        [-VERR_NOT_SAME_DEVICE] = -EXDEV,
        [-VERR_NOT_A_DIRECTORY] = -ENOTDIR,
        [-VERR_PATH_NOT_FOUND] = -ENOTDIR,
        [-VERR_INVALID_NAME] = -ENOENT,
        [-VERR_IS_A_DIRECTORY] = -EISDIR,
        [-VERR_INVALID_PARAMETER] = -EINVAL,
        [-VERR_TOO_MANY_OPEN_FILES] = -ENFILE,
        [-VERR_INVALID_FUNCTION] = -ENOTTY,
        [-VERR_SHARING_VIOLATION] = -ETXTBSY,
        [-VERR_FILE_TOO_BIG] = -EFBIG,
        [-VERR_DISK_FULL] = -ENOSPC,
        [-VERR_SEEK_ON_DEVICE] = -ESPIPE,
        [-VERR_WRITE_PROTECT] = -EROFS,
        [-VERR_BROKEN_PIPE] = -EPIPE,
        [-VERR_DEADLOCK] = -EDEADLK,
        [-VERR_FILENAME_TOO_LONG] = -ENAMETOOLONG,
        [-VERR_FILE_LOCK_FAILED] = -ENOLCK,
        [-VERR_NOT_IMPLEMENTED] = -ENOSYS,
        [-VERR_NOT_SUPPORTED] = -ENOSYS,
        [-VERR_DIR_NOT_EMPTY] = -ENOTEMPTY,
        [-VERR_TOO_MANY_SYMLINKS] = -ELOOP,
        [-VERR_NO_MORE_FILES] = -ENODATA,
        [-VERR_NO_DATA] = -ENODATA,
        [-VERR_NET_NO_NETWORK] = -ENONET,
        [-VERR_NET_NOT_UNIQUE_NAME] = -ENOTUNIQ,
        [-VERR_NO_TRANSLATION] = -EILSEQ,
        [-VERR_NET_NOT_SOCKET] = -ENOTSOCK,
        [-VERR_NET_DEST_ADDRESS_REQUIRED] = -EDESTADDRREQ,
        [-VERR_NET_MSG_SIZE] = -EMSGSIZE,
        [-VERR_NET_PROTOCOL_TYPE] = -EPROTOTYPE,
        [-VERR_NET_PROTOCOL_NOT_AVAILABLE] = -ENOPROTOOPT,
        [-VERR_NET_PROTOCOL_NOT_SUPPORTED] = -EPROTONOSUPPORT,
        [-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED] = -ESOCKTNOSUPPORT,
        [-VERR_NET_OPERATION_NOT_SUPPORTED] = -EOPNOTSUPP,
        [-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED] = -EPFNOSUPPORT,
        [-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED] = -EAFNOSUPPORT,
        [-VERR_NET_ADDRESS_IN_USE] = -EADDRINUSE,
        [-VERR_NET_ADDRESS_NOT_AVAILABLE] = -EADDRNOTAVAIL,
        [-VERR_NET_DOWN] = -ENETDOWN,
        [-VERR_NET_UNREACHABLE] = -ENETUNREACH,
        [-VERR_NET_CONNECTION_RESET] = -ENETRESET,
        [-VERR_NET_CONNECTION_ABORTED] = -ECONNABORTED,
        [-VERR_NET_CONNECTION_RESET_BY_PEER] = -ECONNRESET,
        [-VERR_NET_NO_BUFFER_SPACE] = -ENOBUFS,
        [-VERR_NET_ALREADY_CONNECTED] = -EISCONN,
        [-VERR_NET_NOT_CONNECTED] = -ENOTCONN,
        [-VERR_NET_SHUTDOWN] = -ESHUTDOWN,
        [-VERR_NET_TOO_MANY_REFERENCES] = -ETOOMANYREFS,
        [-VERR_TIMEOUT] = -ETIMEDOUT,
        [-VERR_NET_CONNECTION_REFUSED] = -ECONNREFUSED,
        [-VERR_NET_HOST_DOWN] = -EHOSTDOWN,
        [-VERR_NET_HOST_UNREACHABLE] = -EHOSTUNREACH,
        [-VERR_NET_ALREADY_IN_PROGRESS] = -EALREADY,
        [-VERR_NET_IN_PROGRESS] = -EINPROGRESS,
        [-VERR_MEDIA_NOT_PRESENT] = -ENOMEDIUM,
        [-VERR_MEDIA_NOT_RECOGNIZED] = -EMEDIUMTYPE,
};
int vbg_status_code_to_errno(int rc)
{
        if (rc >= 0)
                return 0;

        rc = -rc;
        if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
            vbg_status_code_to_errno_table[rc] == 0) {
                vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
                return -EPROTO;
        }

        return vbg_status_code_to_errno_table[rc];
}
EXPORT_SYMBOL(vbg_status_code_to_errno);
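
/*
 * Examples (values follow from the table above):
 *
 *      vbg_status_code_to_errno(VINF_SUCCESS)   -> 0 (any rc >= 0)
 *      vbg_status_code_to_errno(VERR_NO_MEMORY) -> -ENOMEM
 *
 * A status without a table entry, e.g. VERR_GENERAL_FAILURE, logs an
 * "Unhandled err" warning and falls back to -EPROTO.
 */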