/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp,
 * VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn.
 *
 * Copyright (C) 2006-2016 Oracle Corporation
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include "vboxguest_core.h"
/* Get the pointer to the first parameter of a HGCM call request. */
#define VMMDEV_HGCM_CALL_PARMS(a) \
        ((struct vmmdev_hgcm_function_parameter *)( \
                (u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))
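/*
 * For orientation, the in-memory layout of a HGCM call request as built by
 * hgcm_call_init_call() below (an illustrative sketch derived from this
 * file's offset arithmetic, not a definition from the VMMDev headers):
 *
 *   struct vmmdev_hgcm_call                               (header + call info)
 *   struct vmmdev_hgcm_function_parameter[parm_count]     (the parameters)
 *   struct vmmdev_hgcm_pagelist + pages[] per buffer parm (reached via the
 *                                                          off_extra cursor)
 */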
/* The max parameter buffer size for a user request. */
#define VBG_MAX_HGCM_USER_PARM		(24 * SZ_1M)
/* The max parameter buffer size for a kernel request. */
#define VBG_MAX_HGCM_KERNEL_PARM	(16 * SZ_1M)

#define VBG_DEBUG_PORT			0x504

/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
static DEFINE_SPINLOCK(vbg_log_lock);
static char vbg_log_buf[128];
#define VBG_LOG(name, pr_func) \
void name(const char *fmt, ...)						\
{									\
	unsigned long flags;						\
	va_list args;							\
	int i, count;							\
									\
	va_start(args, fmt);						\
	spin_lock_irqsave(&vbg_log_lock, flags);			\
									\
	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
	for (i = 0; i < count; i++)					\
		outb(vbg_log_buf[i], VBG_DEBUG_PORT);			\
									\
	pr_func("%s", vbg_log_buf);					\
									\
	spin_unlock_irqrestore(&vbg_log_lock, flags);			\
	va_end(args);							\
}									\
EXPORT_SYMBOL(name)

VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif
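/*
 * Example use of the loggers generated above (illustrative only, not
 * compiled; the message contents are hypothetical). Each call both writes
 * the formatted string to the host's debug port and logs it via printk:
 */
#if 0
static void example_log_usage(void)
{
	vbg_info("vboxguest: driver loaded\n");
	vbg_err("%s: request failed, rc=%d\n", __func__, VERR_GENERAL_FAILURE);
}
#endif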
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
{
	struct vmmdev_request_header *req;

	req = kmalloc(len, GFP_KERNEL | __GFP_DMA32);
	if (!req)
		return NULL;

	memset(req, 0xaa, len);

	req->size = len;
	req->version = VMMDEV_REQUEST_HEADER_VERSION;
	req->request_type = req_type;
	req->rc = VERR_GENERAL_FAILURE;
	req->reserved1 = 0;
	req->reserved2 = 0;

	return req;
}
/* Note this function returns a VBox status code, not a negative errno!! */
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
	unsigned long phys_req = virt_to_phys(req);

	outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
	/*
	 * The host changes the request as a result of the outl, make sure
	 * the outl and any reads of the req happen in the correct order.
	 */
	mb();

	return ((struct vmmdev_request_header *)req)->rc;
}
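/*
 * Illustrative sketch (not compiled) of the request lifecycle built on the
 * two helpers above: allocate, submit, translate the VBox status, free.
 * The request type is picked for illustration only; a real caller would
 * allocate that type's full request struct, not just the header.
 */
#if 0
static int example_submit_request(struct vbg_dev *gdev)
{
	struct vmmdev_request_header *req;
	int rc, ret;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
	if (!req)
		return -ENOMEM;

	rc = vbg_req_perform(gdev, req);	/* VBox status code, not errno */
	ret = vbg_status_code_to_errno(rc);

	kfree(req);
	return ret;
}
#endif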
static bool hgcm_req_done(struct vbg_dev *gdev,
			  struct vmmdev_hgcmreq_header *header)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	done = header->flags & VMMDEV_HGCM_REQ_DONE;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return done;
}
int vbg_hgcm_connect(struct vbg_dev *gdev,
		     struct vmmdev_hgcm_service_location *loc,
		     u32 *client_id, int *vbox_status)
{
	struct vmmdev_hgcm_connect *hgcm_connect = NULL;
	int rc;

	hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
				     VMMDEVREQ_HGCM_CONNECT);
	if (!hgcm_connect)
		return -ENOMEM;

	hgcm_connect->header.flags = 0;
	memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
	hgcm_connect->client_id = 0;

	rc = vbg_req_perform(gdev, hgcm_connect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_connect->header));

	if (rc >= 0) {
		*client_id = hgcm_connect->client_id;
		rc = hgcm_connect->header.result;
	}

	kfree(hgcm_connect);

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_connect);
int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
{
	struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
	int rc;

	hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
					VMMDEVREQ_HGCM_DISCONNECT);
	if (!hgcm_disconnect)
		return -ENOMEM;

	hgcm_disconnect->header.flags = 0;
	hgcm_disconnect->client_id = client_id;

	rc = vbg_req_perform(gdev, hgcm_disconnect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_disconnect->header));

	if (rc >= 0)
		rc = hgcm_disconnect->header.result;

	kfree(hgcm_disconnect);

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_disconnect);
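/*
 * Sketch (not compiled) of a typical connect/disconnect pairing as seen from
 * a caller. The service name is a placeholder. Note the two-level error
 * model: a 0 return only means the request reached the host; the VBox status
 * returned in vbox_status still has to be checked separately.
 */
#if 0
static int example_hgcm_session(struct vbg_dev *gdev)
{
	struct vmmdev_hgcm_service_location loc;
	int ret, vbox_status;
	u32 client_id;

	loc.type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING;
	strcpy(loc.u.localhost.service_name, "VBoxExampleSvc");	/* hypothetical */

	ret = vbg_hgcm_connect(gdev, &loc, &client_id, &vbox_status);
	if (ret < 0)
		return ret;
	if (vbox_status < 0)
		return vbg_status_code_to_errno(vbox_status);

	/* ... vbg_hgcm_call() with client_id ... */

	ret = vbg_hgcm_disconnect(gdev, client_id, &vbox_status);
	return ret ? ret : vbg_status_code_to_errno(vbox_status);
}
#endif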
static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
{
	u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));

	return size >> PAGE_SHIFT;
}
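/*
 * Worked example (assuming 4 KiB pages): a 5000-byte buffer starting at
 * page offset 0xff0 covers PAGE_ALIGN(5000 + 0xff0) = 12288 bytes, i.e.
 * 3 pages, even though 5000 bytes alone would fit in 2. The in-page start
 * offset is recorded separately as offset_first_page when the page list
 * is built below.
 */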
static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
{
	u32 page_count;

	page_count = hgcm_call_buf_size_in_pages(buf, len);
	*extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
static int hgcm_call_preprocess_linaddr(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	void **bounce_buf_ret, size_t *extra)
{
	void *buf, *bounce_buf;
	bool copy_in;
	u32 len;
	int ret;

	buf = (void *)src_parm->u.pointer.u.linear_addr;
	len = src_parm->u.pointer.size;
	copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;

	if (len > VBG_MAX_HGCM_USER_PARM)
		return -E2BIG;

	bounce_buf = kvmalloc(len, GFP_KERNEL);
	if (!bounce_buf)
		return -ENOMEM;

	/*
	 * Publish the buffer before the copy below, so that the caller can
	 * free it even when we error out here.
	 */
	*bounce_buf_ret = bounce_buf;

	if (copy_in) {
		ret = copy_from_user(bounce_buf, (void __user *)buf, len);
		if (ret)
			return -EFAULT;
	} else {
		memset(bounce_buf, 0, len);
	}

	hgcm_call_add_pagelist_size(bounce_buf, len, extra);
	return 0;
}
/**
 * Preprocesses the HGCM call: validates parameters, allocates bounce buffers
 * and figures out how much extra storage is needed for page lists.
 * Return: 0 or negative errno value.
 * @src_parm:         Pointer to source function call parameters
 * @parm_count:       Number of function call parameters.
 * @bounce_bufs_ret:  Where to return the allocated bouncebuffer array
 * @extra:            Where to return the extra request space needed for
 *                    physical page lists.
 */
static int hgcm_call_preprocess(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
{
	void *buf, **bounce_bufs = NULL;
	u32 i, len;
	int ret;

	for (i = 0; i < parm_count; i++, src_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			if (!bounce_bufs) {
				bounce_bufs = kcalloc(parm_count,
						      sizeof(void *),
						      GFP_KERNEL);
				if (!bounce_bufs)
					return -ENOMEM;

				*bounce_bufs_ret = bounce_bufs;
			}

			ret = hgcm_call_preprocess_linaddr(src_parm,
							   &bounce_bufs[i],
							   extra);
			if (ret)
				return ret;

			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			len = src_parm->u.pointer.size;
			if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
				return -E2BIG;

			hgcm_call_add_pagelist_size(buf, len, extra);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * Translates linear address types to page list direction flags.
 *
 * Return: page list flags.
 * @type:  The type.
 */
static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
	enum vmmdev_hgcm_function_parameter_type type)
{
	switch (type) {
	default:
		WARN_ON(1);
		/* Fall through */
	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
		return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
	}
}
static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
	enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
{
	struct vmmdev_hgcm_pagelist *dst_pg_lst;
	struct page *page;
	bool is_vmalloc;
	u32 i, page_count;

	dst_parm->type = type;

	if (len == 0) {
		dst_parm->u.pointer.size = 0;
		dst_parm->u.pointer.u.linear_addr = 0;
		return;
	}

	dst_pg_lst = (void *)call + *off_extra;
	page_count = hgcm_call_buf_size_in_pages(buf, len);
	is_vmalloc = is_vmalloc_addr(buf);

	dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
	dst_parm->u.page_list.size = len;
	dst_parm->u.page_list.offset = *off_extra;
	dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
	dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
	dst_pg_lst->page_count = page_count;

	for (i = 0; i < page_count; i++) {
		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		dst_pg_lst->pages[i] = page_to_phys(page);
		buf += PAGE_SIZE;
	}

	*off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
/**
 * Initializes the call request that we're sending to the host.
 * @call:        The call to initialize.
 * @client_id:   The client ID of the caller.
 * @function:    The function number of the function to call.
 * @src_parm:    Pointer to source function call parameters.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bouncebuffer array.
 */
static void hgcm_call_init_call(
	struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void **bounce_bufs)
{
	struct vmmdev_hgcm_function_parameter *dst_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
	void *buf;

	call->header.flags = 0;
	call->header.result = VINF_SUCCESS;
	call->client_id = client_id;
	call->function = function;
	call->parm_count = parm_count;

	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			hgcm_call_init_linaddr(call, dst_parm, buf,
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		default:
			WARN_ON(1);
			dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
		}
	}
}
/**
 * Tries to cancel a pending HGCM call.
 *
 * Return: VBox status code
 */
static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
{
	int rc;

	/*
	 * We use a pre-allocated request for cancellations, which is
	 * protected by cancel_req_mutex. This means that all cancellations
	 * get serialized; this should be fine since they should be rare.
	 */
	mutex_lock(&gdev->cancel_req_mutex);
	gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
	rc = vbg_req_perform(gdev, gdev->cancel_req);
	mutex_unlock(&gdev->cancel_req_mutex);

	if (rc == VERR_NOT_IMPLEMENTED) {
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
		call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;

		rc = vbg_req_perform(gdev, call);
		if (rc == VERR_INVALID_PARAMETER)
			rc = VERR_NOT_FOUND;
	}

	if (rc >= 0)
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;

	return rc;
}
/**
 * Performs the call and completion wait.
 * Return: 0 or negative errno value.
 * @gdev:        The VBoxGuest device extension.
 * @call:        The call to execute.
 * @timeout_ms:  Timeout in ms.
 * @leak_it:     Where to return the leak-it / free-it indicator; set when a
 *               timed-out call could not be cancelled, in which case the
 *               request must not be freed.
 */
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
			    u32 timeout_ms, bool *leak_it)
{
	int rc, cancel_rc, ret;
	long timeout;

	*leak_it = false;

	rc = vbg_req_perform(gdev, call);

	/*
	 * If the call failed, then pretend success. Upper layers will
	 * interpret the result code in the packet.
	 */
	if (rc < 0) {
		call->header.result = rc;
		return 0;
	}

	if (rc != VINF_HGCM_ASYNC_EXECUTE)
		return 0;

	/* Host decided to process the request asynchronously, wait for it */
	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	timeout = wait_event_interruptible_timeout(
					gdev->hgcm_wq,
					hgcm_req_done(gdev, &call->header),
					timeout);

	/* timeout > 0 means hgcm_req_done has returned true, so success */
	if (timeout > 0)
		return 0;

	if (timeout == 0)
		ret = -ETIMEDOUT;
	else
		ret = -EINTR;

	/* Cancel the request */
	cancel_rc = hgcm_cancel_call(gdev, call);
	if (cancel_rc >= 0)
		return ret;

	/*
	 * Failed to cancel; this should mean that the cancel has lost the
	 * race with normal completion, so wait while the host completes it.
	 */
	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
		timeout = msecs_to_jiffies(500);
	else
		timeout = msecs_to_jiffies(2000);

	timeout = wait_event_timeout(gdev->hgcm_wq,
				     hgcm_req_done(gdev, &call->header),
				     timeout);

	if (WARN_ON(timeout == 0)) {
		/* We really should never get here */
		vbg_err("%s: Call timed out and cancellation failed, leaking the request\n",
			__func__);
		*leak_it = true;
		return ret;
	}

	/* The call has completed normally after all */
	return 0;
}
/**
 * Copies the result of the call back to the caller info structure and user
 * buffers.
 * Return: 0 or negative errno value.
 * @call:        HGCM call request.
 * @dst_parm:    Pointer to function call parameters destination.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bouncebuffer array.
 */
static int hgcm_call_copy_back_result(
	const struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm,
	u32 parm_count, void **bounce_bufs)
{
	const struct vmmdev_hgcm_function_parameter *src_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	void __user *p;
	int ret;
	u32 i;

	/* Copy back parameters. */
	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (dst_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
			dst_parm->u.page_list.size = src_parm->u.page_list.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;

			p = (void __user *)dst_parm->u.pointer.u.linear_addr;
			ret = copy_to_user(p, bounce_bufs[i],
					   min(src_parm->u.pointer.size,
					       dst_parm->u.pointer.size));
			if (ret)
				return -EFAULT;
			break;

		default:
			WARN_ON(1);
			return -EINVAL;
		}
	}

	return 0;
}
int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
		  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
		  u32 parm_count, int *vbox_status)
{
	struct vmmdev_hgcm_call *call;
	void **bounce_bufs = NULL;
	bool leak_it;
	size_t size;
	int i, ret;

	size = sizeof(struct vmmdev_hgcm_call) +
		   parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	/*
	 * Validate and buffer the parameters for the call. This also increases
	 * size with the amount of extra space needed for page lists.
	 */
	ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
	if (ret) {
		/* Even on error bounce bufs may still have been allocated */
		goto free_bounce_bufs;
	}

	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
	if (!call) {
		ret = -ENOMEM;
		goto free_bounce_bufs;
	}

	hgcm_call_init_call(call, client_id, function, parms, parm_count,
			    bounce_bufs);

	ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
	if (ret == 0) {
		*vbox_status = call->header.result;
		ret = hgcm_call_copy_back_result(call, parms, parm_count,
						 bounce_bufs);
	}

	if (!leak_it)
		kfree(call);

free_bounce_bufs:
	if (bounce_bufs) {
		for (i = 0; i < parm_count; i++)
			kvfree(bounce_bufs[i]);
		kfree(bounce_bufs);
	}

	return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call);
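/*
 * Sketch (not compiled) of a complete HGCM call with one 32-bit argument and
 * one kernel-mode output buffer. The function number, argument value and
 * timeout are hypothetical; real services define their own function numbers
 * and parameter lists.
 */
#if 0
static int example_hgcm_call(struct vbg_dev *gdev, u32 client_id,
			     void *out_buf, u32 out_len)
{
	struct vmmdev_hgcm_function_parameter parms[2] = {};
	int ret, vbox_status;

	parms[0].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
	parms[0].u.value32 = 42;	/* hypothetical argument */

	parms[1].type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT;
	parms[1].u.pointer.size = out_len;
	parms[1].u.pointer.u.linear_addr = (uintptr_t)out_buf;

	ret = vbg_hgcm_call(gdev, client_id, 1 /* hypothetical function nr */,
			    5000 /* ms */, parms, ARRAY_SIZE(parms),
			    &vbox_status);
	if (ret < 0)
		return ret;	/* transport-level error */

	return vbg_status_code_to_errno(vbox_status);	/* service result */
}
#endif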
#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
	int *vbox_status)
{
	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
	u32 i, size;
	int ret = 0;

	/* KISS: allocate a temporary parameter array and convert the parameters. */
	size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	parm64 = kzalloc(size, GFP_KERNEL);
	if (!parm64)
		return -ENOMEM;

	for (i = 0; i < parm_count; i++) {
		switch (parm32[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
			parm64[i].u.value32 = parm32[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
			parm64[i].u.value64 = parm32[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm64[i].type = parm32[i].type;
			parm64[i].u.pointer.size = parm32[i].u.pointer.size;
			parm64[i].u.pointer.u.linear_addr =
			    parm32[i].u.pointer.u.linear_addr;
			break;

		default:
			ret = -EINVAL;
		}
		if (ret < 0)
			goto out_free;
	}

	ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
			    parm64, parm_count, vbox_status);
	if (ret < 0)
		goto out_free;

	/* Copy back. */
	for (i = 0; i < parm_count; i++) {
		switch (parm64[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm32[i].u.value32 = parm64[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm32[i].u.value64 = parm64[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm32[i].u.pointer.size = parm64[i].u.pointer.size;
			break;

		default:
			WARN_ON(1);
			ret = -EINVAL;
		}
	}

out_free:
	kfree(parm64);
	return ret;
}
#endif
static const int vbg_status_code_to_errno_table[] = {
	[-VERR_ACCESS_DENIED] = -EPERM,
	[-VERR_FILE_NOT_FOUND] = -ENOENT,
	[-VERR_PROCESS_NOT_FOUND] = -ESRCH,
	[-VERR_INTERRUPTED] = -EINTR,
	[-VERR_DEV_IO_ERROR] = -EIO,
	[-VERR_TOO_MUCH_DATA] = -E2BIG,
	[-VERR_BAD_EXE_FORMAT] = -ENOEXEC,
	[-VERR_INVALID_HANDLE] = -EBADF,
	[-VERR_TRY_AGAIN] = -EAGAIN,
	[-VERR_NO_MEMORY] = -ENOMEM,
	[-VERR_INVALID_POINTER] = -EFAULT,
	[-VERR_RESOURCE_BUSY] = -EBUSY,
	[-VERR_ALREADY_EXISTS] = -EEXIST,
	[-VERR_NOT_SAME_DEVICE] = -EXDEV,
	[-VERR_NOT_A_DIRECTORY] = -ENOTDIR,
	[-VERR_PATH_NOT_FOUND] = -ENOTDIR,
	[-VERR_INVALID_NAME] = -ENOENT,
	[-VERR_IS_A_DIRECTORY] = -EISDIR,
	[-VERR_INVALID_PARAMETER] = -EINVAL,
	[-VERR_TOO_MANY_OPEN_FILES] = -ENFILE,
	[-VERR_INVALID_FUNCTION] = -ENOTTY,
	[-VERR_SHARING_VIOLATION] = -ETXTBSY,
	[-VERR_FILE_TOO_BIG] = -EFBIG,
	[-VERR_DISK_FULL] = -ENOSPC,
	[-VERR_SEEK_ON_DEVICE] = -ESPIPE,
	[-VERR_WRITE_PROTECT] = -EROFS,
	[-VERR_BROKEN_PIPE] = -EPIPE,
	[-VERR_DEADLOCK] = -EDEADLK,
	[-VERR_FILENAME_TOO_LONG] = -ENAMETOOLONG,
	[-VERR_FILE_LOCK_FAILED] = -ENOLCK,
	[-VERR_NOT_IMPLEMENTED] = -ENOSYS,
	[-VERR_NOT_SUPPORTED] = -ENOSYS,
	[-VERR_DIR_NOT_EMPTY] = -ENOTEMPTY,
	[-VERR_TOO_MANY_SYMLINKS] = -ELOOP,
	[-VERR_NO_MORE_FILES] = -ENODATA,
	[-VERR_NO_DATA] = -ENODATA,
	[-VERR_NET_NO_NETWORK] = -ENONET,
	[-VERR_NET_NOT_UNIQUE_NAME] = -ENOTUNIQ,
	[-VERR_NO_TRANSLATION] = -EILSEQ,
	[-VERR_NET_NOT_SOCKET] = -ENOTSOCK,
	[-VERR_NET_DEST_ADDRESS_REQUIRED] = -EDESTADDRREQ,
	[-VERR_NET_MSG_SIZE] = -EMSGSIZE,
	[-VERR_NET_PROTOCOL_TYPE] = -EPROTOTYPE,
	[-VERR_NET_PROTOCOL_NOT_AVAILABLE] = -ENOPROTOOPT,
	[-VERR_NET_PROTOCOL_NOT_SUPPORTED] = -EPROTONOSUPPORT,
	[-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED] = -ESOCKTNOSUPPORT,
	[-VERR_NET_OPERATION_NOT_SUPPORTED] = -EOPNOTSUPP,
	[-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED] = -EPFNOSUPPORT,
	[-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED] = -EAFNOSUPPORT,
	[-VERR_NET_ADDRESS_IN_USE] = -EADDRINUSE,
	[-VERR_NET_ADDRESS_NOT_AVAILABLE] = -EADDRNOTAVAIL,
	[-VERR_NET_DOWN] = -ENETDOWN,
	[-VERR_NET_UNREACHABLE] = -ENETUNREACH,
	[-VERR_NET_CONNECTION_RESET] = -ENETRESET,
	[-VERR_NET_CONNECTION_ABORTED] = -ECONNABORTED,
	[-VERR_NET_CONNECTION_RESET_BY_PEER] = -ECONNRESET,
	[-VERR_NET_NO_BUFFER_SPACE] = -ENOBUFS,
	[-VERR_NET_ALREADY_CONNECTED] = -EISCONN,
	[-VERR_NET_NOT_CONNECTED] = -ENOTCONN,
	[-VERR_NET_SHUTDOWN] = -ESHUTDOWN,
	[-VERR_NET_TOO_MANY_REFERENCES] = -ETOOMANYREFS,
	[-VERR_TIMEOUT] = -ETIMEDOUT,
	[-VERR_NET_CONNECTION_REFUSED] = -ECONNREFUSED,
	[-VERR_NET_HOST_DOWN] = -EHOSTDOWN,
	[-VERR_NET_HOST_UNREACHABLE] = -EHOSTUNREACH,
	[-VERR_NET_ALREADY_IN_PROGRESS] = -EALREADY,
	[-VERR_NET_IN_PROGRESS] = -EINPROGRESS,
	[-VERR_MEDIA_NOT_PRESENT] = -ENOMEDIUM,
	[-VERR_MEDIA_NOT_RECOGNIZED] = -EMEDIUMTYPE,
};
int vbg_status_code_to_errno(int rc)
{
	if (rc >= 0)
		return 0;

	rc = -rc;
	if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
	    vbg_status_code_to_errno_table[rc] == 0) {
		vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
		return -EPROTO;
	}

	return vbg_status_code_to_errno_table[rc];
}
EXPORT_SYMBOL(vbg_status_code_to_errno);
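/*
 * Usage sketch (not compiled). Note that any non-negative VBox status,
 * including informational codes like VINF_HGCM_ASYNC_EXECUTE, translates
 * to 0, so callers must not expect an info code to survive translation.
 */
#if 0
static int example_status_translation(int vbox_status)
{
	/* VERR_NO_MEMORY -> -ENOMEM, VINF_SUCCESS -> 0, unknown -> -EPROTO */
	return vbg_status_code_to_errno(vbox_status);
}
#endif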