/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp,
 * VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn.
 *
 * Copyright (C) 2006-2016 Oracle Corporation
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include "vboxguest_core.h"
/* Get the pointer to the first parameter of a HGCM call request. */
#define VMMDEV_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))
/* The max parameter buffer size for a user request. */
#define VBG_MAX_HGCM_USER_PARM		(24 * SZ_1M)
/* The max parameter buffer size for a kernel request. */
#define VBG_MAX_HGCM_KERNEL_PARM	(16 * SZ_1M)

#define VBG_DEBUG_PORT			0x504
/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
static DEFINE_SPINLOCK(vbg_log_lock);
static char vbg_log_buf[128];
#define VBG_LOG(name, pr_func) \
void name(const char *fmt, ...) \
{ \
	unsigned long flags; \
	va_list args; \
	int i, count; \
 \
	va_start(args, fmt); \
	spin_lock_irqsave(&vbg_log_lock, flags); \
 \
	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args); \
	for (i = 0; i < count; i++) \
		outb(vbg_log_buf[i], VBG_DEBUG_PORT); \
 \
	pr_func("%s", vbg_log_buf); \
 \
	spin_unlock_irqrestore(&vbg_log_lock, flags); \
	va_end(args); \
} \
EXPORT_SYMBOL(name)
VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
{
	struct vmmdev_request_header *req;
	int order = get_order(PAGE_ALIGN(len));

	req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
	if (!req)
		return NULL;

	memset(req, 0xaa, len);

	req->size = len;
	req->version = VMMDEV_REQUEST_HEADER_VERSION;
	req->request_type = req_type;
	req->rc = VERR_GENERAL_FAILURE;
	req->reserved1 = 0;
	req->reserved2 = 0;

	return req;
}
void vbg_req_free(void *req, size_t len)
{
	if (!req)
		return;

	free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
}
/* Note this function returns a VBox status code, not a negative errno!! */
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
	unsigned long phys_req = virt_to_phys(req);

	outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
	/*
	 * The host changes the request as a result of the outl, make sure
	 * the outl and any reads of the req happen in the correct order.
	 */
	mb();

	return ((struct vmmdev_request_header *)req)->rc;
}
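
/*
 * Illustrative sketch, not part of the driver: the usual VMMDev request flow
 * built on the helpers above is alloc -> fill -> perform -> convert status ->
 * free.  "struct vmmdev_dummy_req", its "value" field and VMMDEVREQ_DUMMY are
 * hypothetical placeholders; real callers use the request structs and request
 * types from vbox_vmmdev_types.h.
 *
 *	struct vmmdev_dummy_req *req;
 *	int rc;
 *
 *	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_DUMMY);
 *	if (!req)
 *		return -ENOMEM;
 *
 *	req->value = 42;			// fill in the request body
 *	rc = vbg_req_perform(gdev, req);	// VBox status, not an errno
 *	vbg_req_free(req, sizeof(*req));
 *
 *	return vbg_status_code_to_errno(rc);
 */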
static bool hgcm_req_done(struct vbg_dev *gdev,
			  struct vmmdev_hgcmreq_header *header)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	done = header->flags & VMMDEV_HGCM_REQ_DONE;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return done;
}
int vbg_hgcm_connect(struct vbg_dev *gdev,
		     struct vmmdev_hgcm_service_location *loc,
		     u32 *client_id, int *vbox_status)
{
	struct vmmdev_hgcm_connect *hgcm_connect = NULL;
	int rc;

	hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
				     VMMDEVREQ_HGCM_CONNECT);
	if (!hgcm_connect)
		return -ENOMEM;

	hgcm_connect->header.flags = 0;
	memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
	hgcm_connect->client_id = 0;

	rc = vbg_req_perform(gdev, hgcm_connect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_connect->header));

	if (rc >= 0) {
		*client_id = hgcm_connect->client_id;
		rc = hgcm_connect->header.result;
	}

	vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_connect);
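
/*
 * Illustrative sketch, not part of the driver: connecting to and
 * disconnecting from an HGCM host service.  The service name is made up and
 * the vmmdev_hgcm_service_location initialization is assumed from
 * vbox_vmmdev_types.h; note the double error convention, where the int
 * return value is a negative errno for transport problems and *vbox_status
 * carries the VBox status code of the operation itself.
 *
 *	struct vmmdev_hgcm_service_location loc = {
 *		.type = VMMDEV_HGCM_LOC_LOCALHOST_EXISTING,
 *	};
 *	u32 client_id;
 *	int ret, vbox_status;
 *
 *	strscpy(loc.u.localhost.service_name, "SomeHostService",
 *		sizeof(loc.u.localhost.service_name));
 *
 *	ret = vbg_hgcm_connect(gdev, &loc, &client_id, &vbox_status);
 *	if (ret < 0)
 *		return ret;
 *	if (vbox_status < 0)
 *		return vbg_status_code_to_errno(vbox_status);
 *
 *	// ... issue vbg_hgcm_call()s using client_id ...
 *
 *	vbg_hgcm_disconnect(gdev, client_id, &vbox_status);
 */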
int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
{
	struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
	int rc;

	hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
					VMMDEVREQ_HGCM_DISCONNECT);
	if (!hgcm_disconnect)
		return -ENOMEM;

	hgcm_disconnect->header.flags = 0;
	hgcm_disconnect->client_id = client_id;

	rc = vbg_req_perform(gdev, hgcm_disconnect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_disconnect->header));

	if (rc >= 0)
		rc = hgcm_disconnect->header.result;

	vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_disconnect);
static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
{
	u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));

	return size >> PAGE_SHIFT;
}
static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
{
	u32 page_count;

	page_count = hgcm_call_buf_size_in_pages(buf, len);
	*extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
static int hgcm_call_preprocess_linaddr(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	void **bounce_buf_ret, size_t *extra)
{
	void *buf, *bounce_buf;
	bool copy_in;
	u32 len;
	int ret;

	buf = (void *)src_parm->u.pointer.u.linear_addr;
	len = src_parm->u.pointer.size;
	copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;

	if (len > VBG_MAX_HGCM_USER_PARM)
		return -E2BIG;

	bounce_buf = kvmalloc(len, GFP_KERNEL);
	if (!bounce_buf)
		return -ENOMEM;

	*bounce_buf_ret = bounce_buf;

	if (copy_in) {
		ret = copy_from_user(bounce_buf, (void __user *)buf, len);
		if (ret)
			return -EFAULT;
	} else {
		memset(bounce_buf, 0, len);
	}

	hgcm_call_add_pagelist_size(bounce_buf, len, extra);
	return 0;
}
/**
 * Preprocesses the HGCM call: validates parameters, allocates bounce buffers
 * and figures out how much extra storage is needed for page lists.
 * Return: 0 or negative errno value.
 * @src_parm:         Pointer to source function call parameters.
 * @parm_count:       Number of function call parameters.
 * @bounce_bufs_ret:  Where to return the allocated bouncebuffer array.
 * @extra:            Where to return the extra request space needed for
 *                    physical page lists.
 */
static int hgcm_call_preprocess(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
{
	void *buf, **bounce_bufs = NULL;
	u32 i, len;
	int ret;

	for (i = 0; i < parm_count; i++, src_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			if (!bounce_bufs) {
				bounce_bufs = kcalloc(parm_count,
						      sizeof(void *),
						      GFP_KERNEL);
				if (!bounce_bufs)
					return -ENOMEM;

				*bounce_bufs_ret = bounce_bufs;
			}

			ret = hgcm_call_preprocess_linaddr(src_parm,
							   &bounce_bufs[i],
							   extra);
			if (ret)
				return ret;

			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			len = src_parm->u.pointer.size;
			if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
				return -E2BIG;

			hgcm_call_add_pagelist_size(buf, len, extra);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * Translates linear address types to page list direction flags.
 *
 * Return: page list flags.
 */
static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
	enum vmmdev_hgcm_function_parameter_type type)
{
	switch (type) {
	default:
		WARN_ON(1);
		fallthrough;
	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
		return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
	}
}
static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
	enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
{
	struct vmmdev_hgcm_pagelist *dst_pg_lst;
	struct page *page;
	bool is_vmalloc;
	u32 i, page_count;

	dst_parm->type = type;

	if (len == 0) {
		dst_parm->u.pointer.size = 0;
		dst_parm->u.pointer.u.linear_addr = 0;
		return;
	}

	dst_pg_lst = (void *)call + *off_extra;
	page_count = hgcm_call_buf_size_in_pages(buf, len);
	is_vmalloc = is_vmalloc_addr(buf);

	dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
	dst_parm->u.page_list.size = len;
	dst_parm->u.page_list.offset = *off_extra;
	dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
	dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
	dst_pg_lst->page_count = page_count;

	for (i = 0; i < page_count; i++) {
		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		dst_pg_lst->pages[i] = page_to_phys(page);
		buf += PAGE_SIZE;
	}

	*off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
/**
 * Initializes the call request that we're sending to the host.
 * @call:         The call to initialize.
 * @client_id:    The client ID of the caller.
 * @function:     The function number of the function to call.
 * @src_parm:     Pointer to source function call parameters.
 * @parm_count:   Number of function call parameters.
 * @bounce_bufs:  The bouncebuffer array.
 */
static void hgcm_call_init_call(
	struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void **bounce_bufs)
{
	struct vmmdev_hgcm_function_parameter *dst_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
	void *buf;

	call->header.flags = 0;
	call->header.result = VINF_SUCCESS;
	call->client_id = client_id;
	call->function = function;
	call->parm_count = parm_count;

	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			hgcm_call_init_linaddr(call, dst_parm, buf,
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		default:
			WARN_ON(1);
			dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
		}
	}
}
/**
 * Tries to cancel a pending HGCM call.
 *
 * Return: VBox status code
 */
static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
{
	int rc;

	/*
	 * We use a pre-allocated request for cancellations, which is
	 * protected by cancel_req_mutex. This means that all cancellations
	 * get serialized, this should be fine since they should be rare.
	 */
	mutex_lock(&gdev->cancel_req_mutex);
	gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
	rc = vbg_req_perform(gdev, gdev->cancel_req);
	mutex_unlock(&gdev->cancel_req_mutex);

	if (rc == VERR_NOT_IMPLEMENTED) {
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
		call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;

		rc = vbg_req_perform(gdev, call);
		if (rc == VERR_INVALID_PARAMETER)
			rc = VERR_NOT_FOUND;
	}

	if (rc >= 0)
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;

	return rc;
}
/**
 * Performs the call and completion wait.
 * Return: 0 or negative errno value.
 * @gdev:        The VBoxGuest device extension.
 * @call:        The call to execute.
 * @timeout_ms:  Timeout in ms.
 * @leak_it:     Where to return the leak it / free it indicator; set when a
 *               timed-out request could not be cancelled and must be leaked.
 */
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
			    u32 timeout_ms, bool *leak_it)
{
	int rc, cancel_rc, ret;
	long timeout;

	*leak_it = false;

	rc = vbg_req_perform(gdev, call);

	/*
	 * If the call failed, then pretend success. Upper layers will
	 * interpret the result code in the packet.
	 */
	if (rc < 0) {
		call->header.result = rc;
		return 0;
	}

	if (rc != VINF_HGCM_ASYNC_EXECUTE)
		return 0;

	/* Host decided to process the request asynchronously, wait for it */
	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	timeout = wait_event_interruptible_timeout(
					gdev->hgcm_wq,
					hgcm_req_done(gdev, &call->header),
					timeout);

	/* timeout > 0 means hgcm_req_done has returned true, so success */
	if (timeout > 0)
		return 0;

	if (timeout == 0)
		ret = -ETIMEDOUT;
	else
		ret = -EINTR;

	/* Cancel the request */
	cancel_rc = hgcm_cancel_call(gdev, call);
	if (cancel_rc >= 0)
		return ret;

	/*
	 * Failed to cancel, this should mean that the cancel has lost the
	 * race with normal completion, wait while the host completes it.
	 */
	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
		timeout = msecs_to_jiffies(500);
	else
		timeout = msecs_to_jiffies(2000);

	timeout = wait_event_timeout(gdev->hgcm_wq,
				     hgcm_req_done(gdev, &call->header),
				     timeout);

	if (WARN_ON(timeout == 0)) {
		/* We really should never get here */
		vbg_err("%s: Call timed out and cancellation failed, leaking the request\n",
			__func__);
		*leak_it = true;
		return ret;
	}

	/* The call has completed normally after all */
	return 0;
}
/**
 * Copies the result of the call back to the caller info structure and user
 * buffers.
 * Return: 0 or negative errno value.
 * @call:            HGCM call request.
 * @dst_parm:        Pointer to function call parameters destination.
 * @parm_count:      Number of function call parameters.
 * @bounce_bufs:     The bouncebuffer array.
 */
static int hgcm_call_copy_back_result(
	const struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm,
	u32 parm_count, void **bounce_bufs)
{
	const struct vmmdev_hgcm_function_parameter *src_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	void __user *p;
	int ret;
	u32 i;

	/* Copy back parameters. */
	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (dst_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
			dst_parm->u.page_list.size = src_parm->u.page_list.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;

			p = (void __user *)dst_parm->u.pointer.u.linear_addr;
			ret = copy_to_user(p, bounce_bufs[i],
					   min(src_parm->u.pointer.size,
					       dst_parm->u.pointer.size));
			if (ret)
				return -EFAULT;
			break;

		default:
			WARN_ON(1);
			return -EINVAL;
		}
	}

	return 0;
}
int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
		  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
		  u32 parm_count, int *vbox_status)
{
	struct vmmdev_hgcm_call *call;
	void **bounce_bufs = NULL;
	bool leak_it;
	size_t size;
	int i, ret;

	size = sizeof(struct vmmdev_hgcm_call) +
		   parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	/*
	 * Validate and buffer the parameters for the call. This also increases
	 * size with the amount of extra space needed for page lists.
	 */
	ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
	if (ret) {
		/* Even on error bounce bufs may still have been allocated */
		goto free_bounce_bufs;
	}

	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
	if (!call) {
		ret = -ENOMEM;
		goto free_bounce_bufs;
	}

	hgcm_call_init_call(call, client_id, function, parms, parm_count,
			    bounce_bufs);

	ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
	if (ret == 0) {
		*vbox_status = call->header.result;
		ret = hgcm_call_copy_back_result(call, parms, parm_count,
						 bounce_bufs);
	}

	if (!leak_it)
		vbg_req_free(call, size);

free_bounce_bufs:
	if (bounce_bufs) {
		for (i = 0; i < parm_count; i++)
			kvfree(bounce_bufs[i]);
		kfree(bounce_bufs);
	}

	return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call);
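
/*
 * Illustrative sketch, not part of the driver: issuing an HGCM function call
 * with one 32-bit parameter and one kernel buffer parameter.  The function
 * number (3) and the 30 second timeout are made up; the actual parameter
 * layout is dictated by whichever host service client_id refers to.
 *
 *	struct vmmdev_hgcm_function_parameter parms[2] = {};
 *	char *kbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
 *	int ret, vbox_status;
 *
 *	if (!kbuf)
 *		return -ENOMEM;
 *
 *	parms[0].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
 *	parms[0].u.value32 = 0;
 *	parms[1].type = VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN;
 *	parms[1].u.pointer.size = PAGE_SIZE;
 *	parms[1].u.pointer.u.linear_addr = (uintptr_t)kbuf;
 *
 *	ret = vbg_hgcm_call(gdev, client_id, 3, 30000, parms,
 *			    ARRAY_SIZE(parms), &vbox_status);
 *	kfree(kbuf);
 *	if (ret < 0)
 *		return ret;
 *
 *	return vbg_status_code_to_errno(vbox_status);
 */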
#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
	int *vbox_status)
{
	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
	u32 i, size;
	int ret = 0;

	/* KISS allocate a temporary request and convert the parameters. */
	size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	parm64 = kzalloc(size, GFP_KERNEL);
	if (!parm64)
		return -ENOMEM;

	for (i = 0; i < parm_count; i++) {
		switch (parm32[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
			parm64[i].u.value32 = parm32[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
			parm64[i].u.value64 = parm32[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm64[i].type = parm32[i].type;
			parm64[i].u.pointer.size = parm32[i].u.pointer.size;
			parm64[i].u.pointer.u.linear_addr =
				parm32[i].u.pointer.u.linear_addr;
			break;

		default:
			ret = -EINVAL;
		}
		if (ret < 0)
			goto out_free;
	}

	ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
			    parm64, parm_count, vbox_status);
	if (ret < 0)
		goto out_free;

	/* Copy back. */
	for (i = 0; i < parm_count; i++) {
		switch (parm64[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm32[i].u.value32 = parm64[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm32[i].u.value64 = parm64[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm32[i].u.pointer.size = parm64[i].u.pointer.size;
			break;

		default:
			WARN_ON(1);
			ret = -EINVAL;
		}
	}

out_free:
	kfree(parm64);
	return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call32);
#endif
static const int vbg_status_code_to_errno_table[] = {
	[-VERR_ACCESS_DENIED] = -EPERM,
	[-VERR_FILE_NOT_FOUND] = -ENOENT,
	[-VERR_PROCESS_NOT_FOUND] = -ESRCH,
	[-VERR_INTERRUPTED] = -EINTR,
	[-VERR_DEV_IO_ERROR] = -EIO,
	[-VERR_TOO_MUCH_DATA] = -E2BIG,
	[-VERR_BAD_EXE_FORMAT] = -ENOEXEC,
	[-VERR_INVALID_HANDLE] = -EBADF,
	[-VERR_TRY_AGAIN] = -EAGAIN,
	[-VERR_NO_MEMORY] = -ENOMEM,
	[-VERR_INVALID_POINTER] = -EFAULT,
	[-VERR_RESOURCE_BUSY] = -EBUSY,
	[-VERR_ALREADY_EXISTS] = -EEXIST,
	[-VERR_NOT_SAME_DEVICE] = -EXDEV,
	[-VERR_NOT_A_DIRECTORY] = -ENOTDIR,
	[-VERR_PATH_NOT_FOUND] = -ENOTDIR,
	[-VERR_INVALID_NAME] = -ENOENT,
	[-VERR_IS_A_DIRECTORY] = -EISDIR,
	[-VERR_INVALID_PARAMETER] = -EINVAL,
	[-VERR_TOO_MANY_OPEN_FILES] = -ENFILE,
	[-VERR_INVALID_FUNCTION] = -ENOTTY,
	[-VERR_SHARING_VIOLATION] = -ETXTBSY,
	[-VERR_FILE_TOO_BIG] = -EFBIG,
	[-VERR_DISK_FULL] = -ENOSPC,
	[-VERR_SEEK_ON_DEVICE] = -ESPIPE,
	[-VERR_WRITE_PROTECT] = -EROFS,
	[-VERR_BROKEN_PIPE] = -EPIPE,
	[-VERR_DEADLOCK] = -EDEADLK,
	[-VERR_FILENAME_TOO_LONG] = -ENAMETOOLONG,
	[-VERR_FILE_LOCK_FAILED] = -ENOLCK,
	[-VERR_NOT_IMPLEMENTED] = -ENOSYS,
	[-VERR_NOT_SUPPORTED] = -ENOSYS,
	[-VERR_DIR_NOT_EMPTY] = -ENOTEMPTY,
	[-VERR_TOO_MANY_SYMLINKS] = -ELOOP,
	[-VERR_NO_MORE_FILES] = -ENODATA,
	[-VERR_NO_DATA] = -ENODATA,
	[-VERR_NET_NO_NETWORK] = -ENONET,
	[-VERR_NET_NOT_UNIQUE_NAME] = -ENOTUNIQ,
	[-VERR_NO_TRANSLATION] = -EILSEQ,
	[-VERR_NET_NOT_SOCKET] = -ENOTSOCK,
	[-VERR_NET_DEST_ADDRESS_REQUIRED] = -EDESTADDRREQ,
	[-VERR_NET_MSG_SIZE] = -EMSGSIZE,
	[-VERR_NET_PROTOCOL_TYPE] = -EPROTOTYPE,
	[-VERR_NET_PROTOCOL_NOT_AVAILABLE] = -ENOPROTOOPT,
	[-VERR_NET_PROTOCOL_NOT_SUPPORTED] = -EPROTONOSUPPORT,
	[-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED] = -ESOCKTNOSUPPORT,
	[-VERR_NET_OPERATION_NOT_SUPPORTED] = -EOPNOTSUPP,
	[-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED] = -EPFNOSUPPORT,
	[-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED] = -EAFNOSUPPORT,
	[-VERR_NET_ADDRESS_IN_USE] = -EADDRINUSE,
	[-VERR_NET_ADDRESS_NOT_AVAILABLE] = -EADDRNOTAVAIL,
	[-VERR_NET_DOWN] = -ENETDOWN,
	[-VERR_NET_UNREACHABLE] = -ENETUNREACH,
	[-VERR_NET_CONNECTION_RESET] = -ENETRESET,
	[-VERR_NET_CONNECTION_ABORTED] = -ECONNABORTED,
	[-VERR_NET_CONNECTION_RESET_BY_PEER] = -ECONNRESET,
	[-VERR_NET_NO_BUFFER_SPACE] = -ENOBUFS,
	[-VERR_NET_ALREADY_CONNECTED] = -EISCONN,
	[-VERR_NET_NOT_CONNECTED] = -ENOTCONN,
	[-VERR_NET_SHUTDOWN] = -ESHUTDOWN,
	[-VERR_NET_TOO_MANY_REFERENCES] = -ETOOMANYREFS,
	[-VERR_TIMEOUT] = -ETIMEDOUT,
	[-VERR_NET_CONNECTION_REFUSED] = -ECONNREFUSED,
	[-VERR_NET_HOST_DOWN] = -EHOSTDOWN,
	[-VERR_NET_HOST_UNREACHABLE] = -EHOSTUNREACH,
	[-VERR_NET_ALREADY_IN_PROGRESS] = -EALREADY,
	[-VERR_NET_IN_PROGRESS] = -EINPROGRESS,
	[-VERR_MEDIA_NOT_PRESENT] = -ENOMEDIUM,
	[-VERR_MEDIA_NOT_RECOGNIZED] = -EMEDIUMTYPE,
};
int vbg_status_code_to_errno(int rc)
{
	if (rc >= 0)
		return 0;

	rc = -rc;
	if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
	    vbg_status_code_to_errno_table[rc] == 0) {
		vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
		return -EPROTO;
	}

	return vbg_status_code_to_errno_table[rc];
}
EXPORT_SYMBOL(vbg_status_code_to_errno);