/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
 *
 * Copyright (C) 2007-2016 Oracle Corporation
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include <linux/vmalloc.h>
#include "vboxguest_core.h"
#include "vboxguest_version.h"

/* Get the pointer to the first HGCM parameter. */
#define VBG_IOCTL_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
/* Get the pointer to the first HGCM parameter in a 32-bit request. */
#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
	((struct vmmdev_hgcm_function_parameter32 *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))

#define GUEST_MAPPINGS_TRIES	5

#define VBG_KERNEL_REQUEST \
	(VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
	 VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
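
/*
 * Illustrative sketch (not part of the driver): nearly every host
 * interaction below follows the same request lifecycle built on these
 * helpers.  A kernel-internal caller does roughly:
 *
 *	struct vmmdev_heartbeat *req;
 *	int rc;
 *
 *	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
 *			    VBG_KERNEL_REQUEST);
 *	if (!req)
 *		return -ENOMEM;
 *	// ... fill in request-specific fields ...
 *	rc = vbg_req_perform(gdev, req);	// returns a VBox status code
 *	vbg_req_free(req, sizeof(*req));
 *	return vbg_status_code_to_errno(rc);	// map to 0 / negative errno
 */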

/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so we try several times.
 *
 * Failure to reserve the guest mappings is ignored.
 *
 * @gdev:		The Guest extension device.
 */
static void vbg_guest_mappings_init(struct vbg_dev *gdev)
{
	struct vmmdev_hypervisorinfo *req;
	void *guest_mappings[GUEST_MAPPINGS_TRIES];
	struct page **pages = NULL;
	u32 size, hypervisor_size;
	int i, rc;

	/* Query the required space. */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		goto out;

	/*
	 * The VMM will report back if there is nothing it wants to map, like
	 * for instance in VT-x and AMD-V mode.
	 */
	if (req->hypervisor_size == 0)
		goto out;

	hypervisor_size = req->hypervisor_size;
	/* Add 4 MiB so that we can align the vmap to 4 MiB as the host requires. */
	size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;

	pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		goto out;

	gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
	if (!gdev->guest_mappings_dummy_page)
		goto out;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = gdev->guest_mappings_dummy_page;

	/*
	 * Try several times, the VMM might not accept some addresses because
	 * of address clashes between the three contexts.
	 */
	for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
		guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
					 VM_MAP, PAGE_KERNEL_RO);
		if (!guest_mappings[i])
			break;

		req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
		req->header.rc = VERR_INTERNAL_ERROR;
		req->hypervisor_size = hypervisor_size;
		req->hypervisor_start =
			(unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);

		rc = vbg_req_perform(gdev, req);
		if (rc >= 0) {
			gdev->guest_mappings = guest_mappings[i];
			break;
		}
	}

	/* Free vmap's from failed attempts. */
	while (--i >= 0)
		vunmap(guest_mappings[i]);

	/* On failure free the dummy-page backing the vmap */
	if (!gdev->guest_mappings) {
		__free_page(gdev->guest_mappings_dummy_page);
		gdev->guest_mappings_dummy_page = NULL;
	}

out:
	vbg_req_free(req, sizeof(*req));
	kfree(pages);
}
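
/*
 * Illustrative sketch (not part of the driver): the over-allocation above
 * exists purely so a 4 MiB aligned address can be carved out of the vmap.
 * For example, with PAGE_SIZE == 4096 and a reported hypervisor_size of
 * 0x400000 (4 MiB):
 *
 *	size = PAGE_ALIGN(0x400000) + SZ_4M;	// 0x800000, 8 MiB
 *	// If vmap() returns 0xffffc90000123000, then
 *	// PTR_ALIGN(p, SZ_4M) == 0xffffc90000400000, which still leaves
 *	// hypervisor_size bytes inside the 8 MiB mapping.
 */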

/**
 * Undo what vbg_guest_mappings_init did.
 *
 * @gdev:		The Guest extension device.
 */
static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
{
	struct vmmdev_hypervisorinfo *req;
	int rc;

	if (!gdev->guest_mappings)
		return;

	/*
	 * Tell the host that we're going to free the memory we reserved for
	 * it, then free it up. (Leak the memory if anything goes wrong here.)
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;

	rc = vbg_req_perform(gdev, req);

	vbg_req_free(req, sizeof(*req));

	if (rc < 0) {
		vbg_err("%s error: %d\n", __func__, rc);
		return;
	}

	vunmap(gdev->guest_mappings);
	gdev->guest_mappings = NULL;

	__free_page(gdev->guest_mappings_dummy_page);
	gdev->guest_mappings_dummy_page = NULL;
}

/**
 * Report the guest information to the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_report_guest_info(struct vbg_dev *gdev)
{
	/*
	 * Allocate and fill in the two guest info reports.
	 */
	struct vmmdev_guest_info *req1 = NULL;
	struct vmmdev_guest_info2 *req2 = NULL;
	int rc, ret = -ENOMEM;

	req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
			     VBG_KERNEL_REQUEST);
	req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
			     VBG_KERNEL_REQUEST);
	if (!req1 || !req2)
		goto out_free;

	req1->interface_version = VMMDEV_VERSION;
	req1->os_type = VMMDEV_OSTYPE_LINUX26;
#if __BITS_PER_LONG == 64
	req1->os_type |= VMMDEV_OSTYPE_X64;
#endif

	req2->additions_major = VBG_VERSION_MAJOR;
	req2->additions_minor = VBG_VERSION_MINOR;
	req2->additions_build = VBG_VERSION_BUILD;
	req2->additions_revision = VBG_SVN_REV;
	req2->additions_features =
		VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
	strlcpy(req2->name, VBG_VERSION_STRING,
		sizeof(req2->name));

	/*
	 * There are two protocols here:
	 *	1. INFO2 + INFO1. Supported by >=3.2.51.
	 *	2. INFO1 and optionally INFO2. The old protocol.
	 *
	 * We try protocol 1 (INFO2 first) first.  The INFO2 request fails
	 * with VERR_NOT_SUPPORTED if the VMMDev does not support it
	 * (message ordering requirement), in which case we fall back to
	 * the old protocol.
	 */
	rc = vbg_req_perform(gdev, req2);
	if (rc >= 0) {
		rc = vbg_req_perform(gdev, req1);
	} else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
		rc = vbg_req_perform(gdev, req1);
		if (rc >= 0) {
			rc = vbg_req_perform(gdev, req2);
			if (rc == VERR_NOT_IMPLEMENTED)
				rc = VINF_SUCCESS;
		}
	}
	ret = vbg_status_code_to_errno(rc);

out_free:
	vbg_req_free(req2, sizeof(*req2));
	vbg_req_free(req1, sizeof(*req1));
	return ret;
}

/**
 * Report the guest driver status to the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @active:		Flag whether the driver is now active or not.
 */
static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
{
	struct vmmdev_guest_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
	if (active)
		req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
	else
		req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
	req->flags = 0;

	rc = vbg_req_perform(gdev, req);
	if (rc == VERR_NOT_IMPLEMENTED)	/* Compatibility with older hosts. */
		rc = VINF_SUCCESS;

	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}

/**
 * Inflate the balloon by one chunk. The caller owns the balloon mutex.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @chunk_idx:		Index of the chunk.
 */
static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages;
	int i, rc, ret;

	pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
			      sizeof(*pages),
			      GFP_KERNEL | __GFP_NOWARN);
	if (!pages)
		return -ENOMEM;

	req->header.size = sizeof(*req);
	req->inflate = true;
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto out_error;
		}

		req->phys_page[i] = page_to_phys(pages[i]);
	}

	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		ret = vbg_status_code_to_errno(rc);
		goto out_error;
	}

	gdev->mem_balloon.pages[chunk_idx] = pages;

	return 0;

out_error:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);

	return ret;
}

/**
 * Deflate the balloon by one chunk. The caller owns the balloon mutex.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @chunk_idx:		Index of the chunk.
 */
static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages = gdev->mem_balloon.pages[chunk_idx];
	int i, rc;

	req->header.size = sizeof(*req);
	req->inflate = false;
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		req->phys_page[i] = page_to_phys(pages[i]);

	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		return vbg_status_code_to_errno(rc);
	}

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		__free_page(pages[i]);
	kfree(pages);
	gdev->mem_balloon.pages[chunk_idx] = NULL;

	return 0;
}
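
/*
 * Illustrative sketch (not part of the driver), assuming the usual
 * VMMDEV_MEMORY_BALLOON_CHUNK_PAGES value of 256 (i.e. 1 MiB chunks with
 * 4 KiB pages): a host request to balloon 128 MiB works out as
 *
 *	chunks = (128 << 20) / (256 * PAGE_SIZE);	// 128 chunks
 *
 * and vbg_balloon_work() below inflates or deflates one such chunk per
 * vbg_balloon_inflate()/vbg_balloon_deflate() call.
 */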

/**
 * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
 * the host wants the balloon to be and adjust accordingly.
 */
static void vbg_balloon_work(struct work_struct *work)
{
	struct vbg_dev *gdev =
		container_of(work, struct vbg_dev, mem_balloon.work);
	struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
	u32 i, chunks;
	int rc, ret;

	/*
	 * Setting this bit means that we request the value from the host and
	 * change the guest memory balloon according to the returned value.
	 */
	req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		return;
	}

	/*
	 * The host always returns the same maximum amount of chunks, so
	 * we do this once.
	 */
	if (!gdev->mem_balloon.max_chunks) {
		gdev->mem_balloon.pages =
			devm_kcalloc(gdev->dev, req->phys_mem_chunks,
				     sizeof(struct page **), GFP_KERNEL);
		if (!gdev->mem_balloon.pages)
			return;

		gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
	}

	chunks = req->balloon_chunks;
	if (chunks > gdev->mem_balloon.max_chunks) {
		vbg_err("%s: illegal balloon size %u (max=%u)\n",
			__func__, chunks, gdev->mem_balloon.max_chunks);
		return;
	}

	if (chunks > gdev->mem_balloon.chunks) {
		/* inflate */
		for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
			ret = vbg_balloon_inflate(gdev, i);
			if (ret < 0)
				return;

			gdev->mem_balloon.chunks++;
		}
	} else {
		/* deflate */
		for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
			ret = vbg_balloon_deflate(gdev, i);
			if (ret < 0)
				return;

			gdev->mem_balloon.chunks--;
		}
	}
}

/**
 * Callback for heartbeat timer.
 */
static void vbg_heartbeat_timer(struct timer_list *t)
{
	struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);

	vbg_req_perform(gdev, gdev->guest_heartbeat_req);
	mod_timer(&gdev->heartbeat_timer,
		  jiffies + msecs_to_jiffies(gdev->heartbeat_interval_ms));
}

/**
 * Configure the host to check guest's heartbeat
 * and get heartbeat interval from the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @enabled:		Set true to enable guest heartbeat checks on host.
 */
static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
{
	struct vmmdev_heartbeat *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->enabled = enabled;
	req->interval_ns = 0;
	rc = vbg_req_perform(gdev, req);
	do_div(req->interval_ns, 1000000); /* ns -> ms */
	gdev->heartbeat_interval_ms = req->interval_ns;
	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}
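
/*
 * Illustrative sketch (not part of the driver): do_div() divides a 64-bit
 * value in place, which is why interval_ns can be reused as milliseconds
 * above.  For a host-reported interval of 2 seconds:
 *
 *	u64 interval_ns = 2000000000ULL;
 *	do_div(interval_ns, 1000000);	// interval_ns is now 2000 (ms)
 */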

/**
 * Initializes the heartbeat timer. This feature may be disabled by the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_heartbeat_init(struct vbg_dev *gdev)
{
	int ret;

	/* Make sure that heartbeat checking is disabled if we fail. */
	ret = vbg_heartbeat_host_config(gdev, false);
	if (ret < 0)
		return ret;

	ret = vbg_heartbeat_host_config(gdev, true);
	if (ret < 0)
		return ret;

	gdev->guest_heartbeat_req = vbg_req_alloc(
					sizeof(*gdev->guest_heartbeat_req),
					VMMDEVREQ_GUEST_HEARTBEAT,
					VBG_KERNEL_REQUEST);
	if (!gdev->guest_heartbeat_req)
		return -ENOMEM;

	vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
		 __func__, gdev->heartbeat_interval_ms);
	mod_timer(&gdev->heartbeat_timer, 0);

	return 0;
}

/**
 * Cleanup heartbeat code, stop HB timer and disable host heartbeat checking.
 * @gdev:		The Guest extension device.
 */
static void vbg_heartbeat_exit(struct vbg_dev *gdev)
{
	del_timer_sync(&gdev->heartbeat_timer);
	vbg_heartbeat_host_config(gdev, false);
	vbg_req_free(gdev->guest_heartbeat_req,
		     sizeof(*gdev->guest_heartbeat_req));
}

/**
 * Applies a change to the bit usage tracker.
 * Return: true if the mask changed, false if not.
 * @tracker:		The bit usage tracker.
 * @changed:		The bits to change.
 * @previous:		The previous value of the bits.
 */
static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
				u32 changed, u32 previous)
{
	bool global_change = false;

	while (changed) {
		u32 bit = ffs(changed) - 1;
		u32 bitmask = BIT(bit);

		if (bitmask & previous) {
			tracker->per_bit_usage[bit] -= 1;
			if (tracker->per_bit_usage[bit] == 0) {
				global_change = true;
				tracker->mask &= ~bitmask;
			}
		} else {
			tracker->per_bit_usage[bit] += 1;
			if (tracker->per_bit_usage[bit] == 1) {
				global_change = true;
				tracker->mask |= bitmask;
			}
		}

		changed &= ~bitmask;
	}

	return global_change;
}
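
/*
 * Illustrative sketch (not part of the driver): with two sessions both
 * enabling the same event bit, only the first enable and the last disable
 * change the global mask:
 *
 *	vbg_track_bit_usage(&t, BIT(0), 0);	 // usage 0->1, returns true
 *	vbg_track_bit_usage(&t, BIT(0), 0);	 // usage 1->2, returns false
 *	vbg_track_bit_usage(&t, BIT(0), BIT(0)); // usage 2->1, returns false
 *	vbg_track_bit_usage(&t, BIT(0), BIT(0)); // usage 1->0, returns true
 *
 * Only the calls that return true require re-programming the host.
 */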

/**
 * Init and termination worker for resetting the event filter on the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @fixed_events:	Fixed events (init time).
 */
static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
				       u32 fixed_events)
{
	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->not_mask = U32_MAX & ~fixed_events;
	req->or_mask = fixed_events;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}

/**
 * Changes the event filter mask for the given session.
 *
 * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
 * do session cleanup. Takes the session mutex.
 *
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The events to add.
 * @not_mask:			The events to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 */
static int vbg_set_session_event_filter(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	struct vmmdev_mask *req;
	u32 changed, previous;
	int rc, ret = 0;

	/*
	 * Allocate a request buffer before taking the mutex, when
	 * the session is being terminated the requestor is the kernel,
	 * as we're cleaning up.
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
			    session_termination ? VBG_KERNEL_REQUEST :
						  session->requestor);
	if (!req) {
		if (!session_termination)
			return -ENOMEM;
		/* Ignore allocation failure, we must do session cleanup. */
	}

	mutex_lock(&gdev->session_mutex);

	/* Apply the changes to the session mask. */
	previous = session->event_filter;
	session->event_filter |= or_mask;
	session->event_filter &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->event_filter;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
	or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;

	if (gdev->event_filter_host == or_mask || !req)
		goto out;

	gdev->event_filter_host = or_mask;
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		ret = vbg_status_code_to_errno(rc);

		/* Failed, roll back (unless it's session termination time). */
		gdev->event_filter_host = U32_MAX;
		if (session_termination)
			goto out;

		vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
				    session->event_filter);
		session->event_filter = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);
	vbg_req_free(req, sizeof(*req));

	return ret;
}
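
/*
 * Illustrative sketch (not part of the driver): the mask programmed into
 * the host is always the union of every session's filter plus the fixed
 * events.  If session A wants VMMDEV_EVENT_MOUSE_POSITION_CHANGED and
 * session B wants VMMDEV_EVENT_HGCM:
 *
 *	host filter = gdev->fixed_events
 *		    | VMMDEV_EVENT_MOUSE_POSITION_CHANGED
 *		    | VMMDEV_EVENT_HGCM;
 *
 * and it is only re-sent when vbg_track_bit_usage() reports a change.
 */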

/**
 * Init and termination worker that sets the guest capabilities to zero
 * on the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
{
	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->not_mask = U32_MAX;
	req->or_mask = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}

/**
 * Sets the guest capabilities for a session. Takes the session mutex.
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The capabilities to add.
 * @not_mask:			The capabilities to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 */
static int vbg_set_session_capabilities(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	struct vmmdev_mask *req;
	u32 changed, previous;
	int rc, ret = 0;

	/*
	 * Allocate a request buffer before taking the mutex, when
	 * the session is being terminated the requestor is the kernel,
	 * as we're cleaning up.
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
			    session_termination ? VBG_KERNEL_REQUEST :
						  session->requestor);
	if (!req) {
		if (!session_termination)
			return -ENOMEM;
		/* Ignore allocation failure, we must do session cleanup. */
	}

	mutex_lock(&gdev->session_mutex);

	/* Apply the changes to the session mask. */
	previous = session->guest_caps;
	session->guest_caps |= or_mask;
	session->guest_caps &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->guest_caps;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
	or_mask = gdev->guest_caps_tracker.mask;

	if (gdev->guest_caps_host == or_mask || !req)
		goto out;

	gdev->guest_caps_host = or_mask;
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		ret = vbg_status_code_to_errno(rc);

		/* Failed, roll back (unless it's session termination time). */
		gdev->guest_caps_host = U32_MAX;
		if (session_termination)
			goto out;

		vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
				    session->guest_caps);
		session->guest_caps = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);
	vbg_req_free(req, sizeof(*req));

	return ret;
}

/**
 * vbg_query_host_version - gets the host feature mask and version information.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_query_host_version(struct vbg_dev *gdev)
{
	struct vmmdev_host_version *req;
	int rc, ret;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	rc = vbg_req_perform(gdev, req);
	ret = vbg_status_code_to_errno(rc);
	if (ret) {
		vbg_err("%s error: %d\n", __func__, rc);
		goto out;
	}

	snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
		 req->major, req->minor, req->build, req->revision);
	gdev->host_features = req->features;

	vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
		 gdev->host_features);

	if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
		vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
		ret = -ENODEV;
	}

out:
	vbg_req_free(req, sizeof(*req));
	return ret;
}

/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges; this function takes care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * Return: 0 or negative errno value.
 *
 * @gdev:		The Guest extension device.
 * @fixed_events:	Events that will be enabled upon init and no client
 *			will ever be allowed to mask.
 */
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
{
	int ret = -ENOMEM;

	gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
	gdev->event_filter_host = U32_MAX;	/* forces a report */
	gdev->guest_caps_host = U32_MAX;	/* forces a report */

	init_waitqueue_head(&gdev->event_wq);
	init_waitqueue_head(&gdev->hgcm_wq);
	spin_lock_init(&gdev->event_spinlock);
	mutex_init(&gdev->session_mutex);
	mutex_init(&gdev->cancel_req_mutex);
	timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
	INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);

	gdev->mem_balloon.get_req =
		vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
			      VBG_KERNEL_REQUEST);
	gdev->mem_balloon.change_req =
		vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
			      VMMDEVREQ_CHANGE_MEMBALLOON,
			      VBG_KERNEL_REQUEST);
	gdev->cancel_req =
		vbg_req_alloc(sizeof(*(gdev->cancel_req)),
			      VMMDEVREQ_HGCM_CANCEL2,
			      VBG_KERNEL_REQUEST);
	gdev->ack_events_req =
		vbg_req_alloc(sizeof(*gdev->ack_events_req),
			      VMMDEVREQ_ACKNOWLEDGE_EVENTS,
			      VBG_KERNEL_REQUEST);
	gdev->mouse_status_req =
		vbg_req_alloc(sizeof(*gdev->mouse_status_req),
			      VMMDEVREQ_GET_MOUSE_STATUS,
			      VBG_KERNEL_REQUEST);

	if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
	    !gdev->cancel_req || !gdev->ack_events_req ||
	    !gdev->mouse_status_req)
		goto err_free_reqs;

	ret = vbg_query_host_version(gdev);
	if (ret)
		goto err_free_reqs;

	ret = vbg_report_guest_info(gdev);
	if (ret) {
		vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
		goto err_free_reqs;
	}

	ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
	if (ret) {
		vbg_err("vboxguest: Error setting fixed event filter: %d\n",
			ret);
		goto err_free_reqs;
	}

	ret = vbg_reset_host_capabilities(gdev);
	if (ret) {
		vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
			ret);
		goto err_free_reqs;
	}

	ret = vbg_core_set_mouse_status(gdev, 0);
	if (ret) {
		vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
		goto err_free_reqs;
	}

	/* These may fail without requiring the driver init to fail. */
	vbg_guest_mappings_init(gdev);
	vbg_heartbeat_init(gdev);

	/* All Done! */
	ret = vbg_report_driver_status(gdev, true);
	if (ret < 0)
		vbg_err("vboxguest: Error reporting driver status: %d\n", ret);

	return 0;

err_free_reqs:
	vbg_req_free(gdev->mouse_status_req,
		     sizeof(*gdev->mouse_status_req));
	vbg_req_free(gdev->ack_events_req,
		     sizeof(*gdev->ack_events_req));
	vbg_req_free(gdev->cancel_req,
		     sizeof(*gdev->cancel_req));
	vbg_req_free(gdev->mem_balloon.change_req,
		     sizeof(*gdev->mem_balloon.change_req));
	vbg_req_free(gdev->mem_balloon.get_req,
		     sizeof(*gdev->mem_balloon.get_req));
	return ret;
}

/**
 * Call this on exit to clean-up vboxguest-core managed resources.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 * @gdev:		The Guest extension device.
 */
void vbg_core_exit(struct vbg_dev *gdev)
{
	vbg_heartbeat_exit(gdev);
	vbg_guest_mappings_exit(gdev);

	/* Clear the host flags (mouse status etc). */
	vbg_reset_host_event_filter(gdev, 0);
	vbg_reset_host_capabilities(gdev);
	vbg_core_set_mouse_status(gdev, 0);

	vbg_req_free(gdev->mouse_status_req,
		     sizeof(*gdev->mouse_status_req));
	vbg_req_free(gdev->ack_events_req,
		     sizeof(*gdev->ack_events_req));
	vbg_req_free(gdev->cancel_req,
		     sizeof(*gdev->cancel_req));
	vbg_req_free(gdev->mem_balloon.change_req,
		     sizeof(*gdev->mem_balloon.change_req));
	vbg_req_free(gdev->mem_balloon.get_req,
		     sizeof(*gdev->mem_balloon.get_req));
}

/**
 * Creates a VBoxGuest user session.
 *
 * vboxguest_linux.c calls this when userspace opens the char-device.
 * Return: A pointer to the new session or an ERR_PTR on error.
 * @gdev:		The Guest extension device.
 * @requestor:		VMMDEV_REQUESTOR_* flags
 */
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
{
	struct vbg_session *session;

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return ERR_PTR(-ENOMEM);

	session->gdev = gdev;
	session->requestor = requestor;

	return session;
}

/**
 * Closes a VBoxGuest session.
 * @session:		The session to close (and free).
 */
void vbg_core_close_session(struct vbg_session *session)
{
	struct vbg_dev *gdev = session->gdev;
	int i, rc;

	vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
	vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);

	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i])
			continue;

		/* requestor is kernel here, as we're cleaning up. */
		vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
				    session->hgcm_client_ids[i], &rc);
	}

	kfree(session);
}

static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
			 size_t out_size)
{
	if (hdr->size_in  != (sizeof(*hdr) + in_size) ||
	    hdr->size_out != (sizeof(*hdr) + out_size))
		return -EINVAL;

	return 0;
}
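
/*
 * Illustrative sketch (not part of the driver): every ioctl struct embeds
 * the header, so the expected sizes are always sizeof(hdr) plus the in/out
 * payload.  E.g. for a struct with a union { in; out; } payload:
 *
 *	vbg_ioctl_chk(&s->hdr, sizeof(s->u.in), sizeof(s->u.out))
 *
 * returns 0 only when userspace filled in size_in/size_out to match.
 */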

static int vbg_ioctl_driver_version_info(
	struct vbg_ioctl_driver_version_info *info)
{
	const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
	u16 min_maj_version, req_maj_version;

	if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
		return -EINVAL;

	req_maj_version = info->u.in.req_version >> 16;
	min_maj_version = info->u.in.min_version >> 16;

	if (info->u.in.min_version > info->u.in.req_version ||
	    min_maj_version != req_maj_version)
		return -EINVAL;

	if (info->u.in.min_version <= VBG_IOC_VERSION &&
	    min_maj_version == vbg_maj_version) {
		info->u.out.session_version = VBG_IOC_VERSION;
	} else {
		info->u.out.session_version = U32_MAX;
		info->hdr.rc = VERR_VERSION_MISMATCH;
	}

	info->u.out.driver_version = VBG_IOC_VERSION;
	info->u.out.driver_revision = 0;
	info->u.out.reserved1 = 0;
	info->u.out.reserved2 = 0;

	return 0;
}
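
/*
 * Illustrative sketch (not part of the driver): VBG_IOC_VERSION packs the
 * major version in the high 16 bits and the minor version in the low 16
 * bits, which is why the major halves are compared with a simple shift:
 *
 *	u32 version = VBG_IOC_VERSION;	// e.g. 0x00010000
 *	u16 major = version >> 16;	// 0x0001
 *	u16 minor = version & 0xffff;	// 0x0000
 */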

static bool vbg_wait_event_cond(struct vbg_dev *gdev,
				struct vbg_session *session,
				u32 event_mask)
{
	unsigned long flags;
	bool wakeup;
	u32 events;

	spin_lock_irqsave(&gdev->event_spinlock, flags);

	events = gdev->pending_events & event_mask;
	wakeup = events || session->cancel_waiters;

	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return wakeup;
}

/* Must be called with the event_spinlock held */
static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     u32 event_mask)
{
	u32 events = gdev->pending_events & event_mask;

	gdev->pending_events &= ~events;
	return events;
}

static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_wait_for_events *wait)
{
	u32 timeout_ms = wait->u.in.timeout_ms;
	u32 event_mask = wait->u.in.events;
	unsigned long flags;
	long timeout;
	int ret = 0;

	if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
		return -EINVAL;

	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	wait->u.out.events = 0;
	do {
		timeout = wait_event_interruptible_timeout(
				gdev->event_wq,
				vbg_wait_event_cond(gdev, session, event_mask),
				timeout);

		spin_lock_irqsave(&gdev->event_spinlock, flags);

		if (timeout < 0 || session->cancel_waiters) {
			ret = -EINTR;
		} else if (timeout == 0) {
			ret = -ETIMEDOUT;
		} else {
			wait->u.out.events =
			   vbg_consume_events_locked(gdev, session, event_mask);
		}

		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		/*
		 * Someone else may have consumed the event(s) first, in
		 * which case we go back to waiting.
		 */
	} while (ret == 0 && wait->u.out.events == 0);

	return ret;
}
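
/*
 * Illustrative sketch (not part of the driver): a userspace waiter built
 * on this ioctl loops much like the kernel side does.  Assuming an fd
 * opened on the vboxguest char-device and a zeroed, size-filled
 * struct vbg_ioctl_wait_for_events wait:
 *
 *	wait.u.in.timeout_ms = 5000;
 *	wait.u.in.events = VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
 *	if (ioctl(fd, VBG_IOCTL_WAIT_FOR_EVENTS, &wait) == 0)
 *		handle(wait.u.out.events);	// handle() is hypothetical
 *	// else errno is EINTR (interrupted/cancelled) or ETIMEDOUT
 */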

static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
					       struct vbg_session *session,
					       struct vbg_ioctl_hdr *hdr)
{
	unsigned long flags;

	if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
		return -EINVAL;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	session->cancel_waiters = true;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	wake_up(&gdev->event_wq);

	return 0;
}

/**
 * Checks if the VMM request is allowed in the context of the given session.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @session:		The calling session.
 * @req:		The request.
 */
static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
			   const struct vmmdev_request_header *req)
{
	const struct vmmdev_guest_status *guest_status;
	bool trusted_apps_only;

	switch (req->request_type) {
	/* Trusted user apps only. */
	case VMMDEVREQ_QUERY_CREDENTIALS:
	case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
	case VMMDEVREQ_REGISTER_SHARED_MODULE:
	case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
	case VMMDEVREQ_WRITE_COREDUMP:
	case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
	case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
	case VMMDEVREQ_CHECK_SHARED_MODULES:
	case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
	case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
	case VMMDEVREQ_REPORT_GUEST_STATS:
	case VMMDEVREQ_REPORT_GUEST_USER_STATE:
	case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
		trusted_apps_only = true;
		break;

	/* Anyone. */
	case VMMDEVREQ_GET_MOUSE_STATUS:
	case VMMDEVREQ_SET_MOUSE_STATUS:
	case VMMDEVREQ_SET_POINTER_SHAPE:
	case VMMDEVREQ_GET_HOST_VERSION:
	case VMMDEVREQ_IDLE:
	case VMMDEVREQ_GET_HOST_TIME:
	case VMMDEVREQ_SET_POWER_STATUS:
	case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
	case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
	case VMMDEVREQ_REPORT_GUEST_STATUS:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
	case VMMDEVREQ_VIDEMODE_SUPPORTED:
	case VMMDEVREQ_GET_HEIGHT_REDUCTION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
	case VMMDEVREQ_VIDEMODE_SUPPORTED2:
	case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
	case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
	case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
	case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
	case VMMDEVREQ_GET_VRDPCHANGE_REQ:
	case VMMDEVREQ_LOG_STRING:
	case VMMDEVREQ_GET_SESSION_ID:
		trusted_apps_only = false;
		break;

	/* Depends on the request parameters... */
	case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
		guest_status = (const struct vmmdev_guest_status *)req;
		switch (guest_status->facility) {
		case VBOXGUEST_FACILITY_TYPE_ALL:
		case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
			vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
				guest_status->facility);
			return -EPERM;
		case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
			trusted_apps_only = true;
			break;
		case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
		case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
		case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
		default:
			trusted_apps_only = false;
			break;
		}
		break;

	/* Anything else is not allowed. */
	default:
		vbg_err("Denying userspace vmm call type %#08x\n",
			req->request_type);
		return -EPERM;
	}

	if (trusted_apps_only &&
	    (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
			req->request_type);
		return -EPERM;
	}

	return 0;
}

static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
				struct vbg_session *session, void *data)
{
	struct vbg_ioctl_hdr *hdr = data;
	int ret;

	if (hdr->size_in != hdr->size_out)
		return -EINVAL;

	if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
		return -E2BIG;

	if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	ret = vbg_req_allowed(gdev, session, data);
	if (ret < 0)
		return ret;

	vbg_req_perform(gdev, data);
	WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);

	return 0;
}

static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
				  struct vbg_session *session,
				  struct vbg_ioctl_hgcm_connect *conn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
		return -EINVAL;

	/* Find a free place in the session's client array and claim it */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i]) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EMFILE;

	ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
			       &client_id, &conn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && conn->hdr.rc >= 0) {
		conn->u.out.client_id = client_id;
		session->hgcm_client_ids[i] = client_id;
	} else {
		conn->u.out.client_id = 0;
		session->hgcm_client_ids[i] = 0;
	}
	mutex_unlock(&gdev->session_mutex);

	return ret;
}

static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_hgcm_disconnect *disconn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
		return -EINVAL;

	client_id = disconn->u.in.client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (session->hgcm_client_ids[i] == client_id) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EINVAL;

	ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
				  &disconn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && disconn->hdr.rc >= 0)
		session->hgcm_client_ids[i] = 0;
	else
		session->hgcm_client_ids[i] = client_id;
	mutex_unlock(&gdev->session_mutex);

	return ret;
}

static bool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
{
	switch (type) {
	case VMMDEV_HGCM_PARM_TYPE_32BIT:
	case VMMDEV_HGCM_PARM_TYPE_64BIT:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		return true;
	default:
		return false;
	}
}

static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
			       struct vbg_session *session, bool f32bit,
			       struct vbg_ioctl_hgcm_call *call)
{
	size_t actual_size;
	u32 client_id;
	int i, ret;

	if (call->hdr.size_in < sizeof(*call))
		return -EINVAL;

	if (call->hdr.size_in != call->hdr.size_out)
		return -EINVAL;

	if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
		return -E2BIG;

	client_id = call->client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	actual_size = sizeof(*call);
	if (f32bit)
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter32);
	else
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter);
	if (call->hdr.size_in < actual_size) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
			  call->hdr.size_in, actual_size);
		return -EINVAL;
	}
	call->hdr.size_out = actual_size;

	/* Validate parameter types */
	if (f32bit) {
		struct vmmdev_hgcm_function_parameter32 *parm =
			VBG_IOCTL_HGCM_CALL_PARMS32(call);

		for (i = 0; i < call->parm_count; i++)
			if (!vbg_param_valid(parm[i].type))
				return -EINVAL;
	} else {
		struct vmmdev_hgcm_function_parameter *parm =
			VBG_IOCTL_HGCM_CALL_PARMS(call);

		for (i = 0; i < call->parm_count; i++)
			if (!vbg_param_valid(parm[i].type))
				return -EINVAL;
	}

	/*
	 * Validate the client id.
	 */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
		if (session->hgcm_client_ids[i] == client_id)
			break;
	mutex_unlock(&gdev->session_mutex);
	if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
			  client_id);
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
		ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
				      call->function, call->timeout_ms,
				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
				      call->parm_count, &call->hdr.rc);
	else
		ret = vbg_hgcm_call(gdev, session->requestor, client_id,
				    call->function, call->timeout_ms,
				    VBG_IOCTL_HGCM_CALL_PARMS(call),
				    call->parm_count, &call->hdr.rc);

	if (ret == -E2BIG) {
		/* E2BIG needs to be reported through the hdr.rc field. */
		call->hdr.rc = VERR_OUT_OF_RANGE;
		ret = 0;
	}

	if (ret && ret != -EINTR && ret != -ETIMEDOUT)
		vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);

	return ret;
}

static int vbg_ioctl_log(struct vbg_ioctl_log *log)
{
	if (log->hdr.size_out != sizeof(log->hdr))
		return -EINVAL;

	vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
		 log->msg);

	return 0;
}

static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
					struct vbg_session *session,
					struct vbg_ioctl_change_filter *filter)
{
	u32 or_mask, not_mask;

	if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
		return -EINVAL;

	or_mask = filter->u.in.or_mask;
	not_mask = filter->u.in.not_mask;

	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
		return -EINVAL;

	return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
					    false);
}

static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
	     struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
{
	u32 or_mask, not_mask;
	int ret;

	if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
		return -EINVAL;

	or_mask = caps->u.in.or_mask;
	not_mask = caps->u.in.not_mask;

	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
		return -EINVAL;

	ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
					   false);
	if (ret)
		return ret;

	caps->u.out.session_caps = session->guest_caps;
	caps->u.out.global_caps = gdev->guest_caps_host;

	return 0;
}

static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
				   struct vbg_ioctl_check_balloon *balloon_info)
{
	if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
		return -EINVAL;

	balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
	/*
	 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
	 * events entirely in the kernel, see vbg_core_isr().
	 */
	balloon_info->u.out.handle_in_r3 = false;

	return 0;
}

static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_write_coredump *dump)
{
	struct vmmdev_write_core_dump *req;

	if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
		return -EINVAL;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
			    session->requestor);
	if (!req)
		return -ENOMEM;

	req->flags = dump->u.in.flags;
	dump->hdr.rc = vbg_req_perform(gdev, req);

	vbg_req_free(req, sizeof(*req));
	return 0;
}

/**
 * Common IOCtl for user to kernel communication.
 * Return: 0 or negative errno value.
 * @session:	The client session.
 * @req:	The requested function.
 * @data:	The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
 */
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
{
	unsigned int req_no_size = req & ~IOCSIZE_MASK;
	struct vbg_dev *gdev = session->gdev;
	struct vbg_ioctl_hdr *hdr = data;
	bool f32bit = false;

	hdr->rc = VINF_SUCCESS;
	if (!hdr->size_out)
		hdr->size_out = hdr->size_in;

	/*
	 * hdr->version and hdr->size_in / hdr->size_out minimum size are
	 * already checked by vbg_misc_device_ioctl().
	 */

	/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
	if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
		return vbg_ioctl_vmmrequest(gdev, session, data);

	if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	/* Fixed size requests. */
	switch (req) {
	case VBG_IOCTL_DRIVER_VERSION_INFO:
		return vbg_ioctl_driver_version_info(data);
	case VBG_IOCTL_HGCM_CONNECT:
		return vbg_ioctl_hgcm_connect(gdev, session, data);
	case VBG_IOCTL_HGCM_DISCONNECT:
		return vbg_ioctl_hgcm_disconnect(gdev, session, data);
	case VBG_IOCTL_WAIT_FOR_EVENTS:
		return vbg_ioctl_wait_for_events(gdev, session, data);
	case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
		return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
	case VBG_IOCTL_CHANGE_FILTER_MASK:
		return vbg_ioctl_change_filter_mask(gdev, session, data);
	case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
		return vbg_ioctl_change_guest_capabilities(gdev, session, data);
	case VBG_IOCTL_CHECK_BALLOON:
		return vbg_ioctl_check_balloon(gdev, data);
	case VBG_IOCTL_WRITE_CORE_DUMP:
		return vbg_ioctl_write_core_dump(gdev, session, data);
	}

	/* Variable sized requests. */
	switch (req_no_size) {
#ifdef CONFIG_COMPAT
	case VBG_IOCTL_HGCM_CALL_32(0):
		f32bit = true;
		/* Fall through */
#endif
	case VBG_IOCTL_HGCM_CALL(0):
		return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
	case VBG_IOCTL_LOG(0):
		return vbg_ioctl_log(data);
	}

	vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
	return -ENOTTY;
}
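
/*
 * Illustrative sketch (not part of the driver): variable-sized requests
 * encode their payload size in the _IOC size field, so matching them
 * requires masking that field out first.  For example:
 *
 *	req_no_size = req & ~IOCSIZE_MASK;
 *	// VBG_IOCTL_LOG(64) and VBG_IOCTL_LOG(128) both match
 *	// VBG_IOCTL_LOG(0) after the mask is applied.
 */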

/**
 * Report guest supported mouse-features to the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @features:		The set of features to report to the host.
 */
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
{
	struct vmmdev_mouse_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->mouse_features = features;
	req->pointer_pos_x = 0;
	req->pointer_pos_y = 0;

	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}

/** Core interrupt service routine. */
irqreturn_t vbg_core_isr(int irq, void *dev_id)
{
	struct vbg_dev *gdev = dev_id;
	struct vmmdev_events *req = gdev->ack_events_req;
	bool mouse_position_changed = false;
	unsigned long flags;
	u32 events = 0;
	int rc;

	if (!gdev->mmio->V.V1_04.have_events)
		return IRQ_NONE;

	/* Get and acknowledge events. */
	req->header.rc = VERR_INTERNAL_ERROR;
	req->events = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("Error performing events req, rc: %d\n", rc);
		return IRQ_NONE;
	}

	events = req->events;

	if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
		mouse_position_changed = true;
		events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
	}

	if (events & VMMDEV_EVENT_HGCM) {
		wake_up(&gdev->hgcm_wq);
		events &= ~VMMDEV_EVENT_HGCM;
	}

	if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
		schedule_work(&gdev->mem_balloon.work);
		events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	}

	if (events) {
		spin_lock_irqsave(&gdev->event_spinlock, flags);
		gdev->pending_events |= events;
		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		wake_up(&gdev->event_wq);
	}

	if (mouse_position_changed)
		vbg_linux_mouse_event(gdev);

	return IRQ_HANDLED;
}
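
/*
 * Illustrative sketch (not part of this file): the PCI glue code is
 * expected to register the ISR as a shared interrupt handler, roughly:
 *
 *	ret = devm_request_irq(dev, pci_dev->irq, vbg_core_isr,
 *			       IRQF_SHARED, "vboxguest", gdev);
 *
 * vboxguest_linux.c does this after vbg_core_init() has run, since the
 * ISR relies on the preallocated gdev->ack_events_req.
 */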