/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
 *
 * Copyright (C) 2007-2016 Oracle Corporation
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include <linux/vmalloc.h>
#include "vboxguest_core.h"
#include "vboxguest_version.h"
/* Get the pointer to the first HGCM parameter. */
#define VBG_IOCTL_HGCM_CALL_PARMS(a) \
        ((struct vmmdev_hgcm_function_parameter *)( \
                (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
/* Get the pointer to the first HGCM parameter in a 32-bit request. */
#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
        ((struct vmmdev_hgcm_function_parameter32 *)( \
                (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))

#define GUEST_MAPPINGS_TRIES    5
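/*
 * Illustrative sketch (not part of the driver logic): the two PARMS macros
 * above assume that an HGCM call ioctl buffer is laid out as the fixed
 * struct vbg_ioctl_hgcm_call header immediately followed by the parameter
 * array, i.e. roughly:
 *
 *      struct vbg_ioctl_hgcm_call              call header
 *      struct vmmdev_hgcm_function_parameter   parms[call->parm_count]
 *
 * so VBG_IOCTL_HGCM_CALL_PARMS(call)[0] is the first parameter, and the
 * total buffer size is sizeof(*call) plus parm_count times the size of a
 * parameter, which is exactly what vbg_ioctl_hgcm_call() checks below.
 */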
/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so we try several times.
 *
 * Failure to reserve the guest mappings is ignored.
 *
 * @gdev:	The Guest extension device.
 */
static void vbg_guest_mappings_init(struct vbg_dev *gdev)
{
        struct vmmdev_hypervisorinfo *req;
        void *guest_mappings[GUEST_MAPPINGS_TRIES];
        struct page **pages = NULL;
        u32 size, hypervisor_size;
        int i, rc;

        /* Query the required space. */
        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
        if (!req)
                return;

        req->hypervisor_start = 0;
        req->hypervisor_size = 0;
        rc = vbg_req_perform(gdev, req);
        if (rc < 0)
                goto out;

        /*
         * The VMM will report back if there is nothing it wants to map, like
         * for instance in VT-x and AMD-V mode.
         */
        if (req->hypervisor_size == 0)
                goto out;

        hypervisor_size = req->hypervisor_size;
        /* Add 4M so that we can align the vmap to 4MiB as the host requires. */
        size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;

        pages = kmalloc(sizeof(*pages) * (size >> PAGE_SHIFT), GFP_KERNEL);
        if (!pages)
                goto out;

        gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
        if (!gdev->guest_mappings_dummy_page)
                goto out;

        for (i = 0; i < (size >> PAGE_SHIFT); i++)
                pages[i] = gdev->guest_mappings_dummy_page;

        /*
         * Try several times, the VMM might not accept some addresses because
         * of address clashes between the three contexts.
         */
        for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
                guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
                                         VM_MAP, PAGE_KERNEL_RO);
                if (!guest_mappings[i])
                        break;

                req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
                req->header.rc = VERR_INTERNAL_ERROR;
                req->hypervisor_size = hypervisor_size;
                req->hypervisor_start =
                        (unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);

                rc = vbg_req_perform(gdev, req);
                if (rc >= 0) {
                        gdev->guest_mappings = guest_mappings[i];
                        break;
                }
        }

        /* Free vmap's from failed attempts. */
        while (--i >= 0)
                vunmap(guest_mappings[i]);

        /* On failure free the dummy-page backing the vmap */
        if (!gdev->guest_mappings) {
                __free_page(gdev->guest_mappings_dummy_page);
                gdev->guest_mappings_dummy_page = NULL;
        }

out:
        kfree(req);
        kfree(pages);
}
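/*
 * Worked example (illustrative only) of the sizing done above: if the host
 * reports a hypervisor_size of, say, 0x400000 bytes, we vmap
 * PAGE_ALIGN(0x400000) + SZ_4M bytes worth of dummy pages. The extra 4 MiB
 * guarantees that PTR_ALIGN(guest_mappings[i], SZ_4M) still leaves
 * hypervisor_size bytes inside the mapping, so the host is always offered a
 * 4 MiB aligned window of the size it asked for.
 */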
/**
 * Undo what vbg_guest_mappings_init did.
 *
 * @gdev:	The Guest extension device.
 */
static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
{
        struct vmmdev_hypervisorinfo *req;
        int rc;

        if (!gdev->guest_mappings)
                return;

        /*
         * Tell the host that we're going to free the memory we reserved for
         * it, then free it up. (Leak the memory if anything goes wrong here.)
         */
        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
        if (!req)
                return;

        req->hypervisor_start = 0;
        req->hypervisor_size = 0;

        rc = vbg_req_perform(gdev, req);

        kfree(req);

        if (rc < 0) {
                vbg_err("%s error: %d\n", __func__, rc);
                return;
        }

        vunmap(gdev->guest_mappings);
        gdev->guest_mappings = NULL;

        __free_page(gdev->guest_mappings_dummy_page);
        gdev->guest_mappings_dummy_page = NULL;
}
/**
 * Report the guest information to the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 */
static int vbg_report_guest_info(struct vbg_dev *gdev)
{
        /*
         * Allocate and fill in the two guest info reports.
         */
        struct vmmdev_guest_info *req1 = NULL;
        struct vmmdev_guest_info2 *req2 = NULL;
        int rc, ret = -ENOMEM;

        req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
        req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
        if (!req1 || !req2)
                goto out_free;

        req1->interface_version = VMMDEV_VERSION;
        req1->os_type = VMMDEV_OSTYPE_LINUX26;
#if __BITS_PER_LONG == 64
        req1->os_type |= VMMDEV_OSTYPE_X64;
#endif

        req2->additions_major = VBG_VERSION_MAJOR;
        req2->additions_minor = VBG_VERSION_MINOR;
        req2->additions_build = VBG_VERSION_BUILD;
        req2->additions_revision = VBG_SVN_REV;
        /* (no features defined yet) */
        req2->additions_features = 0;
        strlcpy(req2->name, VBG_VERSION_STRING,
                sizeof(req2->name));

        /*
         * There are two protocols here:
         *      1. INFO2 + INFO1. Supported by >=3.2.51.
         *      2. INFO1 and optionally INFO2. The old protocol.
         *
         * We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED
         * if not supported by the VMMDev (message ordering requirement).
         */
        rc = vbg_req_perform(gdev, req2);
        if (rc >= 0) {
                rc = vbg_req_perform(gdev, req1);
        } else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
                rc = vbg_req_perform(gdev, req1);
                if (rc >= 0) {
                        rc = vbg_req_perform(gdev, req2);
                        if (rc == VERR_NOT_IMPLEMENTED)
                                rc = VINF_SUCCESS;
                }
        }
        ret = vbg_status_code_to_errno(rc);

out_free:
        kfree(req2);
        kfree(req1);
        return ret;
}
/**
 * Report the guest driver status to the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 * @active:	Flag whether the driver is now active or not.
 */
static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
{
        struct vmmdev_guest_status *req;
        int rc;

        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
        if (!req)
                return -ENOMEM;

        req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
        if (active)
                req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
        else
                req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
        req->flags = 0;

        rc = vbg_req_perform(gdev, req);
        if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
                rc = VINF_SUCCESS;

        kfree(req);

        return vbg_status_code_to_errno(rc);
}
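/*
 * Most host communication in this file follows the same round-trip pattern
 * as vbg_report_driver_status() above. A minimal sketch (illustrative only,
 * built from the existing helpers in this driver):
 *
 *      struct vmmdev_guest_status *req;
 *      int rc;
 *
 *      req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
 *      if (!req)
 *              return -ENOMEM;
 *      // ... fill in the request specific fields ...
 *      rc = vbg_req_perform(gdev, req);        // VBox status code
 *      kfree(req);
 *      return vbg_status_code_to_errno(rc);    // convert to -errno
 */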
/**
 * Inflate the balloon by one chunk. The caller owns the balloon mutex.
 *
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 * @chunk_idx:	Index of the chunk.
 */
static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
{
        struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
        struct page **pages;
        int i, rc, ret;

        pages = kmalloc(sizeof(*pages) * VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
                        GFP_KERNEL | __GFP_NOWARN);
        if (!pages)
                return -ENOMEM;

        req->header.size = sizeof(*req);
        req->inflate = true;
        req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

        for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
                pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
                if (!pages[i]) {
                        ret = -ENOMEM;
                        goto out_error;
                }

                req->phys_page[i] = page_to_phys(pages[i]);
        }

        rc = vbg_req_perform(gdev, req);
        if (rc < 0) {
                vbg_err("%s error, rc: %d\n", __func__, rc);
                ret = vbg_status_code_to_errno(rc);
                goto out_error;
        }

        gdev->mem_balloon.pages[chunk_idx] = pages;

        return 0;

out_error:
        while (--i >= 0)
                __free_page(pages[i]);
        kfree(pages);

        return ret;
}
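/*
 * Illustrative note: a balloon chunk is VMMDEV_MEMORY_BALLOON_CHUNK_PAGES
 * pages handed to the host in a single vmmdev_memballoon_change request.
 * Assuming 4 KiB pages and the usual 256 pages per chunk (an assumption,
 * check the VMMDev headers for the actual constant), each successful
 * vbg_balloon_inflate() call therefore moves 1 MiB of guest memory into
 * the balloon, and vbg_balloon_deflate() gives 1 MiB back.
 */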
/**
 * Deflate the balloon by one chunk. The caller owns the balloon mutex.
 *
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 * @chunk_idx:	Index of the chunk.
 */
static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
{
        struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
        struct page **pages = gdev->mem_balloon.pages[chunk_idx];
        int i, rc;

        req->header.size = sizeof(*req);
        req->inflate = false;
        req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

        for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
                req->phys_page[i] = page_to_phys(pages[i]);

        rc = vbg_req_perform(gdev, req);
        if (rc < 0) {
                vbg_err("%s error, rc: %d\n", __func__, rc);
                return vbg_status_code_to_errno(rc);
        }

        for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
                __free_page(pages[i]);
        kfree(pages);
        gdev->mem_balloon.pages[chunk_idx] = NULL;

        return 0;
}
/**
 * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
 * the host wants the balloon to be and adjust accordingly.
 */
static void vbg_balloon_work(struct work_struct *work)
{
        struct vbg_dev *gdev =
                container_of(work, struct vbg_dev, mem_balloon.work);
        struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
        u32 i, chunks;
        int rc, ret;

        /*
         * Setting this bit means that we request the value from the host and
         * change the guest memory balloon according to the returned value.
         */
        req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
        rc = vbg_req_perform(gdev, req);
        if (rc < 0) {
                vbg_err("%s error, rc: %d\n", __func__, rc);
                return;
        }

        /*
         * The host always returns the same maximum amount of chunks, so
         * we do this once.
         */
        if (!gdev->mem_balloon.max_chunks) {
                gdev->mem_balloon.pages =
                        devm_kcalloc(gdev->dev, req->phys_mem_chunks,
                                     sizeof(struct page **), GFP_KERNEL);
                if (!gdev->mem_balloon.pages)
                        return;

                gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
        }

        chunks = req->balloon_chunks;
        if (chunks > gdev->mem_balloon.max_chunks) {
                vbg_err("%s: illegal balloon size %u (max=%u)\n",
                        __func__, chunks, gdev->mem_balloon.max_chunks);
                return;
        }

        if (chunks > gdev->mem_balloon.chunks) {
                /* inflate */
                for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
                        ret = vbg_balloon_inflate(gdev, i);
                        if (ret < 0)
                                return;

                        gdev->mem_balloon.chunks++;
                }
        } else {
                /* deflate */
                for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
                        ret = vbg_balloon_deflate(gdev, i);
                        if (ret < 0)
                                return;

                        gdev->mem_balloon.chunks--;
                }
        }
}
/**
 * Callback for heartbeat timer.
 */
static void vbg_heartbeat_timer(struct timer_list *t)
{
        struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);

        vbg_req_perform(gdev, gdev->guest_heartbeat_req);
        mod_timer(&gdev->heartbeat_timer,
                  jiffies + msecs_to_jiffies(gdev->heartbeat_interval_ms));
}
/**
 * Configure the host to check guest's heartbeat
 * and get heartbeat interval from the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 * @enabled:	Set true to enable guest heartbeat checks on host.
 */
static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
{
        struct vmmdev_heartbeat *req;
        int rc;

        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
        if (!req)
                return -ENOMEM;

        req->enabled = enabled;
        req->interval_ns = 0;
        rc = vbg_req_perform(gdev, req);
        do_div(req->interval_ns, 1000000); /* ns -> ms */
        gdev->heartbeat_interval_ms = req->interval_ns;

        kfree(req);
        return vbg_status_code_to_errno(rc);
}
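/*
 * Illustrative example of the conversion above: the host reports the
 * heartbeat interval in nanoseconds, so a reply of interval_ns = 2000000000
 * becomes heartbeat_interval_ms = 2000 after the do_div() by 1000000, i.e.
 * the timer in vbg_heartbeat_timer() re-arms every two seconds.
 */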
/**
 * Initializes the heartbeat timer. This feature may be disabled by the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 */
static int vbg_heartbeat_init(struct vbg_dev *gdev)
{
        int ret;

        /* Make sure that heartbeat checking is disabled if we fail. */
        ret = vbg_heartbeat_host_config(gdev, false);
        if (ret < 0)
                return ret;

        ret = vbg_heartbeat_host_config(gdev, true);
        if (ret < 0)
                return ret;

        /*
         * Preallocate the request to use it from the timer callback because:
         *    1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
         *       and the timer callback runs at DISPATCH_LEVEL;
         *    2) avoid repeated allocations.
         */
        gdev->guest_heartbeat_req = vbg_req_alloc(
                                        sizeof(*gdev->guest_heartbeat_req),
                                        VMMDEVREQ_GUEST_HEARTBEAT);
        if (!gdev->guest_heartbeat_req)
                return -ENOMEM;

        vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
                 __func__, gdev->heartbeat_interval_ms);
        mod_timer(&gdev->heartbeat_timer, 0);

        return 0;
}
/**
 * Cleanup heartbeat code, stop HB timer and disable host heartbeat checking.
 *
 * @gdev:	The Guest extension device.
 */
static void vbg_heartbeat_exit(struct vbg_dev *gdev)
{
        del_timer_sync(&gdev->heartbeat_timer);
        vbg_heartbeat_host_config(gdev, false);
        kfree(gdev->guest_heartbeat_req);
}
/**
 * Applies a change to the bit usage tracker.
 *
 * Return: true if the mask changed, false if not.
 * @tracker:	The bit usage tracker.
 * @changed:	The bits to change.
 * @previous:	The previous value of the bits.
 */
static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
                                u32 changed, u32 previous)
{
        bool global_change = false;

        while (changed) {
                u32 bit = ffs(changed) - 1;
                u32 bitmask = BIT(bit);

                if (bitmask & previous) {
                        tracker->per_bit_usage[bit] -= 1;
                        if (tracker->per_bit_usage[bit] == 0) {
                                global_change = true;
                                tracker->mask &= ~bitmask;
                        }
                } else {
                        tracker->per_bit_usage[bit] += 1;
                        if (tracker->per_bit_usage[bit] == 1) {
                                global_change = true;
                                tracker->mask |= bitmask;
                        }
                }

                changed &= ~bitmask;
        }

        return global_change;
}
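/*
 * Worked example (illustrative only): if two sessions both enable event
 * bit 3, the first call moves per_bit_usage[3] from 0 to 1 and sets BIT(3)
 * in tracker->mask (global_change == true); the second call only bumps the
 * count to 2 and reports no global change. Symmetrically, the mask bit is
 * cleared, and a global change reported, only when the last user drops it.
 */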
/**
 * Init and termination worker for resetting the event filter on the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @fixed_events:	Fixed events (init time).
 */
static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
                                       u32 fixed_events)
{
        struct vmmdev_mask *req;
        int rc;

        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
        if (!req)
                return -ENOMEM;

        req->not_mask = U32_MAX & ~fixed_events;
        req->or_mask = fixed_events;
        rc = vbg_req_perform(gdev, req);
        if (rc < 0)
                vbg_err("%s error, rc: %d\n", __func__, rc);

        kfree(req);
        return vbg_status_code_to_errno(rc);
}
/**
 * Changes the event filter mask for the given session.
 *
 * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
 * do session cleanup. Takes the session mutex.
 *
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The events to add.
 * @not_mask:			The events to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 */
static int vbg_set_session_event_filter(struct vbg_dev *gdev,
                                        struct vbg_session *session,
                                        u32 or_mask, u32 not_mask,
                                        bool session_termination)
{
        struct vmmdev_mask *req;
        u32 changed, previous;
        int rc, ret = 0;

        /* Allocate a request buffer before taking the mutex */
        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
        if (!req) {
                if (!session_termination)
                        return -ENOMEM;
                /* Ignore allocation failure, we must do session cleanup. */
        }

        mutex_lock(&gdev->session_mutex);

        /* Apply the changes to the session mask. */
        previous = session->event_filter;
        session->event_filter |= or_mask;
        session->event_filter &= ~not_mask;

        /* If anything actually changed, update the global usage counters. */
        changed = previous ^ session->event_filter;
        if (!changed)
                goto out;

        vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
        or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;

        if (gdev->event_filter_host == or_mask || !req)
                goto out;

        gdev->event_filter_host = or_mask;
        req->or_mask = or_mask;
        req->not_mask = ~or_mask;
        rc = vbg_req_perform(gdev, req);
        if (rc < 0) {
                ret = vbg_status_code_to_errno(rc);

                /* Failed, roll back (unless it's session termination time). */
                gdev->event_filter_host = U32_MAX;
                if (session_termination)
                        goto out;

                vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
                                    session->event_filter);
                session->event_filter = previous;
        }

out:
        mutex_unlock(&gdev->session_mutex);
        kfree(req);

        return ret;
}
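/*
 * Illustrative summary of the propagation done above: the host is never told
 * about individual sessions. The per-session event_filter masks are folded
 * into gdev->event_filter_tracker.mask by vbg_track_bit_usage(), OR-ed with
 * gdev->fixed_events, and only when that combined mask differs from the
 * cached gdev->event_filter_host value is a VMMDEVREQ_CTL_GUEST_FILTER_MASK
 * request actually sent to the host.
 */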
/**
 * Init and termination worker for setting the guest capabilities to zero
 * on the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 */
static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
{
        struct vmmdev_mask *req;
        int rc;

        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
        if (!req)
                return -ENOMEM;

        req->not_mask = U32_MAX;
        req->or_mask = 0;
        rc = vbg_req_perform(gdev, req);
        if (rc < 0)
                vbg_err("%s error, rc: %d\n", __func__, rc);

        kfree(req);
        return vbg_status_code_to_errno(rc);
}
/**
 * Sets the guest capabilities for a session. Takes the session mutex.
 *
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The capabilities to add.
 * @not_mask:			The capabilities to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 */
static int vbg_set_session_capabilities(struct vbg_dev *gdev,
                                        struct vbg_session *session,
                                        u32 or_mask, u32 not_mask,
                                        bool session_termination)
{
        struct vmmdev_mask *req;
        u32 changed, previous;
        int rc, ret = 0;

        /* Allocate a request buffer before taking the mutex */
        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
        if (!req) {
                if (!session_termination)
                        return -ENOMEM;
                /* Ignore allocation failure, we must do session cleanup. */
        }

        mutex_lock(&gdev->session_mutex);

        /* Apply the changes to the session mask. */
        previous = session->guest_caps;
        session->guest_caps |= or_mask;
        session->guest_caps &= ~not_mask;

        /* If anything actually changed, update the global usage counters. */
        changed = previous ^ session->guest_caps;
        if (!changed)
                goto out;

        vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
        or_mask = gdev->guest_caps_tracker.mask;

        if (gdev->guest_caps_host == or_mask || !req)
                goto out;

        gdev->guest_caps_host = or_mask;
        req->or_mask = or_mask;
        req->not_mask = ~or_mask;
        rc = vbg_req_perform(gdev, req);
        if (rc < 0) {
                ret = vbg_status_code_to_errno(rc);

                /* Failed, roll back (unless it's session termination time). */
                gdev->guest_caps_host = U32_MAX;
                if (session_termination)
                        goto out;

                vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
                                    session->guest_caps);
                session->guest_caps = previous;
        }

out:
        mutex_unlock(&gdev->session_mutex);
        kfree(req);

        return ret;
}
/**
 * vbg_query_host_version - gets the host feature mask and version information.
 *
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 */
static int vbg_query_host_version(struct vbg_dev *gdev)
{
        struct vmmdev_host_version *req;
        int rc, ret;

        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
        if (!req)
                return -ENOMEM;

        rc = vbg_req_perform(gdev, req);
        ret = vbg_status_code_to_errno(rc);
        if (ret)
                goto out;

        snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
                 req->major, req->minor, req->build, req->revision);
        gdev->host_features = req->features;

        vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
                 gdev->host_features);

        if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
                vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
                ret = -ENODEV;
        }

out:
        kfree(req);
        return ret;
}
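/*
 * Illustrative example (made-up version numbers) of the string built above:
 * a host reporting major 6, minor 1, build 32, revision 149290 ends up in
 * gdev->host_version as "6.1.32r149290", per the "%u.%u.%ur%u" format.
 */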
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges; this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * Return: 0 or negative errno value.
 *
 * @gdev:		The Guest extension device.
 * @fixed_events:	Events that will be enabled upon init and no client
 *			will ever be allowed to mask.
 */
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
{
        int ret = -ENOMEM;

        gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
        gdev->event_filter_host = U32_MAX;      /* forces a report */
        gdev->guest_caps_host = U32_MAX;        /* forces a report */

        init_waitqueue_head(&gdev->event_wq);
        init_waitqueue_head(&gdev->hgcm_wq);
        spin_lock_init(&gdev->event_spinlock);
        mutex_init(&gdev->session_mutex);
        mutex_init(&gdev->cancel_req_mutex);
        timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
        INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);

        gdev->mem_balloon.get_req =
                vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
                              VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
        gdev->mem_balloon.change_req =
                vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
                              VMMDEVREQ_CHANGE_MEMBALLOON);
        gdev->cancel_req =
                vbg_req_alloc(sizeof(*(gdev->cancel_req)),
                              VMMDEVREQ_HGCM_CANCEL2);
        gdev->ack_events_req =
                vbg_req_alloc(sizeof(*gdev->ack_events_req),
                              VMMDEVREQ_ACKNOWLEDGE_EVENTS);
        gdev->mouse_status_req =
                vbg_req_alloc(sizeof(*gdev->mouse_status_req),
                              VMMDEVREQ_GET_MOUSE_STATUS);

        if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
            !gdev->cancel_req || !gdev->ack_events_req ||
            !gdev->mouse_status_req)
                goto err_free_reqs;

        ret = vbg_query_host_version(gdev);
        if (ret)
                goto err_free_reqs;

        ret = vbg_report_guest_info(gdev);
        if (ret) {
                vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
                goto err_free_reqs;
        }

        ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
        if (ret) {
                vbg_err("vboxguest: Error setting fixed event filter: %d\n",
                        ret);
                goto err_free_reqs;
        }

        ret = vbg_reset_host_capabilities(gdev);
        if (ret) {
                vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
                        ret);
                goto err_free_reqs;
        }

        ret = vbg_core_set_mouse_status(gdev, 0);
        if (ret) {
                vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
                goto err_free_reqs;
        }

        /* These may fail without requiring the driver init to fail. */
        vbg_guest_mappings_init(gdev);
        vbg_heartbeat_init(gdev);

        ret = vbg_report_driver_status(gdev, true);
        if (ret < 0)
                vbg_err("vboxguest: Error reporting driver status: %d\n", ret);

        return 0;

err_free_reqs:
        kfree(gdev->mouse_status_req);
        kfree(gdev->ack_events_req);
        kfree(gdev->cancel_req);
        kfree(gdev->mem_balloon.change_req);
        kfree(gdev->mem_balloon.get_req);
        return ret;
}
/**
 * Call this on exit to clean-up vboxguest-core managed resources.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 * @gdev:	The Guest extension device.
 */
void vbg_core_exit(struct vbg_dev *gdev)
{
        vbg_heartbeat_exit(gdev);
        vbg_guest_mappings_exit(gdev);

        /* Clear the host flags (mouse status etc). */
        vbg_reset_host_event_filter(gdev, 0);
        vbg_reset_host_capabilities(gdev);
        vbg_core_set_mouse_status(gdev, 0);

        kfree(gdev->mouse_status_req);
        kfree(gdev->ack_events_req);
        kfree(gdev->cancel_req);
        kfree(gdev->mem_balloon.change_req);
        kfree(gdev->mem_balloon.get_req);
}
/**
 * Creates a VBoxGuest user session.
 *
 * vboxguest_linux.c calls this when userspace opens the char-device.
 * Return: A pointer to the new session or an ERR_PTR on error.
 * @gdev:	The Guest extension device.
 * @user:	Set if this is a session for the vboxuser device.
 */
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
{
        struct vbg_session *session;

        session = kzalloc(sizeof(*session), GFP_KERNEL);
        if (!session)
                return ERR_PTR(-ENOMEM);

        session->gdev = gdev;
        session->user_session = user;

        return session;
}
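/*
 * Minimal usage sketch (illustrative only, not the actual vboxguest_linux.c
 * code): a char-device open handler is expected to do something like
 *
 *      session = vbg_core_open_session(gdev, is_vboxuser_node);
 *      if (IS_ERR(session))
 *              return PTR_ERR(session);
 *      filp->private_data = session;
 *
 * where is_vboxuser_node is a hypothetical flag distinguishing the vboxuser
 * node, and the same pointer is later handed to vbg_core_ioctl() and
 * vbg_core_close_session().
 */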
/**
 * Closes a VBoxGuest session.
 *
 * @session:	The session to close (and free).
 */
void vbg_core_close_session(struct vbg_session *session)
{
        struct vbg_dev *gdev = session->gdev;
        int i, rc;

        vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
        vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);

        for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
                if (!session->hgcm_client_ids[i])
                        continue;

                vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
        }

        kfree(session);
}
static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
                         size_t out_size)
{
        if (hdr->size_in  != (sizeof(*hdr) + in_size) ||
            hdr->size_out != (sizeof(*hdr) + out_size))
                return -EINVAL;

        return 0;
}
static int vbg_ioctl_driver_version_info(
        struct vbg_ioctl_driver_version_info *info)
{
        const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
        u16 min_maj_version, req_maj_version;

        if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
                return -EINVAL;

        req_maj_version = info->u.in.req_version >> 16;
        min_maj_version = info->u.in.min_version >> 16;

        if (info->u.in.min_version > info->u.in.req_version ||
            min_maj_version != req_maj_version)
                return -EINVAL;

        if (info->u.in.min_version <= VBG_IOC_VERSION &&
            min_maj_version == vbg_maj_version) {
                info->u.out.session_version = VBG_IOC_VERSION;
        } else {
                info->u.out.session_version = U32_MAX;
                info->hdr.rc = VERR_VERSION_MISMATCH;
        }

        info->u.out.driver_version  = VBG_IOC_VERSION;
        info->u.out.driver_revision = 0;
        info->u.out.reserved1 = 0;
        info->u.out.reserved2 = 0;

        return 0;
}
static bool vbg_wait_event_cond(struct vbg_dev *gdev,
                                struct vbg_session *session,
                                u32 event_mask)
{
        unsigned long flags;
        bool wakeup;
        u32 events;

        spin_lock_irqsave(&gdev->event_spinlock, flags);

        events = gdev->pending_events & event_mask;
        wakeup = events || session->cancel_waiters;

        spin_unlock_irqrestore(&gdev->event_spinlock, flags);

        return wakeup;
}
/* Must be called with the event_spinlock held */
static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
                                     struct vbg_session *session,
                                     u32 event_mask)
{
        u32 events = gdev->pending_events & event_mask;

        gdev->pending_events &= ~events;
        return events;
}
static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
                                     struct vbg_session *session,
                                     struct vbg_ioctl_wait_for_events *wait)
{
        u32 timeout_ms = wait->u.in.timeout_ms;
        u32 event_mask = wait->u.in.events;
        unsigned long flags;
        long timeout;
        int ret = 0;

        if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
                return -EINVAL;

        if (timeout_ms == U32_MAX)
                timeout = MAX_SCHEDULE_TIMEOUT;
        else
                timeout = msecs_to_jiffies(timeout_ms);

        wait->u.out.events = 0;
        do {
                timeout = wait_event_interruptible_timeout(
                                gdev->event_wq,
                                vbg_wait_event_cond(gdev, session, event_mask),
                                timeout);

                spin_lock_irqsave(&gdev->event_spinlock, flags);

                if (timeout < 0 || session->cancel_waiters) {
                        ret = -EINTR;
                } else if (timeout == 0) {
                        ret = -ETIMEDOUT;
                } else {
                        wait->u.out.events =
                           vbg_consume_events_locked(gdev, session, event_mask);
                }

                spin_unlock_irqrestore(&gdev->event_spinlock, flags);

                /*
                 * Someone else may have consumed the event(s) first, in
                 * which case we go back to waiting.
                 */
        } while (ret == 0 && wait->u.out.events == 0);

        return ret;
}
static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
                                               struct vbg_session *session,
                                               struct vbg_ioctl_hdr *hdr)
{
        unsigned long flags;

        if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
                return -EINVAL;

        spin_lock_irqsave(&gdev->event_spinlock, flags);
        session->cancel_waiters = true;
        spin_unlock_irqrestore(&gdev->event_spinlock, flags);

        wake_up(&gdev->event_wq);

        return 0;
}
/**
 * Checks if the VMM request is allowed in the context of the given session.
 *
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 * @session:	The calling session.
 * @req:	The request.
 */
static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
                           const struct vmmdev_request_header *req)
{
        const struct vmmdev_guest_status *guest_status;
        bool trusted_apps_only;

        switch (req->request_type) {
        /* Trusted user apps only. */
        case VMMDEVREQ_QUERY_CREDENTIALS:
        case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
        case VMMDEVREQ_REGISTER_SHARED_MODULE:
        case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
        case VMMDEVREQ_WRITE_COREDUMP:
        case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
        case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
        case VMMDEVREQ_CHECK_SHARED_MODULES:
        case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
        case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
        case VMMDEVREQ_REPORT_GUEST_STATS:
        case VMMDEVREQ_REPORT_GUEST_USER_STATE:
        case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
                trusted_apps_only = true;
                break;

        /* Anyone. */
        case VMMDEVREQ_GET_MOUSE_STATUS:
        case VMMDEVREQ_SET_MOUSE_STATUS:
        case VMMDEVREQ_SET_POINTER_SHAPE:
        case VMMDEVREQ_GET_HOST_VERSION:
        case VMMDEVREQ_IDLE:
        case VMMDEVREQ_GET_HOST_TIME:
        case VMMDEVREQ_SET_POWER_STATUS:
        case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
        case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
        case VMMDEVREQ_REPORT_GUEST_STATUS:
        case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
        case VMMDEVREQ_VIDEMODE_SUPPORTED:
        case VMMDEVREQ_GET_HEIGHT_REDUCTION:
        case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
        case VMMDEVREQ_VIDEMODE_SUPPORTED2:
        case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
        case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
        case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
        case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
        case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
        case VMMDEVREQ_GET_VRDPCHANGE_REQ:
        case VMMDEVREQ_LOG_STRING:
        case VMMDEVREQ_GET_SESSION_ID:
                trusted_apps_only = false;
                break;

        /* Depends on the request parameters... */
        case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
                guest_status = (const struct vmmdev_guest_status *)req;
                switch (guest_status->facility) {
                case VBOXGUEST_FACILITY_TYPE_ALL:
                case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
                        vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
                                guest_status->facility);
                        return -EPERM;
                case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
                        trusted_apps_only = true;
                        break;
                case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
                case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
                case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
                default:
                        trusted_apps_only = false;
                        break;
                }
                break;

        /* Anything else is not allowed. */
        default:
                vbg_err("Denying userspace vmm call type %#08x\n",
                        req->request_type);
                return -EPERM;
        }

        if (trusted_apps_only && session->user_session) {
                vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
                        req->request_type);
                return -EPERM;
        }

        return 0;
}
static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
                                struct vbg_session *session, void *data)
{
        struct vbg_ioctl_hdr *hdr = data;
        int ret;

        if (hdr->size_in != hdr->size_out)
                return -EINVAL;

        if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
                return -E2BIG;

        if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
                return -EINVAL;

        ret = vbg_req_allowed(gdev, session, data);
        if (ret < 0)
                return ret;

        vbg_req_perform(gdev, data);
        WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);

        return 0;
}
static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
                                  struct vbg_session *session,
                                  struct vbg_ioctl_hgcm_connect *conn)
{
        u32 client_id;
        int i, ret;

        if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
                return -EINVAL;

        /* Find a free place in the sessions clients array and claim it */
        mutex_lock(&gdev->session_mutex);
        for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
                if (!session->hgcm_client_ids[i]) {
                        session->hgcm_client_ids[i] = U32_MAX;
                        break;
                }
        }
        mutex_unlock(&gdev->session_mutex);

        if (i >= ARRAY_SIZE(session->hgcm_client_ids))
                return -EMFILE;

        ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
                               &conn->hdr.rc);

        mutex_lock(&gdev->session_mutex);
        if (ret == 0 && conn->hdr.rc >= 0) {
                conn->u.out.client_id = client_id;
                session->hgcm_client_ids[i] = client_id;
        } else {
                conn->u.out.client_id = 0;
                session->hgcm_client_ids[i] = 0;
        }
        mutex_unlock(&gdev->session_mutex);

        return ret;
}
static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
                                     struct vbg_session *session,
                                     struct vbg_ioctl_hgcm_disconnect *disconn)
{
        u32 client_id;
        int i, ret;

        if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
                return -EINVAL;

        client_id = disconn->u.in.client_id;
        if (client_id == 0 || client_id == U32_MAX)
                return -EINVAL;

        mutex_lock(&gdev->session_mutex);
        for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
                if (session->hgcm_client_ids[i] == client_id) {
                        session->hgcm_client_ids[i] = U32_MAX;
                        break;
                }
        }
        mutex_unlock(&gdev->session_mutex);

        if (i >= ARRAY_SIZE(session->hgcm_client_ids))
                return -EINVAL;

        ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);

        mutex_lock(&gdev->session_mutex);
        if (ret == 0 && disconn->hdr.rc >= 0)
                session->hgcm_client_ids[i] = 0;
        else
                session->hgcm_client_ids[i] = client_id;
        mutex_unlock(&gdev->session_mutex);

        return ret;
}
static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
                               struct vbg_session *session, bool f32bit,
                               struct vbg_ioctl_hgcm_call *call)
{
        size_t actual_size;
        u32 client_id;
        int i, ret;

        if (call->hdr.size_in < sizeof(*call))
                return -EINVAL;

        if (call->hdr.size_in != call->hdr.size_out)
                return -EINVAL;

        if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
                return -E2BIG;

        client_id = call->client_id;
        if (client_id == 0 || client_id == U32_MAX)
                return -EINVAL;

        actual_size = sizeof(*call);
        if (f32bit)
                actual_size += call->parm_count *
                               sizeof(struct vmmdev_hgcm_function_parameter32);
        else
                actual_size += call->parm_count *
                               sizeof(struct vmmdev_hgcm_function_parameter);
        if (call->hdr.size_in < actual_size) {
                vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
                          call->hdr.size_in, actual_size);
                return -EINVAL;
        }
        call->hdr.size_out = actual_size;

        /*
         * Validate the client id.
         */
        mutex_lock(&gdev->session_mutex);
        for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
                if (session->hgcm_client_ids[i] == client_id)
                        break;
        mutex_unlock(&gdev->session_mutex);
        if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
                vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
                          client_id);
                return -EINVAL;
        }

        if (f32bit)
                ret = vbg_hgcm_call32(gdev, client_id,
                                      call->function, call->timeout_ms,
                                      VBG_IOCTL_HGCM_CALL_PARMS32(call),
                                      call->parm_count, &call->hdr.rc);
        else
                ret = vbg_hgcm_call(gdev, client_id,
                                    call->function, call->timeout_ms,
                                    VBG_IOCTL_HGCM_CALL_PARMS(call),
                                    call->parm_count, &call->hdr.rc);

        if (ret == -E2BIG) {
                /* E2BIG needs to be reported through the hdr.rc field. */
                call->hdr.rc = VERR_OUT_OF_RANGE;
                ret = 0;
        }

        if (ret && ret != -EINTR && ret != -ETIMEDOUT)
                vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);

        return ret;
}
static int vbg_ioctl_log(struct vbg_ioctl_log *log)
{
        if (log->hdr.size_out != sizeof(log->hdr))
                return -EINVAL;

        vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
                 log->u.in.msg);

        return 0;
}
static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
                                        struct vbg_session *session,
                                        struct vbg_ioctl_change_filter *filter)
{
        u32 or_mask, not_mask;

        if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
                return -EINVAL;

        or_mask = filter->u.in.or_mask;
        not_mask = filter->u.in.not_mask;

        if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
                return -EINVAL;

        return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
                                            false);
}
static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
        struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
{
        u32 or_mask, not_mask;
        int ret;

        if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
                return -EINVAL;

        or_mask = caps->u.in.or_mask;
        not_mask = caps->u.in.not_mask;

        if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
                return -EINVAL;

        ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
                                           false);
        if (ret)
                return ret;

        caps->u.out.session_caps = session->guest_caps;
        caps->u.out.global_caps = gdev->guest_caps_host;

        return 0;
}
static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
                                   struct vbg_ioctl_check_balloon *balloon_info)
{
        if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
                return -EINVAL;

        balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
        /*
         * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
         * events entirely in the kernel, see vbg_core_isr().
         */
        balloon_info->u.out.handle_in_r3 = false;

        return 0;
}
static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
                                     struct vbg_ioctl_write_coredump *dump)
{
        struct vmmdev_write_core_dump *req;

        if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
                return -EINVAL;

        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
        if (!req)
                return -ENOMEM;

        req->flags = dump->u.in.flags;
        dump->hdr.rc = vbg_req_perform(gdev, req);

        kfree(req);
        return 0;
}
/**
 * Common IOCtl for user to kernel communication.
 *
 * Return: 0 or negative errno value.
 * @session:	The client session.
 * @req:	The requested function.
 * @data:	The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
 */
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
{
        unsigned int req_no_size = req & ~IOCSIZE_MASK;
        struct vbg_dev *gdev = session->gdev;
        struct vbg_ioctl_hdr *hdr = data;
        bool f32bit = false;

        hdr->rc = VINF_SUCCESS;
        if (!hdr->size_out)
                hdr->size_out = hdr->size_in;

        /*
         * hdr->version and hdr->size_in / hdr->size_out minimum size are
         * already checked by vbg_misc_device_ioctl().
         */

        /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
        if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
            req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
                return vbg_ioctl_vmmrequest(gdev, session, data);

        if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
                return -EINVAL;

        /* Fixed size requests. */
        switch (req) {
        case VBG_IOCTL_DRIVER_VERSION_INFO:
                return vbg_ioctl_driver_version_info(data);
        case VBG_IOCTL_HGCM_CONNECT:
                return vbg_ioctl_hgcm_connect(gdev, session, data);
        case VBG_IOCTL_HGCM_DISCONNECT:
                return vbg_ioctl_hgcm_disconnect(gdev, session, data);
        case VBG_IOCTL_WAIT_FOR_EVENTS:
                return vbg_ioctl_wait_for_events(gdev, session, data);
        case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
                return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
        case VBG_IOCTL_CHANGE_FILTER_MASK:
                return vbg_ioctl_change_filter_mask(gdev, session, data);
        case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
                return vbg_ioctl_change_guest_capabilities(gdev, session, data);
        case VBG_IOCTL_CHECK_BALLOON:
                return vbg_ioctl_check_balloon(gdev, data);
        case VBG_IOCTL_WRITE_CORE_DUMP:
                return vbg_ioctl_write_core_dump(gdev, data);
        }

        /* Variable sized requests. */
        switch (req_no_size) {
#ifdef CONFIG_COMPAT
        case VBG_IOCTL_HGCM_CALL_32(0):
                f32bit = true;
                /* Fall through */
#endif
        case VBG_IOCTL_HGCM_CALL(0):
                return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
        case VBG_IOCTL_LOG(0):
                return vbg_ioctl_log(data);
        }

        vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
        return -ENOTTY;
}
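/*
 * Hedged userspace sketch (illustrative only; field and macro names are per
 * the vboxguest uapi header, double-check them there): a client reaches this
 * dispatcher through ioctl() on /dev/vboxguest, roughly like
 *
 *      struct vbg_ioctl_driver_version_info info = {};
 *
 *      info.hdr.size_in  = sizeof(info);
 *      info.hdr.size_out = sizeof(info);
 *      info.hdr.version  = VBG_IOCTL_HDR_VERSION;
 *      ioctl(fd, VBG_IOCTL_DRIVER_VERSION_INFO, &info);
 *
 * vbg_misc_device_ioctl() copies the buffer in, this function dispatches on
 * the request number, and the result comes back in info.u.out.
 */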
/**
 * Report guest supported mouse-features to the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 * @features:	The set of features to report to the host.
 */
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
{
        struct vmmdev_mouse_status *req;
        int rc;

        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
        if (!req)
                return -ENOMEM;

        req->mouse_features = features;
        req->pointer_pos_x = 0;
        req->pointer_pos_y = 0;

        rc = vbg_req_perform(gdev, req);
        if (rc < 0)
                vbg_err("%s error, rc: %d\n", __func__, rc);

        kfree(req);
        return vbg_status_code_to_errno(rc);
}
/** Core interrupt service routine. */
irqreturn_t vbg_core_isr(int irq, void *dev_id)
{
        struct vbg_dev *gdev = dev_id;
        struct vmmdev_events *req = gdev->ack_events_req;
        bool mouse_position_changed = false;
        unsigned long flags;
        u32 events = 0;
        int rc;

        if (!gdev->mmio->V.V1_04.have_events)
                return IRQ_NONE;

        /* Get and acknowledge events. */
        req->header.rc = VERR_INTERNAL_ERROR;
        req->events = 0;
        rc = vbg_req_perform(gdev, req);
        if (rc < 0) {
                vbg_err("Error performing events req, rc: %d\n", rc);
                return IRQ_NONE;
        }

        events = req->events;

        if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
                mouse_position_changed = true;
                events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
        }

        if (events & VMMDEV_EVENT_HGCM) {
                wake_up(&gdev->hgcm_wq);
                events &= ~VMMDEV_EVENT_HGCM;
        }

        if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
                schedule_work(&gdev->mem_balloon.work);
                events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
        }

        if (events) {
                spin_lock_irqsave(&gdev->event_spinlock, flags);
                gdev->pending_events |= events;
                spin_unlock_irqrestore(&gdev->event_spinlock, flags);

                wake_up(&gdev->event_wq);
        }

        if (mouse_position_changed)
                vbg_linux_mouse_event(gdev);

        return IRQ_HANDLED;
}