/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
 *
 * Copyright (C) 2007-2016 Oracle Corporation
 */

#include <linux/device.h>
#include <linux/mm.h>		/* for alloc_page()/__free_page() used below */
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include <linux/vmalloc.h>
#include "vboxguest_core.h"
#include "vboxguest_version.h"
/* Get the pointer to the first HGCM parameter. */
#define VBG_IOCTL_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
/* Get the pointer to the first HGCM parameter in a 32-bit request. */
#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
	((struct vmmdev_hgcm_function_parameter32 *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
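/*
 * Illustrative note (not part of the upstream source): these macros assume
 * the variable-length HGCM parameter array immediately follows the fixed
 * struct vbg_ioctl_hgcm_call header in one contiguous ioctl buffer, i.e.
 *
 *	| struct vbg_ioctl_hgcm_call | parm[0] | parm[1] | ... |
 *
 * so the first parameter lives at (u8 *)call + sizeof(*call).
 */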

#define GUEST_MAPPINGS_TRIES	5
/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so we try several times.
 *
 * Failure to reserve the guest mappings is ignored.
 *
 * @gdev:	The Guest extension device.
 */
static void vbg_guest_mappings_init(struct vbg_dev *gdev)
{
	struct vmmdev_hypervisorinfo *req;
	void *guest_mappings[GUEST_MAPPINGS_TRIES];
	struct page **pages = NULL;
	u32 size, hypervisor_size;
	int i, rc;

	/* Query the required space. */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		goto out;

	/*
	 * The VMM will report back if there is nothing it wants to map, like
	 * for instance in VT-x and AMD-V mode.
	 */
	if (req->hypervisor_size == 0)
		goto out;

	hypervisor_size = req->hypervisor_size;
	/* Add 4M so that we can align the vmap to 4MiB as the host requires. */
	size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;

	pages = kmalloc(sizeof(*pages) * (size >> PAGE_SHIFT), GFP_KERNEL);
	if (!pages)
		goto out;

	gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
	if (!gdev->guest_mappings_dummy_page)
		goto out;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = gdev->guest_mappings_dummy_page;

	/*
	 * Try several times, the VMM might not accept some addresses because
	 * of address clashes between the three contexts.
	 */
	for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
		guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
					 VM_MAP, PAGE_KERNEL_RO);
		if (!guest_mappings[i])
			break;

		req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
		req->header.rc = VERR_INTERNAL_ERROR;
		req->hypervisor_size = hypervisor_size;
		req->hypervisor_start =
			(unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);

		rc = vbg_req_perform(gdev, req);
		if (rc >= 0) {
			gdev->guest_mappings = guest_mappings[i];
			break;
		}
	}

	/* Free vmap's from failed attempts. */
	while (--i >= 0)
		vunmap(guest_mappings[i]);

	/* On failure free the dummy-page backing the vmap */
	if (!gdev->guest_mappings) {
		__free_page(gdev->guest_mappings_dummy_page);
		gdev->guest_mappings_dummy_page = NULL;
	}

out:
	vbg_req_free(req, sizeof(*req));
	kfree(pages);
}
/**
 * Undo what vbg_guest_mappings_init did.
 *
 * @gdev:	The Guest extension device.
 */
static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
{
	struct vmmdev_hypervisorinfo *req;
	int rc;

	if (!gdev->guest_mappings)
		return;

	/*
	 * Tell the host that we're going to free the memory we reserved for
	 * it, then free it up. (Leak the memory if anything goes wrong here.)
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;

	rc = vbg_req_perform(gdev, req);

	vbg_req_free(req, sizeof(*req));

	if (rc < 0) {
		vbg_err("%s error: %d\n", __func__, rc);
		return;
	}

	vunmap(gdev->guest_mappings);
	gdev->guest_mappings = NULL;

	__free_page(gdev->guest_mappings_dummy_page);
	gdev->guest_mappings_dummy_page = NULL;
}
/**
 * Report the guest information to the host.
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 */
static int vbg_report_guest_info(struct vbg_dev *gdev)
{
	/*
	 * Allocate and fill in the two guest info reports.
	 */
	struct vmmdev_guest_info *req1 = NULL;
	struct vmmdev_guest_info2 *req2 = NULL;
	int rc, ret = -ENOMEM;

	req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
	req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
	if (!req1 || !req2)
		goto out_free;

	req1->interface_version = VMMDEV_VERSION;
	req1->os_type = VMMDEV_OSTYPE_LINUX26;
#if __BITS_PER_LONG == 64
	req1->os_type |= VMMDEV_OSTYPE_X64;
#endif

	req2->additions_major = VBG_VERSION_MAJOR;
	req2->additions_minor = VBG_VERSION_MINOR;
	req2->additions_build = VBG_VERSION_BUILD;
	req2->additions_revision = VBG_SVN_REV;
	/* (no features defined yet) */
	req2->additions_features = 0;
	strlcpy(req2->name, VBG_VERSION_STRING,
		sizeof(req2->name));

	/*
	 * There are two protocols here:
	 *	1. INFO2 + INFO1. Supported by >=3.2.51.
	 *	2. INFO1 and optionally INFO2. The old protocol.
	 *
	 * We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED
	 * if not supported by the VMMDev (message ordering requirement).
	 */
	rc = vbg_req_perform(gdev, req2);
	if (rc >= 0) {
		rc = vbg_req_perform(gdev, req1);
	} else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
		rc = vbg_req_perform(gdev, req1);
		if (rc >= 0) {
			rc = vbg_req_perform(gdev, req2);
			if (rc == VERR_NOT_IMPLEMENTED)
				rc = VINF_SUCCESS;
		}
	}
	ret = vbg_status_code_to_errno(rc);

out_free:
	vbg_req_free(req2, sizeof(*req2));
	vbg_req_free(req1, sizeof(*req1));
	return ret;
}
/**
 * Report the guest driver status to the host.
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 * @active:	Flag whether the driver is now active or not.
 */
static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
{
	struct vmmdev_guest_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
	if (!req)
		return -ENOMEM;

	req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
	if (active)
		req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
	else
		req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;

	rc = vbg_req_perform(gdev, req);
	if (rc == VERR_NOT_IMPLEMENTED)	/* Compatibility with older hosts. */
		rc = VINF_SUCCESS;

	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}
/**
 * Inflate the balloon by one chunk. The caller owns the balloon mutex.
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 * @chunk_idx:	Index of the chunk.
 */
static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages;
	int i, rc, ret;

	pages = kmalloc(sizeof(*pages) * VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
			GFP_KERNEL | __GFP_NOWARN);
	if (!pages)
		return -ENOMEM;

	req->header.size = sizeof(*req);
	req->inflate = true;
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto out_error;
		}

		req->phys_page[i] = page_to_phys(pages[i]);
	}

	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		ret = vbg_status_code_to_errno(rc);
		goto out_error;
	}

	gdev->mem_balloon.pages[chunk_idx] = pages;

	return 0;

out_error:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);

	return ret;
}
/**
 * Deflate the balloon by one chunk. The caller owns the balloon mutex.
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 * @chunk_idx:	Index of the chunk.
 */
static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages = gdev->mem_balloon.pages[chunk_idx];
	int i, rc;

	req->header.size = sizeof(*req);
	req->inflate = false;
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		req->phys_page[i] = page_to_phys(pages[i]);

	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		return vbg_status_code_to_errno(rc);
	}

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		__free_page(pages[i]);
	kfree(pages);
	gdev->mem_balloon.pages[chunk_idx] = NULL;

	return 0;
}
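/*
 * Background note: while a chunk is inflated the guest keeps its
 * VMMDEV_MEMORY_BALLOON_CHUNK_PAGES pages allocated and reports their
 * physical addresses to the host; deflating hands the same pages back to
 * the guest page allocator via __free_page(), as implemented above.
 */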
/**
 * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
 * the host wants the balloon to be and adjust accordingly.
 */
static void vbg_balloon_work(struct work_struct *work)
{
	struct vbg_dev *gdev =
		container_of(work, struct vbg_dev, mem_balloon.work);
	struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
	u32 i, chunks;
	int rc, ret;

	/*
	 * Setting this bit means that we request the value from the host and
	 * change the guest memory balloon according to the returned value.
	 */
	req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		return;
	}

	/*
	 * The host always returns the same maximum amount of chunks, so
	 * we do this once.
	 */
	if (!gdev->mem_balloon.max_chunks) {
		gdev->mem_balloon.pages =
			devm_kcalloc(gdev->dev, req->phys_mem_chunks,
				     sizeof(struct page **), GFP_KERNEL);
		if (!gdev->mem_balloon.pages)
			return;

		gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
	}

	chunks = req->balloon_chunks;
	if (chunks > gdev->mem_balloon.max_chunks) {
		vbg_err("%s: illegal balloon size %u (max=%u)\n",
			__func__, chunks, gdev->mem_balloon.max_chunks);
		return;
	}

	if (chunks > gdev->mem_balloon.chunks) {
		/* inflate */
		for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
			ret = vbg_balloon_inflate(gdev, i);
			if (ret)
				return;

			gdev->mem_balloon.chunks++;
		}
	} else {
		/* deflate */
		for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
			ret = vbg_balloon_deflate(gdev, i);
			if (ret)
				return;

			gdev->mem_balloon.chunks--;
		}
	}
}
/**
 * Callback for heartbeat timer.
 */
static void vbg_heartbeat_timer(struct timer_list *t)
{
	struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);

	vbg_req_perform(gdev, gdev->guest_heartbeat_req);
	mod_timer(&gdev->heartbeat_timer,
		  msecs_to_jiffies(gdev->heartbeat_interval_ms));
}
/**
 * Configure the host to check guest's heartbeat
 * and get heartbeat interval from the host.
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 * @enabled:	Set true to enable guest heartbeat checks on host.
 */
static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
{
	struct vmmdev_heartbeat *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
	if (!req)
		return -ENOMEM;

	req->enabled = enabled;
	req->interval_ns = 0;
	rc = vbg_req_perform(gdev, req);
	do_div(req->interval_ns, 1000000); /* ns -> ms */
	gdev->heartbeat_interval_ms = req->interval_ns;
	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}
/**
 * Initializes the heartbeat timer. This feature may be disabled by the host.
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 */
static int vbg_heartbeat_init(struct vbg_dev *gdev)
{
	int ret;

	/* Make sure that heartbeat checking is disabled if we fail. */
	ret = vbg_heartbeat_host_config(gdev, false);
	if (ret < 0)
		return ret;

	ret = vbg_heartbeat_host_config(gdev, true);
	if (ret < 0)
		return ret;

	gdev->guest_heartbeat_req = vbg_req_alloc(
					sizeof(*gdev->guest_heartbeat_req),
					VMMDEVREQ_GUEST_HEARTBEAT);
	if (!gdev->guest_heartbeat_req)
		return -ENOMEM;

	vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
		 __func__, gdev->heartbeat_interval_ms);
	mod_timer(&gdev->heartbeat_timer, 0);

	return 0;
}
/**
 * Cleanup heartbeat code, stop HB timer and disable host heartbeat checking.
 * @gdev:	The Guest extension device.
 */
static void vbg_heartbeat_exit(struct vbg_dev *gdev)
{
	del_timer_sync(&gdev->heartbeat_timer);
	vbg_heartbeat_host_config(gdev, false);
	vbg_req_free(gdev->guest_heartbeat_req,
		     sizeof(*gdev->guest_heartbeat_req));
}
/**
 * Applies a change to the bit usage tracker.
 * Return: true if the mask changed, false if not.
 * @tracker:	The bit usage tracker.
 * @changed:	The bits to change.
 * @previous:	The previous value of the bits.
 */
static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
				u32 changed, u32 previous)
{
	bool global_change = false;

	while (changed) {
		u32 bit = ffs(changed) - 1;
		u32 bitmask = BIT(bit);

		if (bitmask & previous) {
			tracker->per_bit_usage[bit] -= 1;
			if (tracker->per_bit_usage[bit] == 0) {
				global_change = true;
				tracker->mask &= ~bitmask;
			}
		} else {
			tracker->per_bit_usage[bit] += 1;
			if (tracker->per_bit_usage[bit] == 1) {
				global_change = true;
				tracker->mask |= bitmask;
			}
		}

		changed &= ~bitmask;
	}

	return global_change;
}
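/*
 * Worked example (illustrative): if two sessions both enable the same event
 * bit, per_bit_usage[bit] goes 0 -> 1 -> 2 but tracker->mask only changes on
 * the first enable; likewise it only clears again when the last user drops
 * the bit. The callers below use the returned "global change" to decide
 * whether the host actually needs to be reconfigured.
 */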
/**
 * Init and termination worker for resetting the (host) event filter on the host
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @fixed_events:	Fixed events (init time).
 */
static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
				       u32 fixed_events)
{
	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
	if (!req)
		return -ENOMEM;

	req->not_mask = U32_MAX & ~fixed_events;
	req->or_mask = fixed_events;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}
/**
 * Changes the event filter mask for the given session.
 *
 * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
 * do session cleanup. Takes the session mutex.
 *
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The events to add.
 * @not_mask:			The events to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 */
static int vbg_set_session_event_filter(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	struct vmmdev_mask *req;
	u32 changed, previous;
	int rc, ret = 0;

	/* Allocate a request buffer before taking the mutex */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
	if (!req) {
		if (!session_termination)
			return -ENOMEM;
		/* Ignore allocation failure, we must do session cleanup. */
	}

	mutex_lock(&gdev->session_mutex);

	/* Apply the changes to the session mask. */
	previous = session->event_filter;
	session->event_filter |= or_mask;
	session->event_filter &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->event_filter;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
	or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;

	if (gdev->event_filter_host == or_mask || !req)
		goto out;

	gdev->event_filter_host = or_mask;
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		ret = vbg_status_code_to_errno(rc);

		/* Failed, roll back (unless it's session termination time). */
		gdev->event_filter_host = U32_MAX;
		if (session_termination)
			goto out;

		vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
				    session->event_filter);
		session->event_filter = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);
	vbg_req_free(req, sizeof(*req));

	return ret;
}
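/*
 * Note: the filter programmed into the host is always the union of
 * gdev->fixed_events and the per-session filters accumulated in
 * event_filter_tracker, so no session can mask the fixed events.
 */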
/**
 * Init and termination worker for setting the guest capabilities to zero
 * on the host.
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 */
static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
{
	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
	if (!req)
		return -ENOMEM;

	req->not_mask = U32_MAX;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}
/**
 * Sets the guest capabilities for a session. Takes the session mutex.
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The capabilities to add.
 * @not_mask:			The capabilities to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 */
static int vbg_set_session_capabilities(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	struct vmmdev_mask *req;
	u32 changed, previous;
	int rc, ret = 0;

	/* Allocate a request buffer before taking the mutex */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
	if (!req) {
		if (!session_termination)
			return -ENOMEM;
		/* Ignore allocation failure, we must do session cleanup. */
	}

	mutex_lock(&gdev->session_mutex);

	/* Apply the changes to the session mask. */
	previous = session->guest_caps;
	session->guest_caps |= or_mask;
	session->guest_caps &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->guest_caps;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
	or_mask = gdev->guest_caps_tracker.mask;

	if (gdev->guest_caps_host == or_mask || !req)
		goto out;

	gdev->guest_caps_host = or_mask;
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		ret = vbg_status_code_to_errno(rc);

		/* Failed, roll back (unless it's session termination time). */
		gdev->guest_caps_host = U32_MAX;
		if (session_termination)
			goto out;

		vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
				    session->guest_caps);
		session->guest_caps = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);
	vbg_req_free(req, sizeof(*req));

	return ret;
}
/**
 * vbg_query_host_version gets the host feature mask and version information.
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 */
static int vbg_query_host_version(struct vbg_dev *gdev)
{
	struct vmmdev_host_version *req;
	int rc, ret;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
	if (!req)
		return -ENOMEM;

	rc = vbg_req_perform(gdev, req);
	ret = vbg_status_code_to_errno(rc);
	if (ret)
		goto out;

	snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
		 req->major, req->minor, req->build, req->revision);
	gdev->host_features = req->features;

	vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
		 gdev->host_features);

	if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
		vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
		ret = -ENODEV;
	}

out:
	vbg_req_free(req, sizeof(*req));
	return ret;
}
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges; this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * Return: 0 or negative errno value.
 *
 * @gdev:		The Guest extension device.
 * @fixed_events:	Events that will be enabled upon init and no client
 *			will ever be allowed to mask.
 */
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
{
	int ret = -ENOMEM;

	gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
	gdev->event_filter_host = U32_MAX;	/* forces a report */
	gdev->guest_caps_host = U32_MAX;	/* forces a report */

	init_waitqueue_head(&gdev->event_wq);
	init_waitqueue_head(&gdev->hgcm_wq);
	spin_lock_init(&gdev->event_spinlock);
	mutex_init(&gdev->session_mutex);
	mutex_init(&gdev->cancel_req_mutex);
	timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
	INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);

	gdev->mem_balloon.get_req =
		vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
	gdev->mem_balloon.change_req =
		vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
			      VMMDEVREQ_CHANGE_MEMBALLOON);
	gdev->cancel_req =
		vbg_req_alloc(sizeof(*(gdev->cancel_req)),
			      VMMDEVREQ_HGCM_CANCEL2);
	gdev->ack_events_req =
		vbg_req_alloc(sizeof(*gdev->ack_events_req),
			      VMMDEVREQ_ACKNOWLEDGE_EVENTS);
	gdev->mouse_status_req =
		vbg_req_alloc(sizeof(*gdev->mouse_status_req),
			      VMMDEVREQ_GET_MOUSE_STATUS);

	if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
	    !gdev->cancel_req || !gdev->ack_events_req ||
	    !gdev->mouse_status_req)
		goto err_free_reqs;

	ret = vbg_query_host_version(gdev);
	if (ret)
		goto err_free_reqs;

	ret = vbg_report_guest_info(gdev);
	if (ret) {
		vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
		goto err_free_reqs;
	}

	ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
	if (ret) {
		vbg_err("vboxguest: Error setting fixed event filter: %d\n",
			ret);
		goto err_free_reqs;
	}

	ret = vbg_reset_host_capabilities(gdev);
	if (ret) {
		vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
			ret);
		goto err_free_reqs;
	}

	ret = vbg_core_set_mouse_status(gdev, 0);
	if (ret) {
		vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
		goto err_free_reqs;
	}

	/* These may fail without requiring the driver init to fail. */
	vbg_guest_mappings_init(gdev);
	vbg_heartbeat_init(gdev);

	ret = vbg_report_driver_status(gdev, true);
	if (ret < 0)
		vbg_err("vboxguest: Error reporting driver status: %d\n", ret);

	return 0;

err_free_reqs:
	vbg_req_free(gdev->mouse_status_req,
		     sizeof(*gdev->mouse_status_req));
	vbg_req_free(gdev->ack_events_req,
		     sizeof(*gdev->ack_events_req));
	vbg_req_free(gdev->cancel_req,
		     sizeof(*gdev->cancel_req));
	vbg_req_free(gdev->mem_balloon.change_req,
		     sizeof(*gdev->mem_balloon.change_req));
	vbg_req_free(gdev->mem_balloon.get_req,
		     sizeof(*gdev->mem_balloon.get_req));
	return ret;
}
/**
 * Call this on exit to clean-up vboxguest-core managed resources.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 * @gdev:	The Guest extension device.
 */
void vbg_core_exit(struct vbg_dev *gdev)
{
	vbg_heartbeat_exit(gdev);
	vbg_guest_mappings_exit(gdev);

	/* Clear the host flags (mouse status etc). */
	vbg_reset_host_event_filter(gdev, 0);
	vbg_reset_host_capabilities(gdev);
	vbg_core_set_mouse_status(gdev, 0);

	vbg_req_free(gdev->mouse_status_req,
		     sizeof(*gdev->mouse_status_req));
	vbg_req_free(gdev->ack_events_req,
		     sizeof(*gdev->ack_events_req));
	vbg_req_free(gdev->cancel_req,
		     sizeof(*gdev->cancel_req));
	vbg_req_free(gdev->mem_balloon.change_req,
		     sizeof(*gdev->mem_balloon.change_req));
	vbg_req_free(gdev->mem_balloon.get_req,
		     sizeof(*gdev->mem_balloon.get_req));
}
/**
 * Creates a VBoxGuest user session.
 *
 * vboxguest_linux.c calls this when userspace opens the char-device.
 * Return: A pointer to the new session or an ERR_PTR on error.
 * @gdev:	The Guest extension device.
 * @user:	Set if this is a session for the vboxuser device.
 */
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
{
	struct vbg_session *session;

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return ERR_PTR(-ENOMEM);

	session->gdev = gdev;
	session->user_session = user;

	return session;
}
/**
 * Closes a VBoxGuest session.
 * @session:	The session to close (and free).
 */
void vbg_core_close_session(struct vbg_session *session)
{
	struct vbg_dev *gdev = session->gdev;
	int i, rc;

	vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
	vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);

	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i])
			continue;

		vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
	}

	kfree(session);
}
static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
			 size_t out_size)
{
	if (hdr->size_in  != (sizeof(*hdr) + in_size) ||
	    hdr->size_out != (sizeof(*hdr) + out_size))
		return -EINVAL;

	return 0;
}
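/*
 * Example (illustrative): for a fixed-size ioctl such as
 * VBG_IOCTL_HGCM_CONNECT the caller must set hdr.size_in to
 * sizeof(hdr) + sizeof(u.in) and hdr.size_out to sizeof(hdr) + sizeof(u.out);
 * vbg_ioctl_hgcm_connect() below rejects anything else via this helper.
 */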
static int vbg_ioctl_driver_version_info(
	struct vbg_ioctl_driver_version_info *info)
{
	const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
	u16 min_maj_version, req_maj_version;

	if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
		return -EINVAL;

	req_maj_version = info->u.in.req_version >> 16;
	min_maj_version = info->u.in.min_version >> 16;

	if (info->u.in.min_version > info->u.in.req_version ||
	    min_maj_version != req_maj_version)
		return -EINVAL;

	if (info->u.in.min_version <= VBG_IOC_VERSION &&
	    min_maj_version == vbg_maj_version) {
		info->u.out.session_version = VBG_IOC_VERSION;
	} else {
		info->u.out.session_version = U32_MAX;
		info->hdr.rc = VERR_VERSION_MISMATCH;
	}

	info->u.out.driver_version  = VBG_IOC_VERSION;
	info->u.out.driver_revision = 0;
	info->u.out.reserved1 = 0;
	info->u.out.reserved2 = 0;

	return 0;
}
static bool vbg_wait_event_cond(struct vbg_dev *gdev,
				struct vbg_session *session,
				u32 event_mask)
{
	unsigned long flags;
	bool wakeup;
	u32 events;

	spin_lock_irqsave(&gdev->event_spinlock, flags);

	events = gdev->pending_events & event_mask;
	wakeup = events || session->cancel_waiters;

	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return wakeup;
}
/* Must be called with the event_lock held */
static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     u32 event_mask)
{
	u32 events = gdev->pending_events & event_mask;

	gdev->pending_events &= ~events;
	return events;
}
static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_wait_for_events *wait)
{
	u32 timeout_ms = wait->u.in.timeout_ms;
	u32 event_mask = wait->u.in.events;
	unsigned long flags;
	long timeout;
	int ret = 0;

	if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
		return -EINVAL;

	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	wait->u.out.events = 0;
	do {
		timeout = wait_event_interruptible_timeout(
				gdev->event_wq,
				vbg_wait_event_cond(gdev, session, event_mask),
				timeout);

		spin_lock_irqsave(&gdev->event_spinlock, flags);

		if (timeout < 0 || session->cancel_waiters) {
			ret = -EINTR;
		} else if (timeout == 0) {
			ret = -ETIMEDOUT;
		} else {
			wait->u.out.events =
			    vbg_consume_events_locked(gdev, session, event_mask);
		}

		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		/*
		 * Someone else may have consumed the event(s) first, in
		 * which case we go back to waiting.
		 */
	} while (ret == 0 && wait->u.out.events == 0);

	return ret;
}
static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
					       struct vbg_session *session,
					       struct vbg_ioctl_hdr *hdr)
{
	unsigned long flags;

	if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
		return -EINVAL;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	session->cancel_waiters = true;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	wake_up(&gdev->event_wq);

	return 0;
}
/**
 * Checks if the VMM request is allowed in the context of the given session.
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 * @session:	The calling session.
 * @req:	The request.
 */
static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
			   const struct vmmdev_request_header *req)
{
	const struct vmmdev_guest_status *guest_status;
	bool trusted_apps_only;

	switch (req->request_type) {
	/* Trusted users apps only. */
	case VMMDEVREQ_QUERY_CREDENTIALS:
	case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
	case VMMDEVREQ_REGISTER_SHARED_MODULE:
	case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
	case VMMDEVREQ_WRITE_COREDUMP:
	case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
	case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
	case VMMDEVREQ_CHECK_SHARED_MODULES:
	case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
	case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
	case VMMDEVREQ_REPORT_GUEST_STATS:
	case VMMDEVREQ_REPORT_GUEST_USER_STATE:
	case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
		trusted_apps_only = true;
		break;

	/* Anyone. */
	case VMMDEVREQ_GET_MOUSE_STATUS:
	case VMMDEVREQ_SET_MOUSE_STATUS:
	case VMMDEVREQ_SET_POINTER_SHAPE:
	case VMMDEVREQ_GET_HOST_VERSION:
	case VMMDEVREQ_IDLE:
	case VMMDEVREQ_GET_HOST_TIME:
	case VMMDEVREQ_SET_POWER_STATUS:
	case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
	case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
	case VMMDEVREQ_REPORT_GUEST_STATUS:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
	case VMMDEVREQ_VIDEMODE_SUPPORTED:
	case VMMDEVREQ_GET_HEIGHT_REDUCTION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
	case VMMDEVREQ_VIDEMODE_SUPPORTED2:
	case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
	case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
	case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
	case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
	case VMMDEVREQ_GET_VRDPCHANGE_REQ:
	case VMMDEVREQ_LOG_STRING:
	case VMMDEVREQ_GET_SESSION_ID:
		trusted_apps_only = false;
		break;

	/* Depends on the request parameters... */
	case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
		guest_status = (const struct vmmdev_guest_status *)req;
		switch (guest_status->facility) {
		case VBOXGUEST_FACILITY_TYPE_ALL:
		case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
			vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
				guest_status->facility);
			return -EPERM;
		case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
			trusted_apps_only = true;
			break;
		case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
		case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
		case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
		default:
			trusted_apps_only = false;
			break;
		}
		break;

	/* Anything else is not allowed. */
	default:
		vbg_err("Denying userspace vmm call type %#08x\n",
			req->request_type);
		return -EPERM;
	}

	if (trusted_apps_only && session->user_session) {
		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
			req->request_type);
		return -EPERM;
	}

	return 0;
}
static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
				struct vbg_session *session, void *data)
{
	struct vbg_ioctl_hdr *hdr = data;
	int ret;

	if (hdr->size_in != hdr->size_out)
		return -EINVAL;

	if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
		return -E2BIG;

	if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	ret = vbg_req_allowed(gdev, session, data);
	if (ret < 0)
		return ret;

	vbg_req_perform(gdev, data);
	WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);

	return 0;
}
static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
				  struct vbg_session *session,
				  struct vbg_ioctl_hgcm_connect *conn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
		return -EINVAL;

	/* Find a free place in the sessions clients array and claim it */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i]) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EMFILE;

	ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
			       &conn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && conn->hdr.rc >= 0) {
		conn->u.out.client_id = client_id;
		session->hgcm_client_ids[i] = client_id;
	} else {
		conn->u.out.client_id = 0;
		session->hgcm_client_ids[i] = 0;
	}
	mutex_unlock(&gdev->session_mutex);

	return ret;
}
static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_hgcm_disconnect *disconn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
		return -EINVAL;

	client_id = disconn->u.in.client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (session->hgcm_client_ids[i] == client_id) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EINVAL;

	ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && disconn->hdr.rc >= 0)
		session->hgcm_client_ids[i] = 0;
	else
		session->hgcm_client_ids[i] = client_id;
	mutex_unlock(&gdev->session_mutex);

	return ret;
}
static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
			       struct vbg_session *session, bool f32bit,
			       struct vbg_ioctl_hgcm_call *call)
{
	size_t actual_size;
	u32 client_id;
	int i, ret;

	if (call->hdr.size_in < sizeof(*call))
		return -EINVAL;

	if (call->hdr.size_in != call->hdr.size_out)
		return -EINVAL;

	if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
		return -E2BIG;

	client_id = call->client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	actual_size = sizeof(*call);
	if (f32bit)
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter32);
	else
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter);
	if (call->hdr.size_in < actual_size) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
			  call->hdr.size_in, actual_size);
		return -EINVAL;
	}
	call->hdr.size_out = actual_size;

	/*
	 * Validate the client id.
	 */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
		if (session->hgcm_client_ids[i] == client_id)
			break;
	mutex_unlock(&gdev->session_mutex);
	if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
			  client_id);
		return -EINVAL;
	}

	if (f32bit)
		ret = vbg_hgcm_call32(gdev, client_id,
				      call->function, call->timeout_ms,
				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
				      call->parm_count, &call->hdr.rc);
	else
		ret = vbg_hgcm_call(gdev, client_id,
				    call->function, call->timeout_ms,
				    VBG_IOCTL_HGCM_CALL_PARMS(call),
				    call->parm_count, &call->hdr.rc);

	if (ret == -E2BIG) {
		/* E2BIG needs to be reported through the hdr.rc field. */
		call->hdr.rc = VERR_OUT_OF_RANGE;
		ret = 0;
	}

	if (ret && ret != -EINTR && ret != -ETIMEDOUT)
		vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);

	return ret;
}
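/*
 * Note: the f32bit path above only differs in the parameter layout it expects
 * (struct vmmdev_hgcm_function_parameter32); it is selected from
 * vbg_core_ioctl() below for VBG_IOCTL_HGCM_CALL_32 when CONFIG_COMPAT is set.
 */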
static int vbg_ioctl_log(struct vbg_ioctl_log *log)
{
	if (log->hdr.size_out != sizeof(log->hdr))
		return -EINVAL;

	vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
		 log->u.in.msg);

	return 0;
}
static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
					struct vbg_session *session,
					struct vbg_ioctl_change_filter *filter)
{
	u32 or_mask, not_mask;

	if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
		return -EINVAL;

	or_mask = filter->u.in.or_mask;
	not_mask = filter->u.in.not_mask;

	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
		return -EINVAL;

	return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
					    false);
}
static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
	     struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
{
	u32 or_mask, not_mask;
	int ret;

	if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
		return -EINVAL;

	or_mask = caps->u.in.or_mask;
	not_mask = caps->u.in.not_mask;

	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
		return -EINVAL;

	ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
					   false);
	if (ret)
		return ret;

	caps->u.out.session_caps = session->guest_caps;
	caps->u.out.global_caps = gdev->guest_caps_host;

	return 0;
}
static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
				   struct vbg_ioctl_check_balloon *balloon_info)
{
	if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
		return -EINVAL;

	balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
	/*
	 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
	 * events entirely in the kernel, see vbg_core_isr().
	 */
	balloon_info->u.out.handle_in_r3 = false;

	return 0;
}
static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
				     struct vbg_ioctl_write_coredump *dump)
{
	struct vmmdev_write_core_dump *req;

	if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
		return -EINVAL;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
	if (!req)
		return -ENOMEM;

	req->flags = dump->u.in.flags;
	dump->hdr.rc = vbg_req_perform(gdev, req);

	vbg_req_free(req, sizeof(*req));
	return 0;
}
/**
 * Common IOCtl for user to kernel communication.
 * Return: 0 or negative errno value.
 * @session:	The client session.
 * @req:	The requested function.
 * @data:	The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
 */
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
{
	unsigned int req_no_size = req & ~IOCSIZE_MASK;
	struct vbg_dev *gdev = session->gdev;
	struct vbg_ioctl_hdr *hdr = data;
	bool f32bit = false;

	hdr->rc = VINF_SUCCESS;
	if (!hdr->size_out)
		hdr->size_out = hdr->size_in;

	/*
	 * hdr->version and hdr->size_in / hdr->size_out minimum size are
	 * already checked by vbg_misc_device_ioctl().
	 */

	/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
	if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
		return vbg_ioctl_vmmrequest(gdev, session, data);

	if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	/* Fixed size requests. */
	switch (req) {
	case VBG_IOCTL_DRIVER_VERSION_INFO:
		return vbg_ioctl_driver_version_info(data);
	case VBG_IOCTL_HGCM_CONNECT:
		return vbg_ioctl_hgcm_connect(gdev, session, data);
	case VBG_IOCTL_HGCM_DISCONNECT:
		return vbg_ioctl_hgcm_disconnect(gdev, session, data);
	case VBG_IOCTL_WAIT_FOR_EVENTS:
		return vbg_ioctl_wait_for_events(gdev, session, data);
	case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
		return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
	case VBG_IOCTL_CHANGE_FILTER_MASK:
		return vbg_ioctl_change_filter_mask(gdev, session, data);
	case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
		return vbg_ioctl_change_guest_capabilities(gdev, session, data);
	case VBG_IOCTL_CHECK_BALLOON:
		return vbg_ioctl_check_balloon(gdev, data);
	case VBG_IOCTL_WRITE_CORE_DUMP:
		return vbg_ioctl_write_core_dump(gdev, data);
	}

	/* Variable sized requests. */
	switch (req_no_size) {
#ifdef CONFIG_COMPAT
	case VBG_IOCTL_HGCM_CALL_32(0):
		f32bit = true;
		/* Fall through */
#endif
	case VBG_IOCTL_HGCM_CALL(0):
		return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
	case VBG_IOCTL_LOG(0):
		return vbg_ioctl_log(data);
	}

	vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
	return -ENOTTY;
}
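/*
 * Dispatch summary (derived from the switches above): VMMDEV_REQUEST
 * pass-through is handled first, fixed-size requests are matched on the full
 * ioctl number, and variable-sized requests (HGCM call, log) are matched with
 * the size bits masked off via IOCSIZE_MASK.
 */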
/**
 * Report guest supported mouse-features to the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:	The Guest extension device.
 * @features:	The set of features to report to the host.
 */
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
{
	struct vmmdev_mouse_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
	if (!req)
		return -ENOMEM;

	req->mouse_features = features;
	req->pointer_pos_x = 0;
	req->pointer_pos_y = 0;

	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}
/** Core interrupt service routine. */
irqreturn_t vbg_core_isr(int irq, void *dev_id)
{
	struct vbg_dev *gdev = dev_id;
	struct vmmdev_events *req = gdev->ack_events_req;
	bool mouse_position_changed = false;
	unsigned long flags;
	u32 events = 0;
	int rc;

	if (!gdev->mmio->V.V1_04.have_events)
		return IRQ_NONE;

	/* Get and acknowledge events. */
	req->header.rc = VERR_INTERNAL_ERROR;
	req->events = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("Error performing events req, rc: %d\n", rc);
		return IRQ_NONE;
	}

	events = req->events;

	if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
		mouse_position_changed = true;
		events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
	}

	if (events & VMMDEV_EVENT_HGCM) {
		wake_up(&gdev->hgcm_wq);
		events &= ~VMMDEV_EVENT_HGCM;
	}

	if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
		schedule_work(&gdev->mem_balloon.work);
		events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	}

	if (events) {
		spin_lock_irqsave(&gdev->event_spinlock, flags);
		gdev->pending_events |= events;
		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		wake_up(&gdev->event_wq);
	}

	if (mouse_position_changed)
		vbg_linux_mouse_event(gdev);

	return IRQ_HANDLED;
}
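/*
 * Note: events not handled directly in the ISR (mouse, HGCM, balloon) are
 * accumulated in gdev->pending_events and picked up by
 * vbg_ioctl_wait_for_events() waiters woken via event_wq.
 */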