/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
 *
 * Copyright (C) 2007-2016 Oracle Corporation
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include <linux/vmalloc.h>
#include "vboxguest_core.h"
#include "vboxguest_version.h"
/* Get the pointer to the first HGCM parameter. */
#define VBG_IOCTL_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
/* Get the pointer to the first HGCM parameter in a 32-bit request. */
#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
	((struct vmmdev_hgcm_function_parameter32 *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))

#define GUEST_MAPPINGS_TRIES	5
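
/*
 * Layout assumed by the two macros above: the VBG_IOCTL_HGCM_CALL ioctl
 * buffer is a struct vbg_ioctl_hgcm_call header immediately followed by
 * call->parm_count packed function-parameter records (native or 32-bit
 * compat layout), roughly:
 *
 *	+---------------------------------+  (u8 *)call
 *	| struct vbg_ioctl_hgcm_call      |
 *	+---------------------------------+  VBG_IOCTL_HGCM_CALL_PARMS(call)
 *	| parameter[0] .. parameter[N-1]  |
 *	+---------------------------------+
 *
 * vbg_ioctl_hgcm_call() below verifies that the caller supplied at least
 * that much buffer before the macros are used.
 */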
/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so we try several times.
 *
 * Failure to reserve the guest mappings is ignored.
 *
 * @gdev:		The Guest extension device.
 */
static void vbg_guest_mappings_init(struct vbg_dev *gdev)
{
	struct vmmdev_hypervisorinfo *req;
	void *guest_mappings[GUEST_MAPPINGS_TRIES];
	struct page **pages = NULL;
	u32 size, hypervisor_size;
	int i, rc;

	/* Query the required space. */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		goto out;

	/*
	 * The VMM will report back if there is nothing it wants to map, like
	 * for instance in VT-x and AMD-V mode.
	 */
	if (req->hypervisor_size == 0)
		goto out;

	hypervisor_size = req->hypervisor_size;
	/* Add 4M so that we can align the vmap to 4MiB as the host requires. */
	size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;

	pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		goto out;

	gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
	if (!gdev->guest_mappings_dummy_page)
		goto out;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = gdev->guest_mappings_dummy_page;

	/*
	 * Try several times, the VMM might not accept some addresses because
	 * of address clashes between the three contexts.
	 */
	for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
		guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
					 VM_MAP, PAGE_KERNEL_RO);
		if (!guest_mappings[i])
			break;

		req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
		req->header.rc = VERR_INTERNAL_ERROR;
		req->hypervisor_size = hypervisor_size;
		req->hypervisor_start =
			(unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);

		rc = vbg_req_perform(gdev, req);
		if (rc >= 0) {
			gdev->guest_mappings = guest_mappings[i];
			break;
		}
	}

	/* Free vmap's from failed attempts. */
	while (--i >= 0)
		vunmap(guest_mappings[i]);

	/* On failure free the dummy-page backing the vmap */
	if (!gdev->guest_mappings) {
		__free_page(gdev->guest_mappings_dummy_page);
		gdev->guest_mappings_dummy_page = NULL;
	}

out:
	vbg_req_free(req, sizeof(*req));
	kfree(pages);
}
/**
 * Undo what vbg_guest_mappings_init did.
 *
 * @gdev:		The Guest extension device.
 */
static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
{
	struct vmmdev_hypervisorinfo *req;
	int rc;

	if (!gdev->guest_mappings)
		return;

	/*
	 * Tell the host that we're going to free the memory we reserved for
	 * it, then free it up. (Leak the memory if anything goes wrong here.)
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;

	rc = vbg_req_perform(gdev, req);

	vbg_req_free(req, sizeof(*req));

	if (rc < 0) {
		vbg_err("%s error: %d\n", __func__, rc);
		return;
	}

	vunmap(gdev->guest_mappings);
	gdev->guest_mappings = NULL;

	__free_page(gdev->guest_mappings_dummy_page);
	gdev->guest_mappings_dummy_page = NULL;
}
/**
 * Report the guest information to the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_report_guest_info(struct vbg_dev *gdev)
{
	/*
	 * Allocate and fill in the two guest info reports.
	 */
	struct vmmdev_guest_info *req1 = NULL;
	struct vmmdev_guest_info2 *req2 = NULL;
	int rc, ret = -ENOMEM;

	req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
	req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
	if (!req1 || !req2)
		goto out_free;

	req1->interface_version = VMMDEV_VERSION;
	req1->os_type = VMMDEV_OSTYPE_LINUX26;
#if __BITS_PER_LONG == 64
	req1->os_type |= VMMDEV_OSTYPE_X64;
#endif

	req2->additions_major = VBG_VERSION_MAJOR;
	req2->additions_minor = VBG_VERSION_MINOR;
	req2->additions_build = VBG_VERSION_BUILD;
	req2->additions_revision = VBG_SVN_REV;
	/* (no features defined yet) */
	req2->additions_features = 0;
	strlcpy(req2->name, VBG_VERSION_STRING,
		sizeof(req2->name));

	/*
	 * There are two protocols here:
	 *      1. INFO2 + INFO1. Supported by >=3.2.51.
	 *      2. INFO1 and optionally INFO2. The old protocol.
	 *
	 * We try protocol 1 first.  It will fail with VERR_NOT_SUPPORTED
	 * if not supported by the VMMDev (message ordering requirement).
	 */
	rc = vbg_req_perform(gdev, req2);
	if (rc >= 0) {
		rc = vbg_req_perform(gdev, req1);
	} else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
		rc = vbg_req_perform(gdev, req1);
		if (rc >= 0) {
			rc = vbg_req_perform(gdev, req2);
			if (rc == VERR_NOT_IMPLEMENTED)
				rc = VINF_SUCCESS;
		}
	}
	ret = vbg_status_code_to_errno(rc);

out_free:
	vbg_req_free(req2, sizeof(*req2));
	vbg_req_free(req1, sizeof(*req1));
	return ret;
}
/**
 * Report the guest driver status to the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @active:		Flag whether the driver is now active or not.
 */
static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
{
	struct vmmdev_guest_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
	if (!req)
		return -ENOMEM;

	req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
	if (active)
		req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
	else
		req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
	req->flags = 0;

	rc = vbg_req_perform(gdev, req);
	if (rc == VERR_NOT_IMPLEMENTED)	/* Compatibility with older hosts. */
		rc = 0;

	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}
/**
 * Inflate the balloon by one chunk. The caller owns the balloon mutex.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @chunk_idx:		Index of the chunk.
 */
static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages;
	int i, rc, ret;

	pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
			      sizeof(*pages),
			      GFP_KERNEL | __GFP_NOWARN);
	if (!pages)
		return -ENOMEM;

	req->header.size = sizeof(*req);
	req->inflate = true;
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto out_error;
		}

		req->phys_page[i] = page_to_phys(pages[i]);
	}

	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		ret = vbg_status_code_to_errno(rc);
		goto out_error;
	}

	gdev->mem_balloon.pages[chunk_idx] = pages;

	return 0;

out_error:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);

	return ret;
}
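
/*
 * Note: the page-pointer array allocated above is stored in
 * gdev->mem_balloon.pages[chunk_idx] on success; vbg_balloon_deflate()
 * relies on that array to give exactly those pages back to the kernel
 * when the host later asks for the balloon to shrink.
 */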
/**
 * Deflate the balloon by one chunk. The caller owns the balloon mutex.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @chunk_idx:		Index of the chunk.
 */
static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages = gdev->mem_balloon.pages[chunk_idx];
	int i, rc;

	req->header.size = sizeof(*req);
	req->inflate = false;
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		req->phys_page[i] = page_to_phys(pages[i]);

	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		return vbg_status_code_to_errno(rc);
	}

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		__free_page(pages[i]);
	kfree(pages);
	gdev->mem_balloon.pages[chunk_idx] = NULL;

	return 0;
}
/**
 * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
 * the host wants the balloon to be and adjust accordingly.
 */
static void vbg_balloon_work(struct work_struct *work)
{
	struct vbg_dev *gdev =
		container_of(work, struct vbg_dev, mem_balloon.work);
	struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
	u32 i, chunks;
	int rc, ret;

	/*
	 * Setting this bit means that we request the value from the host and
	 * change the guest memory balloon according to the returned value.
	 */
	req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		return;
	}

	/*
	 * The host always returns the same maximum amount of chunks, so
	 * we do this once.
	 */
	if (!gdev->mem_balloon.max_chunks) {
		gdev->mem_balloon.pages =
			devm_kcalloc(gdev->dev, req->phys_mem_chunks,
				     sizeof(struct page **), GFP_KERNEL);
		if (!gdev->mem_balloon.pages)
			return;

		gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
	}

	chunks = req->balloon_chunks;
	if (chunks > gdev->mem_balloon.max_chunks) {
		vbg_err("%s: illegal balloon size %u (max=%u)\n",
			__func__, chunks, gdev->mem_balloon.max_chunks);
		return;
	}

	if (chunks > gdev->mem_balloon.chunks) {
		/* inflate */
		for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
			ret = vbg_balloon_inflate(gdev, i);
			if (ret < 0)
				return;

			gdev->mem_balloon.chunks++;
		}
	} else {
		/* deflate */
		for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
			ret = vbg_balloon_deflate(gdev, i);
			if (ret < 0)
				return;

			gdev->mem_balloon.chunks--;
		}
	}
}
/**
 * Callback for heartbeat timer.
 */
static void vbg_heartbeat_timer(struct timer_list *t)
{
	struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);

	vbg_req_perform(gdev, gdev->guest_heartbeat_req);
	mod_timer(&gdev->heartbeat_timer,
		  msecs_to_jiffies(gdev->heartbeat_interval_ms));
}
/**
 * Configure the host to check the guest's heartbeat
 * and get the heartbeat interval from the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @enabled:		Set true to enable guest heartbeat checks on host.
 */
static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
{
	struct vmmdev_heartbeat *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
	if (!req)
		return -ENOMEM;

	req->enabled = enabled;
	req->interval_ns = 0;
	rc = vbg_req_perform(gdev, req);
	do_div(req->interval_ns, 1000000); /* ns -> ms */
	gdev->heartbeat_interval_ms = req->interval_ns;
	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}
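
/*
 * Note: the host writes the configured interval back into req->interval_ns
 * of the same request buffer, so the do_div() above converts that returned
 * value from nanoseconds to milliseconds in place before it is cached in
 * gdev->heartbeat_interval_ms for the timer callback.
 */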
/**
 * Initializes the heartbeat timer. This feature may be disabled by the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_heartbeat_init(struct vbg_dev *gdev)
{
	int ret;

	/* Make sure that heartbeat checking is disabled if we fail. */
	ret = vbg_heartbeat_host_config(gdev, false);
	if (ret < 0)
		return ret;

	ret = vbg_heartbeat_host_config(gdev, true);
	if (ret < 0)
		return ret;

	gdev->guest_heartbeat_req = vbg_req_alloc(
					sizeof(*gdev->guest_heartbeat_req),
					VMMDEVREQ_GUEST_HEARTBEAT);
	if (!gdev->guest_heartbeat_req)
		return -ENOMEM;

	vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
		 __func__, gdev->heartbeat_interval_ms);
	mod_timer(&gdev->heartbeat_timer, 0);

	return 0;
}
/**
 * Cleanup heartbeat code, stop the heartbeat timer and disable host
 * heartbeat checking.
 * @gdev:		The Guest extension device.
 */
static void vbg_heartbeat_exit(struct vbg_dev *gdev)
{
	del_timer_sync(&gdev->heartbeat_timer);
	vbg_heartbeat_host_config(gdev, false);
	vbg_req_free(gdev->guest_heartbeat_req,
		     sizeof(*gdev->guest_heartbeat_req));
}
/**
 * Applies a change to the bit usage tracker.
 * Return: true if the mask changed, false if not.
 * @tracker:		The bit usage tracker.
 * @changed:		The bits to change.
 * @previous:		The previous value of the bits.
 */
static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
				u32 changed, u32 previous)
{
	bool global_change = false;

	while (changed) {
		u32 bit = ffs(changed) - 1;
		u32 bitmask = BIT(bit);

		if (bitmask & previous) {
			tracker->per_bit_usage[bit] -= 1;
			if (tracker->per_bit_usage[bit] == 0) {
				global_change = true;
				tracker->mask &= ~bitmask;
			}
		} else {
			tracker->per_bit_usage[bit] += 1;
			if (tracker->per_bit_usage[bit] == 1) {
				global_change = true;
				tracker->mask |= bitmask;
			}
		}

		changed &= ~bitmask;
	}

	return global_change;
}
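
/*
 * Worked example (illustrative only): if two sessions both enable event
 * bit 0, per_bit_usage[0] goes 0 -> 1 -> 2 and the tracker mask gains
 * BIT(0) only on the first transition (global_change == true); when both
 * sessions later clear that bit, the mask loses BIT(0) only once the usage
 * count drops back to 0.  Callers use the "did the global mask change"
 * answer to decide whether the host needs to be re-programmed at all.
 */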
/**
 * Init and termination worker for resetting the event filter on the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @fixed_events:	Fixed events (init time).
 */
static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
				       u32 fixed_events)
{
	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
	if (!req)
		return -ENOMEM;

	req->not_mask = U32_MAX & ~fixed_events;
	req->or_mask = fixed_events;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}
/**
 * Changes the event filter mask for the given session.
 *
 * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
 * do session cleanup. Takes the session mutex.
 *
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The events to add.
 * @not_mask:			The events to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 */
static int vbg_set_session_event_filter(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	struct vmmdev_mask *req;
	u32 changed, previous;
	int rc, ret = 0;

	/* Allocate a request buffer before taking the spinlock */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
	if (!req) {
		if (!session_termination)
			return -ENOMEM;
		/* Ignore allocation failure, we must do session cleanup. */
	}

	mutex_lock(&gdev->session_mutex);

	/* Apply the changes to the session mask. */
	previous = session->event_filter;
	session->event_filter |= or_mask;
	session->event_filter &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->event_filter;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
	or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;

	if (gdev->event_filter_host == or_mask || !req)
		goto out;

	gdev->event_filter_host = or_mask;
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		ret = vbg_status_code_to_errno(rc);

		/* Failed, roll back (unless it's session termination time). */
		gdev->event_filter_host = U32_MAX;
		if (session_termination)
			goto out;

		vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
				    session->event_filter);
		session->event_filter = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);
	vbg_req_free(req, sizeof(*req));

	return ret;
}
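
/*
 * Note: the mask actually programmed into the host above is not the
 * session's own filter but gdev->fixed_events OR-ed with the combined
 * filter of all sessions (gdev->event_filter_tracker.mask), and it is only
 * re-sent when that combined value differs from gdev->event_filter_host.
 */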
/**
 * Init and termination worker for setting the guest capabilities to zero
 * on the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
{
	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
	if (!req)
		return -ENOMEM;

	req->not_mask = U32_MAX;
	req->or_mask = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}
/**
 * Sets the guest capabilities for a session. Takes the session mutex.
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The capabilities to add.
 * @not_mask:			The capabilities to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 */
static int vbg_set_session_capabilities(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	struct vmmdev_mask *req;
	u32 changed, previous;
	int rc, ret = 0;

	/* Allocate a request buffer before taking the spinlock */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
	if (!req) {
		if (!session_termination)
			return -ENOMEM;
		/* Ignore allocation failure, we must do session cleanup. */
	}

	mutex_lock(&gdev->session_mutex);

	/* Apply the changes to the session mask. */
	previous = session->guest_caps;
	session->guest_caps |= or_mask;
	session->guest_caps &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->guest_caps;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
	or_mask = gdev->guest_caps_tracker.mask;

	if (gdev->guest_caps_host == or_mask || !req)
		goto out;

	gdev->guest_caps_host = or_mask;
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		ret = vbg_status_code_to_errno(rc);

		/* Failed, roll back (unless it's session termination time). */
		gdev->guest_caps_host = U32_MAX;
		if (session_termination)
			goto out;

		vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
				    session->guest_caps);
		session->guest_caps = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);
	vbg_req_free(req, sizeof(*req));

	return ret;
}
/**
 * vbg_query_host_version gets the host feature mask and version information.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_query_host_version(struct vbg_dev *gdev)
{
	struct vmmdev_host_version *req;
	int rc, ret;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
	if (!req)
		return -ENOMEM;

	rc = vbg_req_perform(gdev, req);
	ret = vbg_status_code_to_errno(rc);
	if (ret) {
		vbg_err("%s error: %d\n", __func__, rc);
		goto out;
	}

	snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
		 req->major, req->minor, req->build, req->revision);
	gdev->host_features = req->features;

	vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
		 gdev->host_features);

	if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
		vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
		ret = -ENODEV;
	}

out:
	vbg_req_free(req, sizeof(*req));
	return ret;
}
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges; this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * Return: 0 or negative errno value.
 *
 * @gdev:		The Guest extension device.
 * @fixed_events:	Events that will be enabled upon init and no client
 *			will ever be allowed to mask.
 */
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
{
	int ret = -ENOMEM;

	gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
	gdev->event_filter_host = U32_MAX;	/* forces a report */
	gdev->guest_caps_host = U32_MAX;	/* forces a report */

	init_waitqueue_head(&gdev->event_wq);
	init_waitqueue_head(&gdev->hgcm_wq);
	spin_lock_init(&gdev->event_spinlock);
	mutex_init(&gdev->session_mutex);
	mutex_init(&gdev->cancel_req_mutex);
	timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
	INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);

	gdev->mem_balloon.get_req =
		vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
	gdev->mem_balloon.change_req =
		vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
			      VMMDEVREQ_CHANGE_MEMBALLOON);
	gdev->cancel_req =
		vbg_req_alloc(sizeof(*(gdev->cancel_req)),
			      VMMDEVREQ_HGCM_CANCEL2);
	gdev->ack_events_req =
		vbg_req_alloc(sizeof(*gdev->ack_events_req),
			      VMMDEVREQ_ACKNOWLEDGE_EVENTS);
	gdev->mouse_status_req =
		vbg_req_alloc(sizeof(*gdev->mouse_status_req),
			      VMMDEVREQ_GET_MOUSE_STATUS);

	if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
	    !gdev->cancel_req || !gdev->ack_events_req ||
	    !gdev->mouse_status_req)
		goto err_free_reqs;

	ret = vbg_query_host_version(gdev);
	if (ret)
		goto err_free_reqs;

	ret = vbg_report_guest_info(gdev);
	if (ret) {
		vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
		goto err_free_reqs;
	}

	ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
	if (ret) {
		vbg_err("vboxguest: Error setting fixed event filter: %d\n",
			ret);
		goto err_free_reqs;
	}

	ret = vbg_reset_host_capabilities(gdev);
	if (ret) {
		vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
			ret);
		goto err_free_reqs;
	}

	ret = vbg_core_set_mouse_status(gdev, 0);
	if (ret) {
		vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
		goto err_free_reqs;
	}

	/* These may fail without requiring the driver init to fail. */
	vbg_guest_mappings_init(gdev);
	vbg_heartbeat_init(gdev);

	/* All done! */
	ret = vbg_report_driver_status(gdev, true);
	if (ret < 0)
		vbg_err("vboxguest: Error reporting driver status: %d\n", ret);

	return 0;

err_free_reqs:
	vbg_req_free(gdev->mouse_status_req,
		     sizeof(*gdev->mouse_status_req));
	vbg_req_free(gdev->ack_events_req,
		     sizeof(*gdev->ack_events_req));
	vbg_req_free(gdev->cancel_req,
		     sizeof(*gdev->cancel_req));
	vbg_req_free(gdev->mem_balloon.change_req,
		     sizeof(*gdev->mem_balloon.change_req));
	vbg_req_free(gdev->mem_balloon.get_req,
		     sizeof(*gdev->mem_balloon.get_req));
	return ret;
}
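
/*
 * Rough sketch of how the native glue is expected to drive the entry
 * points above and below (assumed usage, simplified from the PCI probe in
 * vboxguest_linux.c rather than copied from it):
 *
 *	ret = vbg_core_init(gdev, VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
 *	if (ret)
 *		goto err_disable_pci;
 *	ret = devm_request_irq(dev, pci->irq, vbg_core_isr, IRQF_SHARED,
 *			       "vboxguest", gdev);
 *	...
 *	and on driver removal:
 *	vbg_core_exit(gdev);
 */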
/**
 * Call this on exit to clean-up vboxguest-core managed resources.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 * @gdev:		The Guest extension device.
 */
void vbg_core_exit(struct vbg_dev *gdev)
{
	vbg_heartbeat_exit(gdev);
	vbg_guest_mappings_exit(gdev);

	/* Clear the host flags (mouse status etc). */
	vbg_reset_host_event_filter(gdev, 0);
	vbg_reset_host_capabilities(gdev);
	vbg_core_set_mouse_status(gdev, 0);

	vbg_req_free(gdev->mouse_status_req,
		     sizeof(*gdev->mouse_status_req));
	vbg_req_free(gdev->ack_events_req,
		     sizeof(*gdev->ack_events_req));
	vbg_req_free(gdev->cancel_req,
		     sizeof(*gdev->cancel_req));
	vbg_req_free(gdev->mem_balloon.change_req,
		     sizeof(*gdev->mem_balloon.change_req));
	vbg_req_free(gdev->mem_balloon.get_req,
		     sizeof(*gdev->mem_balloon.get_req));
}
/**
 * Creates a VBoxGuest user session.
 *
 * vboxguest_linux.c calls this when userspace opens the char-device.
 * Return: A pointer to the new session or an ERR_PTR on error.
 * @gdev:		The Guest extension device.
 * @user:		Set if this is a session for the vboxuser device.
 */
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
{
	struct vbg_session *session;

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return ERR_PTR(-ENOMEM);

	session->gdev = gdev;
	session->user_session = user;

	return session;
}
/**
 * Closes a VBoxGuest session.
 * @session:		The session to close (and free).
 */
void vbg_core_close_session(struct vbg_session *session)
{
	struct vbg_dev *gdev = session->gdev;
	int i, rc;

	vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
	vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);

	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i])
			continue;

		vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
	}

	kfree(session);
}
static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
			 size_t out_size)
{
	if (hdr->size_in  != (sizeof(*hdr) + in_size) ||
	    hdr->size_out != (sizeof(*hdr) + out_size))
		return -EINVAL;

	return 0;
}
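
/*
 * The ioctl argument structs used below all follow the same shape: a
 * struct vbg_ioctl_hdr followed by a union { in; out; } u.  A caller such
 * as vbg_ioctl_driver_version_info() therefore passes sizeof(info->u.in)
 * and sizeof(info->u.out) here, and the check requires hdr.size_in and
 * hdr.size_out to be exactly the header size plus the respective payload.
 */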
static int vbg_ioctl_driver_version_info(
	struct vbg_ioctl_driver_version_info *info)
{
	const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
	u16 min_maj_version, req_maj_version;

	if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
		return -EINVAL;

	req_maj_version = info->u.in.req_version >> 16;
	min_maj_version = info->u.in.min_version >> 16;

	if (info->u.in.min_version > info->u.in.req_version ||
	    min_maj_version != req_maj_version)
		return -EINVAL;

	if (info->u.in.min_version <= VBG_IOC_VERSION &&
	    min_maj_version == vbg_maj_version) {
		info->u.out.session_version = VBG_IOC_VERSION;
	} else {
		info->u.out.session_version = U32_MAX;
		info->hdr.rc = VERR_VERSION_MISMATCH;
	}

	info->u.out.driver_version  = VBG_IOC_VERSION;
	info->u.out.driver_revision = 0;
	info->u.out.reserved1 = 0;
	info->u.out.reserved2 = 0;

	return 0;
}
static bool vbg_wait_event_cond(struct vbg_dev *gdev,
				struct vbg_session *session,
				u32 event_mask)
{
	unsigned long flags;
	bool wakeup;
	u32 events;

	spin_lock_irqsave(&gdev->event_spinlock, flags);

	events = gdev->pending_events & event_mask;
	wakeup = events || session->cancel_waiters;

	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return wakeup;
}
/* Must be called with the event_lock held */
static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     u32 event_mask)
{
	u32 events = gdev->pending_events & event_mask;

	gdev->pending_events &= ~events;
	return events;
}
static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_wait_for_events *wait)
{
	u32 timeout_ms = wait->u.in.timeout_ms;
	u32 event_mask = wait->u.in.events;
	unsigned long flags;
	long timeout;
	int ret = 0;

	if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
		return -EINVAL;

	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	wait->u.out.events = 0;
	do {
		timeout = wait_event_interruptible_timeout(
				gdev->event_wq,
				vbg_wait_event_cond(gdev, session, event_mask),
				timeout);

		spin_lock_irqsave(&gdev->event_spinlock, flags);

		if (timeout < 0 || session->cancel_waiters) {
			ret = -EINTR;
		} else if (timeout == 0) {
			ret = -ETIMEDOUT;
		} else {
			wait->u.out.events =
			    vbg_consume_events_locked(gdev, session, event_mask);
		}

		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		/*
		 * Someone else may have consumed the event(s) first, in
		 * which case we go back to waiting.
		 */
	} while (ret == 0 && wait->u.out.events == 0);

	return ret;
}
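
/*
 * Hypothetical userspace usage sketch (not part of this driver): a client
 * wanting to block on guest events would fill in wait.u.in.timeout_ms and
 * wait.u.in.events and then issue
 * ioctl(fd, VBG_IOCTL_WAIT_FOR_EVENTS, &wait); another thread can abort
 * all such waiters with VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS, which is
 * what sets session->cancel_waiters below.
 */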
static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
					       struct vbg_session *session,
					       struct vbg_ioctl_hdr *hdr)
{
	unsigned long flags;

	if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
		return -EINVAL;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	session->cancel_waiters = true;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	wake_up(&gdev->event_wq);

	return 0;
}
/**
 * Checks if the VMM request is allowed in the context of the given session.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @session:		The calling session.
 * @req:		The request.
 */
static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
			   const struct vmmdev_request_header *req)
{
	const struct vmmdev_guest_status *guest_status;
	bool trusted_apps_only;

	switch (req->request_type) {
	/* Trusted users apps only. */
	case VMMDEVREQ_QUERY_CREDENTIALS:
	case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
	case VMMDEVREQ_REGISTER_SHARED_MODULE:
	case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
	case VMMDEVREQ_WRITE_COREDUMP:
	case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
	case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
	case VMMDEVREQ_CHECK_SHARED_MODULES:
	case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
	case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
	case VMMDEVREQ_REPORT_GUEST_STATS:
	case VMMDEVREQ_REPORT_GUEST_USER_STATE:
	case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
		trusted_apps_only = true;
		break;

	/* Anyone. */
	case VMMDEVREQ_GET_MOUSE_STATUS:
	case VMMDEVREQ_SET_MOUSE_STATUS:
	case VMMDEVREQ_SET_POINTER_SHAPE:
	case VMMDEVREQ_GET_HOST_VERSION:
	case VMMDEVREQ_IDLE:
	case VMMDEVREQ_GET_HOST_TIME:
	case VMMDEVREQ_SET_POWER_STATUS:
	case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
	case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
	case VMMDEVREQ_REPORT_GUEST_STATUS:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
	case VMMDEVREQ_VIDEMODE_SUPPORTED:
	case VMMDEVREQ_GET_HEIGHT_REDUCTION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
	case VMMDEVREQ_VIDEMODE_SUPPORTED2:
	case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
	case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
	case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
	case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
	case VMMDEVREQ_GET_VRDPCHANGE_REQ:
	case VMMDEVREQ_LOG_STRING:
	case VMMDEVREQ_GET_SESSION_ID:
		trusted_apps_only = false;
		break;

	/* Depends on the request parameters... */
	case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
		guest_status = (const struct vmmdev_guest_status *)req;
		switch (guest_status->facility) {
		case VBOXGUEST_FACILITY_TYPE_ALL:
		case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
			vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
				guest_status->facility);
			return -EPERM;
		case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
			trusted_apps_only = true;
			break;
		case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
		case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
		case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
		default:
			trusted_apps_only = false;
			break;
		}
		break;

	/* Anything else is not allowed. */
	default:
		vbg_err("Denying userspace vmm call type %#08x\n",
			req->request_type);
		return -EPERM;
	}

	if (trusted_apps_only && session->user_session) {
		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
			req->request_type);
		return -EPERM;
	}

	return 0;
}
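
/*
 * Note: "trusted apps only" above means the request is refused for
 * sessions opened through the vboxuser device node
 * (session->user_session); such requests have to come through the
 * full-privilege vboxguest device node instead.
 */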
static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
				struct vbg_session *session, void *data)
{
	struct vbg_ioctl_hdr *hdr = data;
	int ret;

	if (hdr->size_in != hdr->size_out)
		return -EINVAL;

	if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
		return -E2BIG;

	if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	ret = vbg_req_allowed(gdev, session, data);
	if (ret < 0)
		return ret;

	vbg_req_perform(gdev, data);
	WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);

	return 0;
}
static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
				  struct vbg_session *session,
				  struct vbg_ioctl_hgcm_connect *conn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
		return -EINVAL;

	/* Find a free place in the sessions clients array and claim it */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i]) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EMFILE;

	ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
			       &conn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && conn->hdr.rc >= 0) {
		conn->u.out.client_id = client_id;
		session->hgcm_client_ids[i] = client_id;
	} else {
		conn->u.out.client_id = 0;
		session->hgcm_client_ids[i] = 0;
	}
	mutex_unlock(&gdev->session_mutex);

	return ret;
}
static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_hgcm_disconnect *disconn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
		return -EINVAL;

	client_id = disconn->u.in.client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (session->hgcm_client_ids[i] == client_id) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EINVAL;

	ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && disconn->hdr.rc >= 0)
		session->hgcm_client_ids[i] = 0;
	else
		session->hgcm_client_ids[i] = client_id;
	mutex_unlock(&gdev->session_mutex);

	return ret;
}
static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
			       struct vbg_session *session, bool f32bit,
			       struct vbg_ioctl_hgcm_call *call)
{
	size_t actual_size;
	u32 client_id;
	int i, ret;

	if (call->hdr.size_in < sizeof(*call))
		return -EINVAL;

	if (call->hdr.size_in != call->hdr.size_out)
		return -EINVAL;

	if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
		return -E2BIG;

	client_id = call->client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	actual_size = sizeof(*call);
	if (f32bit)
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter32);
	else
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter);
	if (call->hdr.size_in < actual_size) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
			  call->hdr.size_in, actual_size);
		return -EINVAL;
	}
	call->hdr.size_out = actual_size;

	/*
	 * Validate the client id.
	 */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
		if (session->hgcm_client_ids[i] == client_id)
			break;
	mutex_unlock(&gdev->session_mutex);
	if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
			  client_id);
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
		ret = vbg_hgcm_call32(gdev, client_id,
				      call->function, call->timeout_ms,
				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
				      call->parm_count, &call->hdr.rc);
	else
		ret = vbg_hgcm_call(gdev, client_id,
				    call->function, call->timeout_ms,
				    VBG_IOCTL_HGCM_CALL_PARMS(call),
				    call->parm_count, &call->hdr.rc);

	if (ret == -E2BIG) {
		/* E2BIG needs to be reported through the hdr.rc field. */
		call->hdr.rc = VERR_OUT_OF_RANGE;
		ret = 0;
	}

	if (ret && ret != -EINTR && ret != -ETIMEDOUT)
		vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);

	return ret;
}
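
/*
 * Note: actual_size above is the header plus parm_count parameter records
 * in whichever layout (32-bit compat or native) the caller used; size_out
 * is clamped to that value, presumably so the generic ioctl copy-out path
 * only writes back the amount of buffer that was actually validated.
 */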
static int vbg_ioctl_log(struct vbg_ioctl_log *log)
{
	if (log->hdr.size_out != sizeof(log->hdr))
		return -EINVAL;

	vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
		 log->u.in.msg);

	return 0;
}
*gdev
,
1350 struct vbg_session
*session
,
1351 struct vbg_ioctl_change_filter
*filter
)
1353 u32 or_mask
, not_mask
;
1355 if (vbg_ioctl_chk(&filter
->hdr
, sizeof(filter
->u
.in
), 0))
1358 or_mask
= filter
->u
.in
.or_mask
;
1359 not_mask
= filter
->u
.in
.not_mask
;
1361 if ((or_mask
| not_mask
) & ~VMMDEV_EVENT_VALID_EVENT_MASK
)
1364 return vbg_set_session_event_filter(gdev
, session
, or_mask
, not_mask
,
1368 static int vbg_ioctl_change_guest_capabilities(struct vbg_dev
*gdev
,
1369 struct vbg_session
*session
, struct vbg_ioctl_set_guest_caps
*caps
)
1371 u32 or_mask
, not_mask
;
1374 if (vbg_ioctl_chk(&caps
->hdr
, sizeof(caps
->u
.in
), sizeof(caps
->u
.out
)))
1377 or_mask
= caps
->u
.in
.or_mask
;
1378 not_mask
= caps
->u
.in
.not_mask
;
1380 if ((or_mask
| not_mask
) & ~VMMDEV_EVENT_VALID_EVENT_MASK
)
1383 ret
= vbg_set_session_capabilities(gdev
, session
, or_mask
, not_mask
,
1388 caps
->u
.out
.session_caps
= session
->guest_caps
;
1389 caps
->u
.out
.global_caps
= gdev
->guest_caps_host
;
static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
				   struct vbg_ioctl_check_balloon *balloon_info)
{
	if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
		return -EINVAL;

	balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
	/*
	 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
	 * events entirely in the kernel, see vbg_core_isr().
	 */
	balloon_info->u.out.handle_in_r3 = false;

	return 0;
}
static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
				     struct vbg_ioctl_write_coredump *dump)
{
	struct vmmdev_write_core_dump *req;

	if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
		return -EINVAL;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
	if (!req)
		return -ENOMEM;

	req->flags = dump->u.in.flags;
	dump->hdr.rc = vbg_req_perform(gdev, req);

	vbg_req_free(req, sizeof(*req));
	return 0;
}
/**
 * Common IOCtl for user to kernel communication.
 * Return: 0 or negative errno value.
 * @session:	The client session.
 * @req:	The requested function.
 * @data:	The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
 */
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
{
	unsigned int req_no_size = req & ~IOCSIZE_MASK;
	struct vbg_dev *gdev = session->gdev;
	struct vbg_ioctl_hdr *hdr = data;
	bool f32bit = false;

	hdr->rc = VINF_SUCCESS;
	if (!hdr->size_out)
		hdr->size_out = hdr->size_in;

	/*
	 * hdr->version and hdr->size_in / hdr->size_out minimum size are
	 * already checked by vbg_misc_device_ioctl().
	 */

	/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
	if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
		return vbg_ioctl_vmmrequest(gdev, session, data);

	if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	/* Fixed size requests. */
	switch (req) {
	case VBG_IOCTL_DRIVER_VERSION_INFO:
		return vbg_ioctl_driver_version_info(data);
	case VBG_IOCTL_HGCM_CONNECT:
		return vbg_ioctl_hgcm_connect(gdev, session, data);
	case VBG_IOCTL_HGCM_DISCONNECT:
		return vbg_ioctl_hgcm_disconnect(gdev, session, data);
	case VBG_IOCTL_WAIT_FOR_EVENTS:
		return vbg_ioctl_wait_for_events(gdev, session, data);
	case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
		return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
	case VBG_IOCTL_CHANGE_FILTER_MASK:
		return vbg_ioctl_change_filter_mask(gdev, session, data);
	case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
		return vbg_ioctl_change_guest_capabilities(gdev, session, data);
	case VBG_IOCTL_CHECK_BALLOON:
		return vbg_ioctl_check_balloon(gdev, data);
	case VBG_IOCTL_WRITE_CORE_DUMP:
		return vbg_ioctl_write_core_dump(gdev, data);
	}

	/* Variable sized requests. */
	switch (req_no_size) {
#ifdef CONFIG_COMPAT
	case VBG_IOCTL_HGCM_CALL_32(0):
		f32bit = true;
		/* Fall through */
#endif
	case VBG_IOCTL_HGCM_CALL(0):
		return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
	case VBG_IOCTL_LOG(0):
		return vbg_ioctl_log(data);
	}

	vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
	return -ENOTTY;
}
/**
 * Report guest supported mouse-features to the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @features:		The set of features to report to the host.
 */
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
{
	struct vmmdev_mouse_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
	if (!req)
		return -ENOMEM;

	req->mouse_features = features;
	req->pointer_pos_x = 0;
	req->pointer_pos_y = 0;

	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}
/** Core interrupt service routine. */
irqreturn_t vbg_core_isr(int irq, void *dev_id)
{
	struct vbg_dev *gdev = dev_id;
	struct vmmdev_events *req = gdev->ack_events_req;
	bool mouse_position_changed = false;
	unsigned long flags;
	u32 events = 0;
	int rc;

	if (!gdev->mmio->V.V1_04.have_events)
		return IRQ_NONE;

	/* Get and acknowledge events. */
	req->header.rc = VERR_INTERNAL_ERROR;
	req->events = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("Error performing events req, rc: %d\n", rc);
		return IRQ_NONE;
	}

	events = req->events;

	if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
		mouse_position_changed = true;
		events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
	}

	if (events & VMMDEV_EVENT_HGCM) {
		wake_up(&gdev->hgcm_wq);
		events &= ~VMMDEV_EVENT_HGCM;
	}

	if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
		schedule_work(&gdev->mem_balloon.work);
		events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	}

	if (events) {
		spin_lock_irqsave(&gdev->event_spinlock, flags);
		gdev->pending_events |= events;
		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		wake_up(&gdev->event_wq);
	}

	if (mouse_position_changed)
		vbg_linux_mouse_event(gdev);

	return IRQ_HANDLED;
}