/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
 *
 * Copyright (C) 2007-2016 Oracle Corporation
 */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include <linux/vmalloc.h>
#include "vboxguest_core.h"
#include "vboxguest_version.h"
/* Get the pointer to the first HGCM parameter. */
#define VBG_IOCTL_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
/* Get the pointer to the first HGCM parameter in a 32-bit request. */
#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
	((struct vmmdev_hgcm_function_parameter32 *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
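/*
 * Note on the two macros above: an HGCM call buffer is assumed to be laid
 * out as a struct vbg_ioctl_hgcm_call header immediately followed by
 * parm_count parameter structs, roughly:
 *
 *	[ struct vbg_ioctl_hgcm_call ][ parm[0] ][ parm[1] ] ...
 *
 * which is why both macros simply offset the pointer by
 * sizeof(struct vbg_ioctl_hgcm_call).
 */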
#define GUEST_MAPPINGS_TRIES	5

#define VBG_KERNEL_REQUEST \
	(VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
	 VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
/**
 * vbg_guest_mappings_init - Reserves memory in which the VMM can
 *	relocate any guest mappings that are floating around.
 * @gdev: The Guest extension device.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so we try several times.
 *
 * Failure to reserve the guest mappings is ignored.
 */
static void vbg_guest_mappings_init(struct vbg_dev *gdev)
{
	struct vmmdev_hypervisorinfo *req;
	void *guest_mappings[GUEST_MAPPINGS_TRIES];
	struct page **pages = NULL;
	u32 size, hypervisor_size;
	int i, rc;

	/* Query the required space. */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		goto out;

	/*
	 * The VMM will report back if there is nothing it wants to map, like
	 * for instance in VT-x and AMD-V mode.
	 */
	if (req->hypervisor_size == 0)
		goto out;

	hypervisor_size = req->hypervisor_size;
	/* Add 4M so that we can align the vmap to 4MiB as the host requires. */
	size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;

	pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		goto out;

	gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
	if (!gdev->guest_mappings_dummy_page)
		goto out;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = gdev->guest_mappings_dummy_page;

	/*
	 * Try several times, the VMM might not accept some addresses because
	 * of address clashes between the three contexts.
	 */
	for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
		guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
					 VM_MAP, PAGE_KERNEL_RO);
		if (!guest_mappings[i])
			break;

		req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
		req->header.rc = VERR_INTERNAL_ERROR;
		req->hypervisor_size = hypervisor_size;
		req->hypervisor_start =
			(unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);

		rc = vbg_req_perform(gdev, req);
		if (rc >= 0) {
			gdev->guest_mappings = guest_mappings[i];
			break;
		}
	}

	/* Free vmap's from failed attempts. */
	while (--i >= 0)
		vunmap(guest_mappings[i]);

	/* On failure free the dummy-page backing the vmap */
	if (!gdev->guest_mappings) {
		__free_page(gdev->guest_mappings_dummy_page);
		gdev->guest_mappings_dummy_page = NULL;
	}

out:
	vbg_req_free(req, sizeof(*req));
	kfree(pages);
}
/**
 * vbg_guest_mappings_exit - Undo what vbg_guest_mappings_init did.
 * @gdev: The Guest extension device.
 */
static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
{
	struct vmmdev_hypervisorinfo *req;
	int rc;

	if (!gdev->guest_mappings)
		return;

	/*
	 * Tell the host that we're going to free the memory we reserved for
	 * it, then free it up. (Leak the memory if anything goes wrong here.)
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;

	rc = vbg_req_perform(gdev, req);

	vbg_req_free(req, sizeof(*req));

	if (rc < 0) {
		vbg_err("%s error: %d\n", __func__, rc);
		return;
	}

	vunmap(gdev->guest_mappings);
	gdev->guest_mappings = NULL;

	__free_page(gdev->guest_mappings_dummy_page);
	gdev->guest_mappings_dummy_page = NULL;
}
/**
 * vbg_report_guest_info - Report the guest information to the host.
 * @gdev: The Guest extension device.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_report_guest_info(struct vbg_dev *gdev)
{
	/*
	 * Allocate and fill in the two guest info reports.
	 */
	struct vmmdev_guest_info *req1 = NULL;
	struct vmmdev_guest_info2 *req2 = NULL;
	int rc, ret = -ENOMEM;

	req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
			     VBG_KERNEL_REQUEST);
	req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
			     VBG_KERNEL_REQUEST);
	if (!req1 || !req2)
		goto out_free;

	req1->interface_version = VMMDEV_VERSION;
	req1->os_type = VMMDEV_OSTYPE_LINUX26;
#if __BITS_PER_LONG == 64
	req1->os_type |= VMMDEV_OSTYPE_X64;
#endif

	req2->additions_major = VBG_VERSION_MAJOR;
	req2->additions_minor = VBG_VERSION_MINOR;
	req2->additions_build = VBG_VERSION_BUILD;
	req2->additions_revision = VBG_SVN_REV;
	req2->additions_features =
		VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
	strscpy(req2->name, VBG_VERSION_STRING, sizeof(req2->name));

	/*
	 * There are two protocols here:
	 *      1. INFO2 + INFO1. Supported by >=3.2.51.
	 *      2. INFO1 and optionally INFO2. The old protocol.
	 *
	 * We try protocol 2 first.  It will fail with VERR_NOT_SUPPORTED
	 * if not supported by the VMMDev (message ordering requirement).
	 */
	rc = vbg_req_perform(gdev, req2);
	if (rc >= 0) {
		rc = vbg_req_perform(gdev, req1);
	} else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
		rc = vbg_req_perform(gdev, req1);
		if (rc >= 0) {
			rc = vbg_req_perform(gdev, req2);
			if (rc == VERR_NOT_IMPLEMENTED)
				rc = VINF_SUCCESS;
		}
	}
	ret = vbg_status_code_to_errno(rc);

out_free:
	vbg_req_free(req2, sizeof(*req2));
	vbg_req_free(req1, sizeof(*req1));
	return ret;
}
/**
 * vbg_report_driver_status - Report the guest driver status to the host.
 * @gdev: The Guest extension device.
 * @active: Flag whether the driver is now active or not.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
{
	struct vmmdev_guest_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
	if (active)
		req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
	else
		req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;

	rc = vbg_req_perform(gdev, req);
	if (rc == VERR_NOT_IMPLEMENTED)	/* Compatibility with older hosts. */
		rc = VINF_SUCCESS;

	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}
/**
 * vbg_balloon_inflate - Inflate the balloon by one chunk. The caller
 * owns the balloon mutex.
 * @gdev: The Guest extension device.
 * @chunk_idx: Index of the chunk.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages;
	int i, rc, ret;

	pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
			      sizeof(*pages),
			      GFP_KERNEL | __GFP_NOWARN);
	if (!pages)
		return -ENOMEM;

	req->header.size = sizeof(*req);
	req->inflate = true;
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto out_error;
		}

		req->phys_page[i] = page_to_phys(pages[i]);
	}

	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		ret = vbg_status_code_to_errno(rc);
		goto out_error;
	}

	gdev->mem_balloon.pages[chunk_idx] = pages;

	return 0;

out_error:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);

	return ret;
}
/**
 * vbg_balloon_deflate - Deflate the balloon by one chunk. The caller
 * owns the balloon mutex.
 * @gdev: The Guest extension device.
 * @chunk_idx: Index of the chunk.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages = gdev->mem_balloon.pages[chunk_idx];
	int i, rc;

	req->header.size = sizeof(*req);
	req->inflate = false;
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		req->phys_page[i] = page_to_phys(pages[i]);

	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		return vbg_status_code_to_errno(rc);
	}

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		__free_page(pages[i]);
	kfree(pages);
	gdev->mem_balloon.pages[chunk_idx] = NULL;

	return 0;
}
/*
 * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
 * the host wants the balloon to be and adjust accordingly.
 */
static void vbg_balloon_work(struct work_struct *work)
{
	struct vbg_dev *gdev =
		container_of(work, struct vbg_dev, mem_balloon.work);
	struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
	u32 i, chunks;
	int rc, ret;

	/*
	 * Setting this bit means that we request the value from the host and
	 * change the guest memory balloon according to the returned value.
	 */
	req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		return;
	}

	/*
	 * The host always returns the same maximum amount of chunks, so
	 * we only need to allocate the tracking array once.
	 */
	if (!gdev->mem_balloon.max_chunks) {
		gdev->mem_balloon.pages =
			devm_kcalloc(gdev->dev, req->phys_mem_chunks,
				     sizeof(struct page **), GFP_KERNEL);
		if (!gdev->mem_balloon.pages)
			return;

		gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
	}

	chunks = req->balloon_chunks;
	if (chunks > gdev->mem_balloon.max_chunks) {
		vbg_err("%s: illegal balloon size %u (max=%u)\n",
			__func__, chunks, gdev->mem_balloon.max_chunks);
		return;
	}

	if (chunks > gdev->mem_balloon.chunks) {
		/* inflate */
		for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
			ret = vbg_balloon_inflate(gdev, i);
			if (ret < 0)
				return;

			gdev->mem_balloon.chunks++;
		}
	} else {
		/* deflate */
		for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
			ret = vbg_balloon_deflate(gdev, i);
			if (ret < 0)
				return;

			gdev->mem_balloon.chunks--;
		}
	}
}
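/*
 * A short note on the ballooning model used above: the host only ever talks
 * in whole chunks of VMMDEV_MEMORY_BALLOON_CHUNK_PAGES pages. The work
 * function therefore just compares the requested chunk count against
 * gdev->mem_balloon.chunks and inflates or deflates one chunk at a time,
 * stopping at the first failure so the bookkeeping never gets ahead of what
 * was actually reported to the host.
 */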
/* Callback for heartbeat timer. */
static void vbg_heartbeat_timer(struct timer_list *t)
{
	struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);

	vbg_req_perform(gdev, gdev->guest_heartbeat_req);
	mod_timer(&gdev->heartbeat_timer,
		  msecs_to_jiffies(gdev->heartbeat_interval_ms));
}
/**
 * vbg_heartbeat_host_config - Configure the host to check guest's heartbeat
 *	and get heartbeat interval from the host.
 * @gdev: The Guest extension device.
 * @enabled: Set true to enable guest heartbeat checks on host.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
{
	struct vmmdev_heartbeat *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->enabled = enabled;
	req->interval_ns = 0;
	rc = vbg_req_perform(gdev, req);
	do_div(req->interval_ns, 1000000); /* ns -> ms */
	gdev->heartbeat_interval_ms = req->interval_ns;
	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}
/**
 * vbg_heartbeat_init - Initializes the heartbeat timer. This feature
 * may be disabled by the host.
 * @gdev: The Guest extension device.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_heartbeat_init(struct vbg_dev *gdev)
{
	int ret;

	/* Make sure that heartbeat checking is disabled if we fail. */
	ret = vbg_heartbeat_host_config(gdev, false);
	if (ret < 0)
		return ret;

	ret = vbg_heartbeat_host_config(gdev, true);
	if (ret < 0)
		return ret;

	gdev->guest_heartbeat_req = vbg_req_alloc(
					sizeof(*gdev->guest_heartbeat_req),
					VMMDEVREQ_GUEST_HEARTBEAT,
					VBG_KERNEL_REQUEST);
	if (!gdev->guest_heartbeat_req)
		return -ENOMEM;

	vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
		 __func__, gdev->heartbeat_interval_ms);
	mod_timer(&gdev->heartbeat_timer, 0);

	return 0;
}
/**
 * vbg_heartbeat_exit - Cleanup heartbeat code, stop HB timer and disable
 *	host heartbeat checking.
 * @gdev: The Guest extension device.
 */
static void vbg_heartbeat_exit(struct vbg_dev *gdev)
{
	del_timer_sync(&gdev->heartbeat_timer);
	vbg_heartbeat_host_config(gdev, false);
	vbg_req_free(gdev->guest_heartbeat_req,
		     sizeof(*gdev->guest_heartbeat_req));
}
/**
 * vbg_track_bit_usage - Applies a change to the bit usage tracker.
 * @tracker: The bit usage tracker.
 * @changed: The bits to change.
 * @previous: The previous value of the bits.
 *
 * Return: %true if the mask changed, %false if not.
 */
static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
				u32 changed, u32 previous)
{
	bool global_change = false;

	while (changed) {
		u32 bit = ffs(changed) - 1;
		u32 bitmask = BIT(bit);

		if (bitmask & previous) {
			tracker->per_bit_usage[bit] -= 1;
			if (tracker->per_bit_usage[bit] == 0) {
				global_change = true;
				tracker->mask &= ~bitmask;
			}
		} else {
			tracker->per_bit_usage[bit] += 1;
			if (tracker->per_bit_usage[bit] == 1) {
				global_change = true;
				tracker->mask |= bitmask;
			}
		}

		changed &= ~bitmask;
	}

	return global_change;
}
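/*
 * Example of the tracker semantics above: if two sessions enable the same
 * bit and one of them later drops it, tracker->mask keeps the bit set; only
 * the 0 <-> 1 transitions of the per-bit usage count are reported as a
 * global change, which tells the caller whether the host needs updating.
 */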
/**
 * vbg_reset_host_event_filter - Init and termination worker for
 *	resetting the event filter on the host.
 * @gdev: The Guest extension device.
 * @fixed_events: Fixed events (init time).
 *
 * Return: %0 or negative errno value.
 */
static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
				       u32 fixed_events)
{
	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->not_mask = U32_MAX & ~fixed_events;
	req->or_mask = fixed_events;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}
/**
 * vbg_set_session_event_filter - Changes the event filter mask for the
 *	given session.
 * @gdev: The Guest extension device.
 * @session: The session.
 * @or_mask: The events to add.
 * @not_mask: The events to remove.
 * @session_termination: Set if we're called by the session cleanup code.
 *                       This tweaks the error handling so we perform
 *                       proper session cleanup even if the host
 *                       misbehaves.
 *
 * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
 * do session cleanup. Takes the session mutex.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_set_session_event_filter(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	struct vmmdev_mask *req;
	u32 changed, previous;
	int rc, ret = 0;

	/*
	 * Allocate a request buffer before taking the spinlock, when
	 * the session is being terminated the requestor is the kernel,
	 * as we're cleaning up.
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
			    session_termination ? VBG_KERNEL_REQUEST :
						  session->requestor);
	if (!req) {
		if (!session_termination)
			return -ENOMEM;
		/* Ignore allocation failure, we must do session cleanup. */
	}

	mutex_lock(&gdev->session_mutex);

	/* Apply the changes to the session mask. */
	previous = session->event_filter;
	session->event_filter |= or_mask;
	session->event_filter &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->event_filter;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
	or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;

	if (gdev->event_filter_host == or_mask || !req)
		goto out;

	gdev->event_filter_host = or_mask;
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		ret = vbg_status_code_to_errno(rc);

		/* Failed, roll back (unless it's session termination time). */
		gdev->event_filter_host = U32_MAX;
		if (session_termination)
			goto out;

		vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
				    session->event_filter);
		session->event_filter = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);
	vbg_req_free(req, sizeof(*req));

	return ret;
}
/**
 * vbg_reset_host_capabilities - Init and termination worker for setting
 *	the guest capabilities to zero on the host.
 * @gdev: The Guest extension device.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
{
	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->not_mask = U32_MAX;
	req->or_mask = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}
/**
 * vbg_set_host_capabilities - Set guest capabilities on the host.
 * @gdev: The Guest extension device.
 * @session: The session.
 * @session_termination: Set if we're called by the session cleanup code.
 *
 * Must be called with gdev->session_mutex held.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_set_host_capabilities(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     bool session_termination)
{
	struct vmmdev_mask *req;
	u32 caps;
	int rc;

	WARN_ON(!mutex_is_locked(&gdev->session_mutex));

	caps = gdev->acquired_guest_caps | gdev->set_guest_caps_tracker.mask;

	if (gdev->guest_caps_host == caps)
		return 0;

	/* On termination the requestor is the kernel, as we're cleaning up. */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
			    session_termination ? VBG_KERNEL_REQUEST :
						  session->requestor);
	if (!req) {
		gdev->guest_caps_host = U32_MAX;
		return -ENOMEM;
	}

	req->or_mask = caps;
	req->not_mask = ~caps;
	rc = vbg_req_perform(gdev, req);
	vbg_req_free(req, sizeof(*req));

	gdev->guest_caps_host = (rc >= 0) ? caps : U32_MAX;

	return vbg_status_code_to_errno(rc);
}
/**
 * vbg_acquire_session_capabilities - Acquire (get exclusive access)
 *	guest capabilities for a session.
 * @gdev: The Guest extension device.
 * @session: The session.
 * @flags: Flags (VBGL_IOC_AGC_FLAGS_XXX).
 * @or_mask: The capabilities to add.
 * @not_mask: The capabilities to remove.
 * @session_termination: Set if we're called by the session cleanup code.
 *                       This tweaks the error handling so we perform
 *                       proper session cleanup even if the host
 *                       misbehaves.
 *
 * Takes the session mutex.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_acquire_session_capabilities(struct vbg_dev *gdev,
					    struct vbg_session *session,
					    u32 or_mask, u32 not_mask,
					    u32 flags, bool session_termination)
{
	unsigned long irqflags;
	bool wakeup = false;
	int ret = 0;

	mutex_lock(&gdev->session_mutex);

	if (gdev->set_guest_caps_tracker.mask & or_mask) {
		vbg_err("%s error: cannot acquire caps which are currently set\n",
			__func__);
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Mark any caps in the or_mask as now being in acquire-mode. Note
	 * once caps are in acquire_mode they always stay in this mode.
	 * This impacts event handling, so we take the event-lock.
	 */
	spin_lock_irqsave(&gdev->event_spinlock, irqflags);
	gdev->acquire_mode_guest_caps |= or_mask;
	spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);

	/* If we only have to switch the caps to acquire mode, we're done. */
	if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
		goto out;

	not_mask &= ~or_mask; /* or_mask takes priority over not_mask */
	not_mask &= session->acquired_guest_caps;
	or_mask &= ~session->acquired_guest_caps;

	if (or_mask == 0 && not_mask == 0)
		goto out;

	if (gdev->acquired_guest_caps & or_mask) {
		ret = -EBUSY;
		goto out;
	}

	gdev->acquired_guest_caps |= or_mask;
	gdev->acquired_guest_caps &= ~not_mask;
	/* session->acquired_guest_caps impacts event handling, take the lock */
	spin_lock_irqsave(&gdev->event_spinlock, irqflags);
	session->acquired_guest_caps |= or_mask;
	session->acquired_guest_caps &= ~not_mask;
	spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);

	ret = vbg_set_host_capabilities(gdev, session, session_termination);
	/* Roll back on failure, unless it's session termination time. */
	if (ret < 0 && !session_termination) {
		gdev->acquired_guest_caps &= ~or_mask;
		gdev->acquired_guest_caps |= not_mask;
		spin_lock_irqsave(&gdev->event_spinlock, irqflags);
		session->acquired_guest_caps &= ~or_mask;
		session->acquired_guest_caps |= not_mask;
		spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);
	}

	/*
	 * If we added a capability, check if that means some other thread in
	 * our session should be unblocked because there are events pending
	 * (the result of vbg_get_allowed_event_mask_for_session() may change).
	 *
	 * HACK ALERT! When the seamless support capability is added we generate
	 *	a seamless change event so that the ring-3 client can sync with
	 *	the seamless state.
	 */
	if (ret == 0 && or_mask != 0) {
		spin_lock_irqsave(&gdev->event_spinlock, irqflags);

		if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
			gdev->pending_events |=
				VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;

		if (gdev->pending_events)
			wakeup = true;

		spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);

		if (wakeup)
			wake_up(&gdev->event_wq);
	}

out:
	mutex_unlock(&gdev->session_mutex);

	return ret;
}
/**
 * vbg_set_session_capabilities - Sets the guest capabilities for a
 *	session. Takes the session mutex.
 * @gdev: The Guest extension device.
 * @session: The session.
 * @or_mask: The capabilities to add.
 * @not_mask: The capabilities to remove.
 * @session_termination: Set if we're called by the session cleanup code.
 *                       This tweaks the error handling so we perform
 *                       proper session cleanup even if the host
 *                       misbehaves.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_set_session_capabilities(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	u32 changed, previous;
	int ret = 0;

	mutex_lock(&gdev->session_mutex);

	if (gdev->acquire_mode_guest_caps & or_mask) {
		vbg_err("%s error: cannot set caps which are in acquire_mode\n",
			__func__);
		ret = -EBUSY;
		goto out;
	}

	/* Apply the changes to the session mask. */
	previous = session->set_guest_caps;
	session->set_guest_caps |= or_mask;
	session->set_guest_caps &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->set_guest_caps;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, previous);

	ret = vbg_set_host_capabilities(gdev, session, session_termination);
	/* Roll back on failure, unless it's session termination time. */
	if (ret < 0 && !session_termination) {
		vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed,
				    session->set_guest_caps);
		session->set_guest_caps = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);

	return ret;
}
/**
 * vbg_query_host_version - get the host feature mask and version information.
 * @gdev: The Guest extension device.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_query_host_version(struct vbg_dev *gdev)
{
	struct vmmdev_host_version *req;
	int rc, ret;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	rc = vbg_req_perform(gdev, req);
	ret = vbg_status_code_to_errno(rc);
	if (ret) {
		vbg_err("%s error: %d\n", __func__, rc);
		goto out;
	}

	snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
		 req->major, req->minor, req->build, req->revision);
	gdev->host_features = req->features;

	vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
		 gdev->host_features);

	if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
		vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
		ret = -ENODEV;
	}

out:
	vbg_req_free(req, sizeof(*req));
	return ret;
}
/**
 * vbg_core_init - Initializes the VBoxGuest device extension when the
 *	device driver is loaded.
 * @gdev: The Guest extension device.
 * @fixed_events: Events that will be enabled upon init and no client
 *	will ever be allowed to mask.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges; this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * Return: %0 or negative errno value.
 */
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
{
	int ret = -ENOMEM;

	gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
	gdev->event_filter_host = U32_MAX;	/* forces a report */
	gdev->guest_caps_host = U32_MAX;	/* forces a report */

	init_waitqueue_head(&gdev->event_wq);
	init_waitqueue_head(&gdev->hgcm_wq);
	spin_lock_init(&gdev->event_spinlock);
	mutex_init(&gdev->session_mutex);
	mutex_init(&gdev->cancel_req_mutex);
	timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
	INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);

	gdev->mem_balloon.get_req =
		vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
			      VBG_KERNEL_REQUEST);
	gdev->mem_balloon.change_req =
		vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
			      VMMDEVREQ_CHANGE_MEMBALLOON,
			      VBG_KERNEL_REQUEST);
	gdev->cancel_req =
		vbg_req_alloc(sizeof(*(gdev->cancel_req)),
			      VMMDEVREQ_HGCM_CANCEL2,
			      VBG_KERNEL_REQUEST);
	gdev->ack_events_req =
		vbg_req_alloc(sizeof(*gdev->ack_events_req),
			      VMMDEVREQ_ACKNOWLEDGE_EVENTS,
			      VBG_KERNEL_REQUEST);
	gdev->mouse_status_req =
		vbg_req_alloc(sizeof(*gdev->mouse_status_req),
			      VMMDEVREQ_GET_MOUSE_STATUS,
			      VBG_KERNEL_REQUEST);

	if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
	    !gdev->cancel_req || !gdev->ack_events_req ||
	    !gdev->mouse_status_req)
		goto err_free_reqs;

	ret = vbg_query_host_version(gdev);
	if (ret)
		goto err_free_reqs;

	ret = vbg_report_guest_info(gdev);
	if (ret) {
		vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
		goto err_free_reqs;
	}

	ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
	if (ret) {
		vbg_err("vboxguest: Error setting fixed event filter: %d\n",
			ret);
		goto err_free_reqs;
	}

	ret = vbg_reset_host_capabilities(gdev);
	if (ret) {
		vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
			ret);
		goto err_free_reqs;
	}

	ret = vbg_core_set_mouse_status(gdev, 0);
	if (ret) {
		vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
		goto err_free_reqs;
	}

	/* These may fail without requiring the driver init to fail. */
	vbg_guest_mappings_init(gdev);
	vbg_heartbeat_init(gdev);

	ret = vbg_report_driver_status(gdev, true);
	if (ret < 0)
		vbg_err("vboxguest: Error reporting driver status: %d\n", ret);

	return 0;

err_free_reqs:
	vbg_req_free(gdev->mouse_status_req,
		     sizeof(*gdev->mouse_status_req));
	vbg_req_free(gdev->ack_events_req,
		     sizeof(*gdev->ack_events_req));
	vbg_req_free(gdev->cancel_req,
		     sizeof(*gdev->cancel_req));
	vbg_req_free(gdev->mem_balloon.change_req,
		     sizeof(*gdev->mem_balloon.change_req));
	vbg_req_free(gdev->mem_balloon.get_req,
		     sizeof(*gdev->mem_balloon.get_req));
	return ret;
}
/**
 * vbg_core_exit - Call this on exit to clean-up vboxguest-core managed
 *	resources.
 * @gdev: The Guest extension device.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 */
void vbg_core_exit(struct vbg_dev *gdev)
{
	vbg_heartbeat_exit(gdev);
	vbg_guest_mappings_exit(gdev);

	/* Clear the host flags (mouse status etc). */
	vbg_reset_host_event_filter(gdev, 0);
	vbg_reset_host_capabilities(gdev);
	vbg_core_set_mouse_status(gdev, 0);

	vbg_req_free(gdev->mouse_status_req,
		     sizeof(*gdev->mouse_status_req));
	vbg_req_free(gdev->ack_events_req,
		     sizeof(*gdev->ack_events_req));
	vbg_req_free(gdev->cancel_req,
		     sizeof(*gdev->cancel_req));
	vbg_req_free(gdev->mem_balloon.change_req,
		     sizeof(*gdev->mem_balloon.change_req));
	vbg_req_free(gdev->mem_balloon.get_req,
		     sizeof(*gdev->mem_balloon.get_req));
}
/**
 * vbg_core_open_session - Creates a VBoxGuest user session.
 * @gdev: The Guest extension device.
 * @requestor: VMMDEV_REQUESTOR_* flags
 *
 * vboxguest_linux.c calls this when userspace opens the char-device.
 *
 * Return: A pointer to the new session or an ERR_PTR on error.
 */
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
{
	struct vbg_session *session;

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return ERR_PTR(-ENOMEM);

	session->gdev = gdev;
	session->requestor = requestor;

	return session;
}
/**
 * vbg_core_close_session - Closes a VBoxGuest session.
 * @session: The session to close (and free).
 */
void vbg_core_close_session(struct vbg_session *session)
{
	struct vbg_dev *gdev = session->gdev;
	int i, rc;

	vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX, 0, true);
	vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
	vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);

	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i])
			continue;

		/* requestor is kernel here, as we're cleaning up. */
		vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
				    session->hgcm_client_ids[i], &rc);
	}

	kfree(session);
}
static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
			 size_t out_size)
{
	if (hdr->size_in  != (sizeof(*hdr) + in_size) ||
	    hdr->size_out != (sizeof(*hdr) + out_size))
		return -EINVAL;

	return 0;
}
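/*
 * Both sizes checked above include the header itself. For example, an ioctl
 * with an 8 byte input payload and no output payload must have
 * size_in == sizeof(struct vbg_ioctl_hdr) + 8 and
 * size_out == sizeof(struct vbg_ioctl_hdr).
 */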
static int vbg_ioctl_driver_version_info(
	struct vbg_ioctl_driver_version_info *info)
{
	const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
	u16 min_maj_version, req_maj_version;

	if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
		return -EINVAL;

	req_maj_version = info->u.in.req_version >> 16;
	min_maj_version = info->u.in.min_version >> 16;

	if (info->u.in.min_version > info->u.in.req_version ||
	    min_maj_version != req_maj_version)
		return -EINVAL;

	if (info->u.in.min_version <= VBG_IOC_VERSION &&
	    min_maj_version == vbg_maj_version) {
		info->u.out.session_version = VBG_IOC_VERSION;
	} else {
		info->u.out.session_version = U32_MAX;
		info->hdr.rc = VERR_VERSION_MISMATCH;
	}

	info->u.out.driver_version = VBG_IOC_VERSION;
	info->u.out.driver_revision = 0;
	info->u.out.reserved1 = 0;
	info->u.out.reserved2 = 0;

	return 0;
}
/* Must be called with the event_lock held */
static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev,
						  struct vbg_session *session)
{
	u32 acquire_mode_caps = gdev->acquire_mode_guest_caps;
	u32 session_acquired_caps = session->acquired_guest_caps;
	u32 allowed_events = VMMDEV_EVENT_VALID_EVENT_MASK;

	if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS) &&
	    !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
		allowed_events &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;

	if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS) &&
	    !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
		allowed_events &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;

	return allowed_events;
}
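/*
 * In other words: once a capability is in acquire-mode, the corresponding
 * event is only delivered to the session that currently holds (has acquired)
 * that capability; for every other session the event is masked out here.
 */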
static bool vbg_wait_event_cond(struct vbg_dev *gdev,
				struct vbg_session *session,
				u32 event_mask)
{
	unsigned long flags;
	bool wakeup;
	u32 events;

	spin_lock_irqsave(&gdev->event_spinlock, flags);

	events = gdev->pending_events & event_mask;
	events &= vbg_get_allowed_event_mask_for_session(gdev, session);
	wakeup = events || session->cancel_waiters;

	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return wakeup;
}
/* Must be called with the event_lock held */
static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     u32 event_mask)
{
	u32 events = gdev->pending_events & event_mask;

	events &= vbg_get_allowed_event_mask_for_session(gdev, session);
	gdev->pending_events &= ~events;
	return events;
}
static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_wait_for_events *wait)
{
	u32 timeout_ms = wait->u.in.timeout_ms;
	u32 event_mask = wait->u.in.events;
	unsigned long flags;
	long timeout;
	int ret = 0;

	if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
		return -EINVAL;

	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	wait->u.out.events = 0;
	do {
		timeout = wait_event_interruptible_timeout(
				gdev->event_wq,
				vbg_wait_event_cond(gdev, session, event_mask),
				timeout);

		spin_lock_irqsave(&gdev->event_spinlock, flags);

		if (timeout < 0 || session->cancel_waiters) {
			ret = -EINTR;
		} else if (timeout == 0) {
			ret = -ETIMEDOUT;
		} else {
			wait->u.out.events =
			   vbg_consume_events_locked(gdev, session, event_mask);
		}

		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		/*
		 * Someone else may have consumed the event(s) first, in
		 * which case we go back to waiting.
		 */
	} while (ret == 0 && wait->u.out.events == 0);

	return ret;
}
static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
					       struct vbg_session *session,
					       struct vbg_ioctl_hdr *hdr)
{
	unsigned long flags;

	if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
		return -EINVAL;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	session->cancel_waiters = true;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	wake_up(&gdev->event_wq);

	return 0;
}
/**
 * vbg_req_allowed - Checks if the VMM request is allowed in the
 *	context of the given session.
 * @gdev: The Guest extension device.
 * @session: The calling session.
 * @req: The request.
 *
 * Return: %0 or negative errno value.
 */
static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
			   const struct vmmdev_request_header *req)
{
	const struct vmmdev_guest_status *guest_status;
	bool trusted_apps_only;

	switch (req->request_type) {
	/* Trusted users apps only. */
	case VMMDEVREQ_QUERY_CREDENTIALS:
	case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
	case VMMDEVREQ_REGISTER_SHARED_MODULE:
	case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
	case VMMDEVREQ_WRITE_COREDUMP:
	case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
	case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
	case VMMDEVREQ_CHECK_SHARED_MODULES:
	case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
	case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
	case VMMDEVREQ_REPORT_GUEST_STATS:
	case VMMDEVREQ_REPORT_GUEST_USER_STATE:
	case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
		trusted_apps_only = true;
		break;

	/* Anyone. */
	case VMMDEVREQ_GET_MOUSE_STATUS:
	case VMMDEVREQ_SET_MOUSE_STATUS:
	case VMMDEVREQ_SET_POINTER_SHAPE:
	case VMMDEVREQ_GET_HOST_VERSION:
	case VMMDEVREQ_IDLE:
	case VMMDEVREQ_GET_HOST_TIME:
	case VMMDEVREQ_SET_POWER_STATUS:
	case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
	case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
	case VMMDEVREQ_REPORT_GUEST_STATUS:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
	case VMMDEVREQ_VIDEMODE_SUPPORTED:
	case VMMDEVREQ_GET_HEIGHT_REDUCTION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
	case VMMDEVREQ_VIDEMODE_SUPPORTED2:
	case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
	case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
	case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
	case VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI:
	case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
	case VMMDEVREQ_GET_VRDPCHANGE_REQ:
	case VMMDEVREQ_LOG_STRING:
	case VMMDEVREQ_GET_SESSION_ID:
		trusted_apps_only = false;
		break;

	/* Depends on the request parameters... */
	case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
		guest_status = (const struct vmmdev_guest_status *)req;
		switch (guest_status->facility) {
		case VBOXGUEST_FACILITY_TYPE_ALL:
		case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
			vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
				guest_status->facility);
			return -EPERM;
		case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
			trusted_apps_only = true;
			break;
		case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
		case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
		case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
		default:
			trusted_apps_only = false;
			break;
		}
		break;

	/* Anything else is not allowed. */
	default:
		vbg_err("Denying userspace vmm call type %#08x\n",
			req->request_type);
		return -EPERM;
	}

	if (trusted_apps_only &&
	    (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
			req->request_type);
		return -EPERM;
	}

	return 0;
}
static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
				struct vbg_session *session, void *data)
{
	struct vbg_ioctl_hdr *hdr = data;
	int ret;

	if (hdr->size_in != hdr->size_out)
		return -EINVAL;

	if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
		return -E2BIG;

	if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	ret = vbg_req_allowed(gdev, session, data);
	if (ret < 0)
		return ret;

	vbg_req_perform(gdev, data);
	WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);

	return 0;
}
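/*
 * Note that this pass-through path hands the userspace-constructed VMMDev
 * request to the host as-is; vbg_req_allowed() above is the only filtering
 * applied, which is why its request-type allow-list is as restrictive as
 * it is.
 */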
static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
				  struct vbg_session *session,
				  struct vbg_ioctl_hgcm_connect *conn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
		return -EINVAL;

	/* Find a free place in the session's clients array and claim it */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i]) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EMFILE;

	ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
			       &client_id, &conn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && conn->hdr.rc >= 0) {
		conn->u.out.client_id = client_id;
		session->hgcm_client_ids[i] = client_id;
	} else {
		conn->u.out.client_id = 0;
		session->hgcm_client_ids[i] = 0;
	}
	mutex_unlock(&gdev->session_mutex);

	return ret;
}
static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_hgcm_disconnect *disconn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
		return -EINVAL;

	client_id = disconn->u.in.client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (session->hgcm_client_ids[i] == client_id) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EINVAL;

	ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
				  &disconn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && disconn->hdr.rc >= 0)
		session->hgcm_client_ids[i] = 0;
	else
		session->hgcm_client_ids[i] = client_id;
	mutex_unlock(&gdev->session_mutex);

	return ret;
}
static bool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
{
	switch (type) {
	case VMMDEV_HGCM_PARM_TYPE_32BIT:
	case VMMDEV_HGCM_PARM_TYPE_64BIT:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		return true;
	default:
		return false;
	}
}
static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
			       struct vbg_session *session, bool f32bit,
			       struct vbg_ioctl_hgcm_call *call)
{
	size_t actual_size;
	u32 client_id;
	int i, ret;

	if (call->hdr.size_in < sizeof(*call))
		return -EINVAL;

	if (call->hdr.size_in != call->hdr.size_out)
		return -EINVAL;

	if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
		return -E2BIG;

	client_id = call->client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	actual_size = sizeof(*call);
	if (f32bit)
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter32);
	else
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter);
	if (call->hdr.size_in < actual_size) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
			  call->hdr.size_in, actual_size);
		return -EINVAL;
	}
	call->hdr.size_out = actual_size;

	/* Validate parameter types */
	if (f32bit) {
		struct vmmdev_hgcm_function_parameter32 *parm =
			VBG_IOCTL_HGCM_CALL_PARMS32(call);

		for (i = 0; i < call->parm_count; i++)
			if (!vbg_param_valid(parm[i].type))
				return -EINVAL;
	} else {
		struct vmmdev_hgcm_function_parameter *parm =
			VBG_IOCTL_HGCM_CALL_PARMS(call);

		for (i = 0; i < call->parm_count; i++)
			if (!vbg_param_valid(parm[i].type))
				return -EINVAL;
	}

	/*
	 * Validate the client id.
	 */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
		if (session->hgcm_client_ids[i] == client_id)
			break;
	mutex_unlock(&gdev->session_mutex);
	if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
			  client_id);
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
		ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
				      call->function, call->timeout_ms,
				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
				      call->parm_count, &call->hdr.rc);
	else
		ret = vbg_hgcm_call(gdev, session->requestor, client_id,
				    call->function, call->timeout_ms,
				    VBG_IOCTL_HGCM_CALL_PARMS(call),
				    call->parm_count, &call->hdr.rc);

	if (ret == -E2BIG) {
		/* E2BIG needs to be reported through the hdr.rc field. */
		call->hdr.rc = VERR_OUT_OF_RANGE;
		ret = 0;
	}

	if (ret && ret != -EINTR && ret != -ETIMEDOUT)
		vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);

	return ret;
}
static int vbg_ioctl_log(struct vbg_ioctl_log *log)
{
	if (log->hdr.size_out != sizeof(log->hdr))
		return -EINVAL;

	vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
		 log->msg);

	return 0;
}
static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
					struct vbg_session *session,
					struct vbg_ioctl_change_filter *filter)
{
	u32 or_mask, not_mask;

	if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
		return -EINVAL;

	or_mask = filter->u.in.or_mask;
	not_mask = filter->u.in.not_mask;

	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
		return -EINVAL;

	return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
					    false);
}
*gdev
,
1613 struct vbg_session
*session
,
1614 struct vbg_ioctl_acquire_guest_caps
*caps
)
1616 u32 flags
, or_mask
, not_mask
;
1618 if (vbg_ioctl_chk(&caps
->hdr
, sizeof(caps
->u
.in
), 0))
1621 flags
= caps
->u
.in
.flags
;
1622 or_mask
= caps
->u
.in
.or_mask
;
1623 not_mask
= caps
->u
.in
.not_mask
;
1625 if (flags
& ~VBGL_IOC_AGC_FLAGS_VALID_MASK
)
1628 if ((or_mask
| not_mask
) & ~VMMDEV_GUEST_CAPABILITIES_MASK
)
1631 return vbg_acquire_session_capabilities(gdev
, session
, or_mask
,
1632 not_mask
, flags
, false);
static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
	     struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
{
	u32 or_mask, not_mask;
	int ret;

	if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
		return -EINVAL;

	or_mask = caps->u.in.or_mask;
	not_mask = caps->u.in.not_mask;

	if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
		return -EINVAL;

	ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
					   false);
	if (ret)
		return ret;

	caps->u.out.session_caps = session->set_guest_caps;
	caps->u.out.global_caps = gdev->guest_caps_host;

	return 0;
}
static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
				   struct vbg_ioctl_check_balloon *balloon_info)
{
	if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
		return -EINVAL;

	balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
	/*
	 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
	 * events entirely in the kernel, see vbg_core_isr().
	 */
	balloon_info->u.out.handle_in_r3 = false;

	return 0;
}
static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_write_coredump *dump)
{
	struct vmmdev_write_core_dump *req;

	if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
		return -EINVAL;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
			    session->requestor);
	if (!req)
		return -ENOMEM;

	req->flags = dump->u.in.flags;
	dump->hdr.rc = vbg_req_perform(gdev, req);

	vbg_req_free(req, sizeof(*req));
	return 0;
}
/**
 * vbg_core_ioctl - Common IOCtl for user to kernel communication.
 * @session: The client session.
 * @req: The requested function.
 * @data: The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
 *
 * Return: %0 or negative errno value.
 */
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
{
	unsigned int req_no_size = req & ~IOCSIZE_MASK;
	struct vbg_dev *gdev = session->gdev;
	struct vbg_ioctl_hdr *hdr = data;
	bool f32bit = false;

	hdr->rc = VINF_SUCCESS;
	if (!hdr->size_out)
		hdr->size_out = hdr->size_in;

	/*
	 * hdr->version and hdr->size_in / hdr->size_out minimum size are
	 * already checked by vbg_misc_device_ioctl().
	 */

	/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
	if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT)
		return vbg_ioctl_vmmrequest(gdev, session, data);

	if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	/* Fixed size requests. */
	switch (req) {
	case VBG_IOCTL_DRIVER_VERSION_INFO:
		return vbg_ioctl_driver_version_info(data);
	case VBG_IOCTL_HGCM_CONNECT:
		return vbg_ioctl_hgcm_connect(gdev, session, data);
	case VBG_IOCTL_HGCM_DISCONNECT:
		return vbg_ioctl_hgcm_disconnect(gdev, session, data);
	case VBG_IOCTL_WAIT_FOR_EVENTS:
		return vbg_ioctl_wait_for_events(gdev, session, data);
	case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
		return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
	case VBG_IOCTL_CHANGE_FILTER_MASK:
		return vbg_ioctl_change_filter_mask(gdev, session, data);
	case VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
		return vbg_ioctl_acquire_guest_capabilities(gdev, session, data);
	case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
		return vbg_ioctl_change_guest_capabilities(gdev, session, data);
	case VBG_IOCTL_CHECK_BALLOON:
		return vbg_ioctl_check_balloon(gdev, data);
	case VBG_IOCTL_WRITE_CORE_DUMP:
		return vbg_ioctl_write_core_dump(gdev, session, data);
	}

	/* Variable sized requests. */
	switch (req_no_size) {
#ifdef CONFIG_COMPAT
	case VBG_IOCTL_HGCM_CALL_32(0):
		f32bit = true;
		fallthrough;
#endif
	case VBG_IOCTL_HGCM_CALL(0):
		return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
	case VBG_IOCTL_LOG(0):
	case VBG_IOCTL_LOG_ALT(0):
		return vbg_ioctl_log(data);
	}

	vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req);
	return -ENOTTY;
}
/**
 * vbg_core_set_mouse_status - Report guest supported mouse-features to the host.
 * @gdev: The Guest extension device.
 * @features: The set of features to report to the host.
 *
 * Return: %0 or negative errno value.
 */
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
{
	struct vmmdev_mouse_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->mouse_features = features;
	req->pointer_pos_x = 0;
	req->pointer_pos_y = 0;

	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}
/* Core interrupt service routine. */
irqreturn_t vbg_core_isr(int irq, void *dev_id)
{
	struct vbg_dev *gdev = dev_id;
	struct vmmdev_events *req = gdev->ack_events_req;
	bool mouse_position_changed = false;
	unsigned long flags;
	u32 events = 0;
	int rc;

	if (!gdev->mmio->V.V1_04.have_events)
		return IRQ_NONE;

	/* Get and acknowledge events. */
	req->header.rc = VERR_INTERNAL_ERROR;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("Error performing events req, rc: %d\n", rc);
		return IRQ_NONE;
	}

	events = req->events;

	if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
		mouse_position_changed = true;
		events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
	}

	if (events & VMMDEV_EVENT_HGCM) {
		wake_up(&gdev->hgcm_wq);
		events &= ~VMMDEV_EVENT_HGCM;
	}

	if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
		schedule_work(&gdev->mem_balloon.work);
		events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	}

	if (events) {
		spin_lock_irqsave(&gdev->event_spinlock, flags);
		gdev->pending_events |= events;
		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		wake_up(&gdev->event_wq);
	}

	if (mouse_position_changed)
		vbg_linux_mouse_event(gdev);

	return IRQ_HANDLED;
}