/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
 *
 * Copyright (C) 2007-2016 Oracle Corporation
 */
#include <linux/device.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include <linux/vmalloc.h>
#include "vboxguest_core.h"
#include "vboxguest_version.h"
20 /* Get the pointer to the first HGCM parameter. */
21 #define VBG_IOCTL_HGCM_CALL_PARMS(a) \
22 ((struct vmmdev_hgcm_function_parameter *)( \
23 (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
24 /* Get the pointer to the first HGCM parameter in a 32-bit request. */
25 #define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
26 ((struct vmmdev_hgcm_function_parameter32 *)( \
27 (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
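/*
 * Usage sketch (illustrative only, not part of the driver): the HGCM
 * parameters directly follow the fixed struct vbg_ioctl_hgcm_call header,
 * so given a request buffer 'call' they can be walked like this (this
 * mirrors the parameter-type validation done in vbg_ioctl_hgcm_call()
 * further down in this file):
 *
 *      struct vmmdev_hgcm_function_parameter *parm =
 *              VBG_IOCTL_HGCM_CALL_PARMS(call);
 *      u32 i;
 *
 *      for (i = 0; i < call->parm_count; i++)
 *              if (!vbg_param_valid(parm[i].type))
 *                      return -EINVAL;
 */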
#define GUEST_MAPPINGS_TRIES	5

#define VBG_KERNEL_REQUEST \
        (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
         VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so we try several times.
 *
 * Failure to reserve the guest mappings is ignored.
 *
 * @gdev:		The Guest extension device.
 */
static void vbg_guest_mappings_init(struct vbg_dev *gdev)
{
        struct vmmdev_hypervisorinfo *req;
        void *guest_mappings[GUEST_MAPPINGS_TRIES];
        struct page **pages = NULL;
        u32 size, hypervisor_size;

        /* Query the required space. */
        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
                            VBG_KERNEL_REQUEST);

        req->hypervisor_start = 0;
        req->hypervisor_size = 0;
        rc = vbg_req_perform(gdev, req);

        /*
         * The VMM will report back if there is nothing it wants to map, like
         * for instance in VT-x and AMD-V mode.
         */
        if (req->hypervisor_size == 0)

        hypervisor_size = req->hypervisor_size;
        /* Add 4M so that we can align the vmap to 4MiB as the host requires. */
        size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;

        pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);

        gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
        if (!gdev->guest_mappings_dummy_page)

        for (i = 0; i < (size >> PAGE_SHIFT); i++)
                pages[i] = gdev->guest_mappings_dummy_page;

        /*
         * Try several times, the VMM might not accept some addresses because
         * of address clashes between the three contexts.
         */
        for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
                guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
                                         VM_MAP, PAGE_KERNEL_RO);
                if (!guest_mappings[i])

                req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
                req->header.rc = VERR_INTERNAL_ERROR;
                req->hypervisor_size = hypervisor_size;
                req->hypervisor_start =
                        (unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);

                rc = vbg_req_perform(gdev, req);

                gdev->guest_mappings = guest_mappings[i];

        /* Free vmaps from failed attempts. */
        vunmap(guest_mappings[i]);

        /* On failure free the dummy-page backing the vmap */
        if (!gdev->guest_mappings) {
                __free_page(gdev->guest_mappings_dummy_page);
                gdev->guest_mappings_dummy_page = NULL;

        vbg_req_free(req, sizeof(*req));
/**
 * Undo what vbg_guest_mappings_init did.
 *
 * @gdev:		The Guest extension device.
 */
static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
{
        struct vmmdev_hypervisorinfo *req;

        if (!gdev->guest_mappings)

        /*
         * Tell the host that we're going to free the memory we reserved for
         * it, then free it up. (Leak the memory if anything goes wrong here.)
         */
        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
                            VBG_KERNEL_REQUEST);

        req->hypervisor_start = 0;
        req->hypervisor_size = 0;

        rc = vbg_req_perform(gdev, req);

        vbg_req_free(req, sizeof(*req));

        vbg_err("%s error: %d\n", __func__, rc);

        vunmap(gdev->guest_mappings);
        gdev->guest_mappings = NULL;

        __free_page(gdev->guest_mappings_dummy_page);
        gdev->guest_mappings_dummy_page = NULL;
/**
 * Report the guest information to the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_report_guest_info(struct vbg_dev *gdev)
{
        /*
         * Allocate and fill in the two guest info reports.
         */
        struct vmmdev_guest_info *req1 = NULL;
        struct vmmdev_guest_info2 *req2 = NULL;
        int rc, ret = -ENOMEM;

        req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
                             VBG_KERNEL_REQUEST);
        req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
                             VBG_KERNEL_REQUEST);

        req1->interface_version = VMMDEV_VERSION;
        req1->os_type = VMMDEV_OSTYPE_LINUX26;
#if __BITS_PER_LONG == 64
        req1->os_type |= VMMDEV_OSTYPE_X64;
#endif

        req2->additions_major = VBG_VERSION_MAJOR;
        req2->additions_minor = VBG_VERSION_MINOR;
        req2->additions_build = VBG_VERSION_BUILD;
        req2->additions_revision = VBG_SVN_REV;
        req2->additions_features =
                VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
        strlcpy(req2->name, VBG_VERSION_STRING,
                sizeof(req2->name));

        /*
         * There are two protocols here:
         *      1. INFO2 + INFO1. Supported by >=3.2.51.
         *      2. INFO1 and optionally INFO2. The old protocol.
         *
         * We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED
         * if not supported by the VMMDev (message ordering requirement).
         */
        rc = vbg_req_perform(gdev, req2);
                rc = vbg_req_perform(gdev, req1);
        } else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
                rc = vbg_req_perform(gdev, req1);
                        rc = vbg_req_perform(gdev, req2);
                        if (rc == VERR_NOT_IMPLEMENTED)

        ret = vbg_status_code_to_errno(rc);

        vbg_req_free(req2, sizeof(*req2));
        vbg_req_free(req1, sizeof(*req1));
/**
 * Report the guest driver status to the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @active:		Flag whether the driver is now active or not.
 */
static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
{
        struct vmmdev_guest_status *req;

        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
                            VBG_KERNEL_REQUEST);

        req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
        if (active)
                req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
        else
                req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;

        rc = vbg_req_perform(gdev, req);
        if (rc == VERR_NOT_IMPLEMENTED)	/* Compatibility with older hosts. */

        vbg_req_free(req, sizeof(*req));

        return vbg_status_code_to_errno(rc);
/**
 * Inflate the balloon by one chunk. The caller owns the balloon mutex.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @chunk_idx:		Index of the chunk.
 */
static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
{
        struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;

        pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
                              sizeof(*pages),
                              GFP_KERNEL | __GFP_NOWARN);

        req->header.size = sizeof(*req);
        req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

        for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
                pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);

                req->phys_page[i] = page_to_phys(pages[i]);

        rc = vbg_req_perform(gdev, req);
                vbg_err("%s error, rc: %d\n", __func__, rc);
                ret = vbg_status_code_to_errno(rc);

        gdev->mem_balloon.pages[chunk_idx] = pages;

        __free_page(pages[i]);
/**
 * Deflate the balloon by one chunk. The caller owns the balloon mutex.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @chunk_idx:		Index of the chunk.
 */
static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
{
        struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
        struct page **pages = gdev->mem_balloon.pages[chunk_idx];

        req->header.size = sizeof(*req);
        req->inflate = false;
        req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

        for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
                req->phys_page[i] = page_to_phys(pages[i]);

        rc = vbg_req_perform(gdev, req);
                vbg_err("%s error, rc: %d\n", __func__, rc);
                return vbg_status_code_to_errno(rc);

        for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
                __free_page(pages[i]);

        gdev->mem_balloon.pages[chunk_idx] = NULL;
/**
 * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
 * the host wants the balloon to be and adjust accordingly.
 */
static void vbg_balloon_work(struct work_struct *work)
{
        struct vbg_dev *gdev =
                container_of(work, struct vbg_dev, mem_balloon.work);
        struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;

        /*
         * Setting this bit means that we request the value from the host and
         * change the guest memory balloon according to the returned value.
         */
        req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
        rc = vbg_req_perform(gdev, req);
                vbg_err("%s error, rc: %d\n", __func__, rc);

        /*
         * The host always returns the same maximum amount of chunks, so
         * we do this once.
         */
        if (!gdev->mem_balloon.max_chunks) {
                gdev->mem_balloon.pages =
                        devm_kcalloc(gdev->dev, req->phys_mem_chunks,
                                     sizeof(struct page **), GFP_KERNEL);
                if (!gdev->mem_balloon.pages)

                gdev->mem_balloon.max_chunks = req->phys_mem_chunks;

        chunks = req->balloon_chunks;
        if (chunks > gdev->mem_balloon.max_chunks) {
                vbg_err("%s: illegal balloon size %u (max=%u)\n",
                        __func__, chunks, gdev->mem_balloon.max_chunks);

        if (chunks > gdev->mem_balloon.chunks) {
                for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
                        ret = vbg_balloon_inflate(gdev, i);

                        gdev->mem_balloon.chunks++;

                for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
                        ret = vbg_balloon_deflate(gdev, i);

                        gdev->mem_balloon.chunks--;
/**
 * Callback for heartbeat timer.
 */
static void vbg_heartbeat_timer(struct timer_list *t)
{
        struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);

        vbg_req_perform(gdev, gdev->guest_heartbeat_req);
        mod_timer(&gdev->heartbeat_timer,
                  msecs_to_jiffies(gdev->heartbeat_interval_ms));
}
/**
 * Configure the host to check guest's heartbeat
 * and get heartbeat interval from the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @enabled:		Set true to enable guest heartbeat checks on host.
 */
static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
{
        struct vmmdev_heartbeat *req;

        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
                            VBG_KERNEL_REQUEST);

        req->enabled = enabled;
        req->interval_ns = 0;
        rc = vbg_req_perform(gdev, req);
        do_div(req->interval_ns, 1000000); /* ns -> ms */
        gdev->heartbeat_interval_ms = req->interval_ns;
        vbg_req_free(req, sizeof(*req));

        return vbg_status_code_to_errno(rc);
/**
 * Initializes the heartbeat timer. This feature may be disabled by the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_heartbeat_init(struct vbg_dev *gdev)
{
        /* Make sure that heartbeat checking is disabled if we fail. */
        ret = vbg_heartbeat_host_config(gdev, false);

        ret = vbg_heartbeat_host_config(gdev, true);

        gdev->guest_heartbeat_req = vbg_req_alloc(
                                        sizeof(*gdev->guest_heartbeat_req),
                                        VMMDEVREQ_GUEST_HEARTBEAT,
                                        VBG_KERNEL_REQUEST);
        if (!gdev->guest_heartbeat_req)

        vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
                 __func__, gdev->heartbeat_interval_ms);
        mod_timer(&gdev->heartbeat_timer, 0);
/**
 * Cleanup heartbeat code, stop HB timer and disable host heartbeat checking.
 * @gdev:		The Guest extension device.
 */
static void vbg_heartbeat_exit(struct vbg_dev *gdev)
{
        del_timer_sync(&gdev->heartbeat_timer);
        vbg_heartbeat_host_config(gdev, false);
        vbg_req_free(gdev->guest_heartbeat_req,
                     sizeof(*gdev->guest_heartbeat_req));
}
/**
 * Applies a change to the bit usage tracker.
 * Return: true if the mask changed, false if not.
 * @tracker:		The bit usage tracker.
 * @changed:		The bits to change.
 * @previous:		The previous value of the bits.
 */
static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
                                u32 changed, u32 previous)
{
        bool global_change = false;

                u32 bit = ffs(changed) - 1;
                u32 bitmask = BIT(bit);

                if (bitmask & previous) {
                        tracker->per_bit_usage[bit] -= 1;
                        if (tracker->per_bit_usage[bit] == 0) {
                                global_change = true;
                                tracker->mask &= ~bitmask;

                        tracker->per_bit_usage[bit] += 1;
                        if (tracker->per_bit_usage[bit] == 1) {
                                global_change = true;
                                tracker->mask |= bitmask;

        return global_change;
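/*
 * Usage sketch (illustrative only): callers drive the tracker with the
 * per-session delta, as vbg_set_session_event_filter() does further down:
 *
 *      changed = previous ^ session->event_filter;
 *      if (changed)
 *              vbg_track_bit_usage(&gdev->event_filter_tracker,
 *                                  changed, previous);
 *
 * After the call, gdev->event_filter_tracker.mask holds the union of all
 * sessions' filters; the return value says whether that union changed and
 * therefore whether the host may need to be told about it.
 */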
/**
 * Init and termination worker for resetting the (host) event filter on the host
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @fixed_events:	Fixed events (init time).
 */
static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
                                       u32 fixed_events)
{
        struct vmmdev_mask *req;

        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
                            VBG_KERNEL_REQUEST);

        req->not_mask = U32_MAX & ~fixed_events;
        req->or_mask = fixed_events;
        rc = vbg_req_perform(gdev, req);
                vbg_err("%s error, rc: %d\n", __func__, rc);

        vbg_req_free(req, sizeof(*req));
        return vbg_status_code_to_errno(rc);
/**
 * Changes the event filter mask for the given session.
 *
 * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
 * do session cleanup. Takes the session mutex.
 *
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The events to add.
 * @not_mask:			The events to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 */
static int vbg_set_session_event_filter(struct vbg_dev *gdev,
                                        struct vbg_session *session,
                                        u32 or_mask, u32 not_mask,
                                        bool session_termination)
{
        struct vmmdev_mask *req;
        u32 changed, previous;

        /*
         * Allocate a request buffer before taking the spinlock, when
         * the session is being terminated the requestor is the kernel,
         * as we're cleaning up.
         */
        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
                            session_termination ? VBG_KERNEL_REQUEST :
                                                  session->requestor);
                if (!session_termination)

                /* Ignore allocation failure, we must do session cleanup. */

        mutex_lock(&gdev->session_mutex);

        /* Apply the changes to the session mask. */
        previous = session->event_filter;
        session->event_filter |= or_mask;
        session->event_filter &= ~not_mask;

        /* If anything actually changed, update the global usage counters. */
        changed = previous ^ session->event_filter;

        vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
        or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;

        if (gdev->event_filter_host == or_mask || !req)

        gdev->event_filter_host = or_mask;
        req->or_mask = or_mask;
        req->not_mask = ~or_mask;
        rc = vbg_req_perform(gdev, req);

        ret = vbg_status_code_to_errno(rc);

                /* Failed, roll back (unless it's session termination time). */
                gdev->event_filter_host = U32_MAX;
                if (session_termination)

                vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
                                    session->event_filter);
                session->event_filter = previous;

        mutex_unlock(&gdev->session_mutex);
        vbg_req_free(req, sizeof(*req));
/**
 * Init and termination worker for setting guest capabilities to zero on the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
{
        struct vmmdev_mask *req;

        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
                            VBG_KERNEL_REQUEST);

        req->not_mask = U32_MAX;

        rc = vbg_req_perform(gdev, req);
                vbg_err("%s error, rc: %d\n", __func__, rc);

        vbg_req_free(req, sizeof(*req));
        return vbg_status_code_to_errno(rc);
/**
 * Set guest capabilities on the host.
 * Must be called with gdev->session_mutex held.
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @session_termination:	Set if we're called by the session cleanup code.
 */
static int vbg_set_host_capabilities(struct vbg_dev *gdev,
                                     struct vbg_session *session,
                                     bool session_termination)
{
        struct vmmdev_mask *req;

        WARN_ON(!mutex_is_locked(&gdev->session_mutex));

        caps = gdev->acquired_guest_caps | gdev->set_guest_caps_tracker.mask;

        if (gdev->guest_caps_host == caps)

        /* On termination the requestor is the kernel, as we're cleaning up. */
        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
                            session_termination ? VBG_KERNEL_REQUEST :
                                                  session->requestor);
                gdev->guest_caps_host = U32_MAX;

        req->not_mask = ~caps;
        rc = vbg_req_perform(gdev, req);
        vbg_req_free(req, sizeof(*req));

        gdev->guest_caps_host = (rc >= 0) ? caps : U32_MAX;

        return vbg_status_code_to_errno(rc);
/**
 * Acquire (get exclusive access) guest capabilities for a session.
 * Takes the session mutex.
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @flags:			Flags (VBGL_IOC_AGC_FLAGS_XXX).
 * @or_mask:			The capabilities to add.
 * @not_mask:			The capabilities to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 */
static int vbg_acquire_session_capabilities(struct vbg_dev *gdev,
                                            struct vbg_session *session,
                                            u32 or_mask, u32 not_mask,
                                            u32 flags, bool session_termination)
{
        unsigned long irqflags;

        mutex_lock(&gdev->session_mutex);

        if (gdev->set_guest_caps_tracker.mask & or_mask) {
                vbg_err("%s error: cannot acquire caps which are currently set\n",

        /*
         * Mark any caps in the or_mask as now being in acquire-mode. Note
         * once caps are in acquire_mode they always stay in this mode.
         * This impacts event handling, so we take the event-lock.
         */
        spin_lock_irqsave(&gdev->event_spinlock, irqflags);
        gdev->acquire_mode_guest_caps |= or_mask;
        spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);

        /* If we only have to switch the caps to acquire mode, we're done. */
        if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)

        not_mask &= ~or_mask; /* or_mask takes priority over not_mask */
        not_mask &= session->acquired_guest_caps;
        or_mask &= ~session->acquired_guest_caps;

        if (or_mask == 0 && not_mask == 0)

        if (gdev->acquired_guest_caps & or_mask) {

        gdev->acquired_guest_caps |= or_mask;
        gdev->acquired_guest_caps &= ~not_mask;
        /* session->acquired_guest_caps impacts event handling, take the lock */
        spin_lock_irqsave(&gdev->event_spinlock, irqflags);
        session->acquired_guest_caps |= or_mask;
        session->acquired_guest_caps &= ~not_mask;
        spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);

        ret = vbg_set_host_capabilities(gdev, session, session_termination);
        /* Roll back on failure, unless it's session termination time. */
        if (ret < 0 && !session_termination) {
                gdev->acquired_guest_caps &= ~or_mask;
                gdev->acquired_guest_caps |= not_mask;
                spin_lock_irqsave(&gdev->event_spinlock, irqflags);
                session->acquired_guest_caps &= ~or_mask;
                session->acquired_guest_caps |= not_mask;
                spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);

        /*
         * If we added a capability, check if that means some other thread in
         * our session should be unblocked because there are events pending
         * (the result of vbg_get_allowed_event_mask_for_session() may change).
         *
         * HACK ALERT! When the seamless support capability is added we generate
         * a seamless change event so that the ring-3 client can sync with
         * the seamless state.
         */
        if (ret == 0 && or_mask != 0) {
                spin_lock_irqsave(&gdev->event_spinlock, irqflags);

                if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
                        gdev->pending_events |=
                                VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;

                if (gdev->pending_events)

                spin_unlock_irqrestore(&gdev->event_spinlock, irqflags);

                wake_up(&gdev->event_wq);

        mutex_unlock(&gdev->session_mutex);
/**
 * Sets the guest capabilities for a session. Takes the session mutex.
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The capabilities to add.
 * @not_mask:			The capabilities to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 */
static int vbg_set_session_capabilities(struct vbg_dev *gdev,
                                        struct vbg_session *session,
                                        u32 or_mask, u32 not_mask,
                                        bool session_termination)
{
        u32 changed, previous;

        mutex_lock(&gdev->session_mutex);

        if (gdev->acquire_mode_guest_caps & or_mask) {
                vbg_err("%s error: cannot set caps which are in acquire_mode\n",

        /* Apply the changes to the session mask. */
        previous = session->set_guest_caps;
        session->set_guest_caps |= or_mask;
        session->set_guest_caps &= ~not_mask;

        /* If anything actually changed, update the global usage counters. */
        changed = previous ^ session->set_guest_caps;

        vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, previous);

        ret = vbg_set_host_capabilities(gdev, session, session_termination);
        /* Roll back on failure, unless it's session termination time. */
        if (ret < 0 && !session_termination) {
                vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed,
                                    session->set_guest_caps);
                session->set_guest_caps = previous;

        mutex_unlock(&gdev->session_mutex);
/**
 * vbg_query_host_version gets the host feature mask and version information.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_query_host_version(struct vbg_dev *gdev)
{
        struct vmmdev_host_version *req;

        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
                            VBG_KERNEL_REQUEST);

        rc = vbg_req_perform(gdev, req);
        ret = vbg_status_code_to_errno(rc);
                vbg_err("%s error: %d\n", __func__, rc);

        snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
                 req->major, req->minor, req->build, req->revision);
        gdev->host_features = req->features;

        vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
                 gdev->host_features);

        if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
                vbg_err("vboxguest: Error host too old (does not support page-lists)\n");

        vbg_req_free(req, sizeof(*req));
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * Return: 0 or negative errno value.
 *
 * @gdev:		The Guest extension device.
 * @fixed_events:	Events that will be enabled upon init and no client
 *			will ever be allowed to mask.
 */
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
{
        gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
        gdev->event_filter_host = U32_MAX;	/* forces a report */
        gdev->guest_caps_host = U32_MAX;	/* forces a report */

        init_waitqueue_head(&gdev->event_wq);
        init_waitqueue_head(&gdev->hgcm_wq);
        spin_lock_init(&gdev->event_spinlock);
        mutex_init(&gdev->session_mutex);
        mutex_init(&gdev->cancel_req_mutex);
        timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
        INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);

        gdev->mem_balloon.get_req =
                vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
                              VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
                              VBG_KERNEL_REQUEST);
        gdev->mem_balloon.change_req =
                vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
                              VMMDEVREQ_CHANGE_MEMBALLOON,
                              VBG_KERNEL_REQUEST);
        gdev->cancel_req =
                vbg_req_alloc(sizeof(*(gdev->cancel_req)),
                              VMMDEVREQ_HGCM_CANCEL2,
                              VBG_KERNEL_REQUEST);
        gdev->ack_events_req =
                vbg_req_alloc(sizeof(*gdev->ack_events_req),
                              VMMDEVREQ_ACKNOWLEDGE_EVENTS,
                              VBG_KERNEL_REQUEST);
        gdev->mouse_status_req =
                vbg_req_alloc(sizeof(*gdev->mouse_status_req),
                              VMMDEVREQ_GET_MOUSE_STATUS,
                              VBG_KERNEL_REQUEST);

        if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
            !gdev->cancel_req || !gdev->ack_events_req ||
            !gdev->mouse_status_req)

        ret = vbg_query_host_version(gdev);

        ret = vbg_report_guest_info(gdev);
                vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);

        ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
                vbg_err("vboxguest: Error setting fixed event filter: %d\n",

        ret = vbg_reset_host_capabilities(gdev);
                vbg_err("vboxguest: Error clearing guest capabilities: %d\n",

        ret = vbg_core_set_mouse_status(gdev, 0);
                vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);

        /* These may fail without requiring the driver init to fail. */
        vbg_guest_mappings_init(gdev);
        vbg_heartbeat_init(gdev);

        ret = vbg_report_driver_status(gdev, true);
                vbg_err("vboxguest: Error reporting driver status: %d\n", ret);

        vbg_req_free(gdev->mouse_status_req,
                     sizeof(*gdev->mouse_status_req));
        vbg_req_free(gdev->ack_events_req,
                     sizeof(*gdev->ack_events_req));
        vbg_req_free(gdev->cancel_req,
                     sizeof(*gdev->cancel_req));
        vbg_req_free(gdev->mem_balloon.change_req,
                     sizeof(*gdev->mem_balloon.change_req));
        vbg_req_free(gdev->mem_balloon.get_req,
                     sizeof(*gdev->mem_balloon.get_req));
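/*
 * Illustrative sketch (assumptions: a PCI probe/remove context such as the
 * one vboxguest_linux.c provides, with 'gdev' and 'pci' already set up).
 * The expected call order from the native code is roughly:
 *
 *      ret = vbg_core_init(gdev, VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
 *      if (ret == 0)
 *              ret = request_irq(pci->irq, vbg_core_isr, IRQF_SHARED,
 *                                "vboxguest", gdev);
 *      ...
 *      free_irq(pci->irq, gdev);
 *      vbg_core_exit(gdev);
 */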
/**
 * Call this on exit to clean-up vboxguest-core managed resources.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 * @gdev:		The Guest extension device.
 */
void vbg_core_exit(struct vbg_dev *gdev)
{
        vbg_heartbeat_exit(gdev);
        vbg_guest_mappings_exit(gdev);

        /* Clear the host flags (mouse status etc). */
        vbg_reset_host_event_filter(gdev, 0);
        vbg_reset_host_capabilities(gdev);
        vbg_core_set_mouse_status(gdev, 0);

        vbg_req_free(gdev->mouse_status_req,
                     sizeof(*gdev->mouse_status_req));
        vbg_req_free(gdev->ack_events_req,
                     sizeof(*gdev->ack_events_req));
        vbg_req_free(gdev->cancel_req,
                     sizeof(*gdev->cancel_req));
        vbg_req_free(gdev->mem_balloon.change_req,
                     sizeof(*gdev->mem_balloon.change_req));
        vbg_req_free(gdev->mem_balloon.get_req,
                     sizeof(*gdev->mem_balloon.get_req));
}
/**
 * Creates a VBoxGuest user session.
 *
 * vboxguest_linux.c calls this when userspace opens the char-device.
 * Return: A pointer to the new session or an ERR_PTR on error.
 * @gdev:		The Guest extension device.
 * @requestor:		VMMDEV_REQUESTOR_* flags
 */
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
{
        struct vbg_session *session;

        session = kzalloc(sizeof(*session), GFP_KERNEL);
                return ERR_PTR(-ENOMEM);

        session->gdev = gdev;
        session->requestor = requestor;
/**
 * Closes a VBoxGuest session.
 * @session:		The session to close (and free).
 */
void vbg_core_close_session(struct vbg_session *session)
{
        struct vbg_dev *gdev = session->gdev;

        vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX, 0, true);
        vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
        vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);

        for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
                if (!session->hgcm_client_ids[i])

                /* requestor is kernel here, as we're cleaning up. */
                vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
                                    session->hgcm_client_ids[i], &rc);
static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
                         size_t out_size)
{
        if (hdr->size_in != (sizeof(*hdr) + in_size) ||
            hdr->size_out != (sizeof(*hdr) + out_size))
static int vbg_ioctl_driver_version_info(
        struct vbg_ioctl_driver_version_info *info)
{
        const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
        u16 min_maj_version, req_maj_version;

        if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))

        req_maj_version = info->u.in.req_version >> 16;
        min_maj_version = info->u.in.min_version >> 16;

        if (info->u.in.min_version > info->u.in.req_version ||
            min_maj_version != req_maj_version)

        if (info->u.in.min_version <= VBG_IOC_VERSION &&
            min_maj_version == vbg_maj_version) {
                info->u.out.session_version = VBG_IOC_VERSION;

                info->u.out.session_version = U32_MAX;
                info->hdr.rc = VERR_VERSION_MISMATCH;

        info->u.out.driver_version = VBG_IOC_VERSION;
        info->u.out.driver_revision = 0;
        info->u.out.reserved1 = 0;
        info->u.out.reserved2 = 0;
/* Must be called with the event_lock held */
static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev,
                                                  struct vbg_session *session)
{
        u32 acquire_mode_caps = gdev->acquire_mode_guest_caps;
        u32 session_acquired_caps = session->acquired_guest_caps;
        u32 allowed_events = VMMDEV_EVENT_VALID_EVENT_MASK;

        if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS) &&
            !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
                allowed_events &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;

        if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS) &&
            !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
                allowed_events &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;

        return allowed_events;
}
static bool vbg_wait_event_cond(struct vbg_dev *gdev,
                                struct vbg_session *session,
                                u32 event_mask)
{
        unsigned long flags;

        spin_lock_irqsave(&gdev->event_spinlock, flags);

        events = gdev->pending_events & event_mask;
        events &= vbg_get_allowed_event_mask_for_session(gdev, session);
        wakeup = events || session->cancel_waiters;

        spin_unlock_irqrestore(&gdev->event_spinlock, flags);
/* Must be called with the event_lock held */
static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
                                     struct vbg_session *session,
                                     u32 event_mask)
{
        u32 events = gdev->pending_events & event_mask;

        events &= vbg_get_allowed_event_mask_for_session(gdev, session);
        gdev->pending_events &= ~events;
static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
                                     struct vbg_session *session,
                                     struct vbg_ioctl_wait_for_events *wait)
{
        u32 timeout_ms = wait->u.in.timeout_ms;
        u32 event_mask = wait->u.in.events;
        unsigned long flags;

        if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))

        if (timeout_ms == U32_MAX)
                timeout = MAX_SCHEDULE_TIMEOUT;
        else
                timeout = msecs_to_jiffies(timeout_ms);

        wait->u.out.events = 0;

                timeout = wait_event_interruptible_timeout(
                                gdev->event_wq,
                                vbg_wait_event_cond(gdev, session, event_mask),
                                timeout);

                spin_lock_irqsave(&gdev->event_spinlock, flags);

                if (timeout < 0 || session->cancel_waiters) {

                } else if (timeout == 0) {

                        wait->u.out.events =
                                vbg_consume_events_locked(gdev, session, event_mask);

                spin_unlock_irqrestore(&gdev->event_spinlock, flags);

                /*
                 * Someone else may have consumed the event(s) first, in
                 * which case we go back to waiting.
                 */
        } while (ret == 0 && wait->u.out.events == 0);
static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
                                               struct vbg_session *session,
                                               struct vbg_ioctl_hdr *hdr)
{
        unsigned long flags;

        if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))

        spin_lock_irqsave(&gdev->event_spinlock, flags);
        session->cancel_waiters = true;
        spin_unlock_irqrestore(&gdev->event_spinlock, flags);

        wake_up(&gdev->event_wq);
/**
 * Checks if the VMM request is allowed in the context of the given session.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @session:		The calling session.
 * @req:		The request.
 */
static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
                           const struct vmmdev_request_header *req)
{
        const struct vmmdev_guest_status *guest_status;
        bool trusted_apps_only;

        switch (req->request_type) {
        /* Trusted user apps only. */
        case VMMDEVREQ_QUERY_CREDENTIALS:
        case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
        case VMMDEVREQ_REGISTER_SHARED_MODULE:
        case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
        case VMMDEVREQ_WRITE_COREDUMP:
        case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
        case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
        case VMMDEVREQ_CHECK_SHARED_MODULES:
        case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
        case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
        case VMMDEVREQ_REPORT_GUEST_STATS:
        case VMMDEVREQ_REPORT_GUEST_USER_STATE:
        case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
                trusted_apps_only = true;

        case VMMDEVREQ_GET_MOUSE_STATUS:
        case VMMDEVREQ_SET_MOUSE_STATUS:
        case VMMDEVREQ_SET_POINTER_SHAPE:
        case VMMDEVREQ_GET_HOST_VERSION:
        case VMMDEVREQ_IDLE:
        case VMMDEVREQ_GET_HOST_TIME:
        case VMMDEVREQ_SET_POWER_STATUS:
        case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
        case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
        case VMMDEVREQ_REPORT_GUEST_STATUS:
        case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
        case VMMDEVREQ_VIDEMODE_SUPPORTED:
        case VMMDEVREQ_GET_HEIGHT_REDUCTION:
        case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
        case VMMDEVREQ_VIDEMODE_SUPPORTED2:
        case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
        case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
        case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
        case VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS:
        case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
        case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI:
        case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
        case VMMDEVREQ_GET_VRDPCHANGE_REQ:
        case VMMDEVREQ_LOG_STRING:
        case VMMDEVREQ_GET_SESSION_ID:
                trusted_apps_only = false;

        /* Depends on the request parameters... */
        case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
                guest_status = (const struct vmmdev_guest_status *)req;
                switch (guest_status->facility) {
                case VBOXGUEST_FACILITY_TYPE_ALL:
                case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
                        vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
                                guest_status->facility);

                case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
                        trusted_apps_only = true;

                case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
                case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
                case VBOXGUEST_FACILITY_TYPE_GRAPHICS:

                        trusted_apps_only = false;

        /* Anything else is not allowed. */

                vbg_err("Denying userspace vmm call type %#08x\n",

        if (trusted_apps_only &&
            (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
                vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
                                struct vbg_session *session, void *data)
{
        struct vbg_ioctl_hdr *hdr = data;

        if (hdr->size_in != hdr->size_out)

        if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)

        if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)

        ret = vbg_req_allowed(gdev, session, data);

        vbg_req_perform(gdev, data);
        WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);
static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
                                  struct vbg_session *session,
                                  struct vbg_ioctl_hgcm_connect *conn)
{
        if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))

        /* Find a free place in the session's clients array and claim it */
        mutex_lock(&gdev->session_mutex);
        for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
                if (!session->hgcm_client_ids[i]) {
                        session->hgcm_client_ids[i] = U32_MAX;

        mutex_unlock(&gdev->session_mutex);

        if (i >= ARRAY_SIZE(session->hgcm_client_ids))

        ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
                               &client_id, &conn->hdr.rc);

        mutex_lock(&gdev->session_mutex);
        if (ret == 0 && conn->hdr.rc >= 0) {
                conn->u.out.client_id = client_id;
                session->hgcm_client_ids[i] = client_id;

                conn->u.out.client_id = 0;
                session->hgcm_client_ids[i] = 0;

        mutex_unlock(&gdev->session_mutex);
static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
                                     struct vbg_session *session,
                                     struct vbg_ioctl_hgcm_disconnect *disconn)
{
        if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))

        client_id = disconn->u.in.client_id;
        if (client_id == 0 || client_id == U32_MAX)

        mutex_lock(&gdev->session_mutex);
        for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
                if (session->hgcm_client_ids[i] == client_id) {
                        session->hgcm_client_ids[i] = U32_MAX;

        mutex_unlock(&gdev->session_mutex);

        if (i >= ARRAY_SIZE(session->hgcm_client_ids))

        ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
                                  &disconn->hdr.rc);

        mutex_lock(&gdev->session_mutex);
        if (ret == 0 && disconn->hdr.rc >= 0)
                session->hgcm_client_ids[i] = 0;

                session->hgcm_client_ids[i] = client_id;
        mutex_unlock(&gdev->session_mutex);
static bool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
{
        switch (type) {
        case VMMDEV_HGCM_PARM_TYPE_32BIT:
        case VMMDEV_HGCM_PARM_TYPE_64BIT:
        case VMMDEV_HGCM_PARM_TYPE_LINADDR:
        case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
        case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
                               struct vbg_session *session, bool f32bit,
                               struct vbg_ioctl_hgcm_call *call)
{
        if (call->hdr.size_in < sizeof(*call))

        if (call->hdr.size_in != call->hdr.size_out)

        if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)

        client_id = call->client_id;
        if (client_id == 0 || client_id == U32_MAX)

        actual_size = sizeof(*call);
                actual_size += call->parm_count *
                               sizeof(struct vmmdev_hgcm_function_parameter32);
                actual_size += call->parm_count *
                               sizeof(struct vmmdev_hgcm_function_parameter);
        if (call->hdr.size_in < actual_size) {
                vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
                          call->hdr.size_in, actual_size);

        call->hdr.size_out = actual_size;

        /* Validate parameter types */
                struct vmmdev_hgcm_function_parameter32 *parm =
                        VBG_IOCTL_HGCM_CALL_PARMS32(call);

                for (i = 0; i < call->parm_count; i++)
                        if (!vbg_param_valid(parm[i].type))

                struct vmmdev_hgcm_function_parameter *parm =
                        VBG_IOCTL_HGCM_CALL_PARMS(call);

                for (i = 0; i < call->parm_count; i++)
                        if (!vbg_param_valid(parm[i].type))

        /*
         * Validate the client id.
         */
        mutex_lock(&gdev->session_mutex);
        for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
                if (session->hgcm_client_ids[i] == client_id)
        mutex_unlock(&gdev->session_mutex);
        if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
                vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",

        if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
                ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
                                      call->function, call->timeout_ms,
                                      VBG_IOCTL_HGCM_CALL_PARMS32(call),
                                      call->parm_count, &call->hdr.rc);
        else
                ret = vbg_hgcm_call(gdev, session->requestor, client_id,
                                    call->function, call->timeout_ms,
                                    VBG_IOCTL_HGCM_CALL_PARMS(call),
                                    call->parm_count, &call->hdr.rc);

        if (ret == -E2BIG) {
                /* E2BIG needs to be reported through the hdr.rc field. */
                call->hdr.rc = VERR_OUT_OF_RANGE;

        if (ret && ret != -EINTR && ret != -ETIMEDOUT)
                vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);
static int vbg_ioctl_log(struct vbg_ioctl_log *log)
{
        if (log->hdr.size_out != sizeof(log->hdr))

        vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
                                        struct vbg_session *session,
                                        struct vbg_ioctl_change_filter *filter)
{
        u32 or_mask, not_mask;

        if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))

        or_mask = filter->u.in.or_mask;
        not_mask = filter->u.in.not_mask;

        if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)

        return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
                                            false);
static int vbg_ioctl_acquire_guest_capabilities(struct vbg_dev *gdev,
                                        struct vbg_session *session,
                                        struct vbg_ioctl_acquire_guest_caps *caps)
{
        u32 flags, or_mask, not_mask;

        if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), 0))

        flags = caps->u.in.flags;
        or_mask = caps->u.in.or_mask;
        not_mask = caps->u.in.not_mask;

        if (flags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK)

        if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)

        return vbg_acquire_session_capabilities(gdev, session, or_mask,
                                                not_mask, flags, false);
static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
        struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
{
        u32 or_mask, not_mask;

        if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))

        or_mask = caps->u.in.or_mask;
        not_mask = caps->u.in.not_mask;

        if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)

        ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
                                           false);

        caps->u.out.session_caps = session->set_guest_caps;
        caps->u.out.global_caps = gdev->guest_caps_host;
static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
                                   struct vbg_ioctl_check_balloon *balloon_info)
{
        if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))

        balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
        /*
         * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
         * events entirely in the kernel, see vbg_core_isr().
         */
        balloon_info->u.out.handle_in_r3 = false;
static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
                                     struct vbg_session *session,
                                     struct vbg_ioctl_write_coredump *dump)
{
        struct vmmdev_write_core_dump *req;

        if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))

        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
                            session->requestor);

        req->flags = dump->u.in.flags;
        dump->hdr.rc = vbg_req_perform(gdev, req);

        vbg_req_free(req, sizeof(*req));
/**
 * Common IOCtl for user to kernel communication.
 * Return: 0 or negative errno value.
 * @session:	The client session.
 * @req:	The requested function.
 * @data:	The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
 */
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
{
        unsigned int req_no_size = req & ~IOCSIZE_MASK;
        struct vbg_dev *gdev = session->gdev;
        struct vbg_ioctl_hdr *hdr = data;
        bool f32bit = false;

        hdr->rc = VINF_SUCCESS;
                hdr->size_out = hdr->size_in;

        /*
         * hdr->version and hdr->size_in / hdr->size_out minimum size are
         * already checked by vbg_misc_device_ioctl().
         */

        /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
        if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
            req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
            req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT)
                return vbg_ioctl_vmmrequest(gdev, session, data);

        if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)

        /* Fixed size requests. */
        case VBG_IOCTL_DRIVER_VERSION_INFO:
                return vbg_ioctl_driver_version_info(data);
        case VBG_IOCTL_HGCM_CONNECT:
                return vbg_ioctl_hgcm_connect(gdev, session, data);
        case VBG_IOCTL_HGCM_DISCONNECT:
                return vbg_ioctl_hgcm_disconnect(gdev, session, data);
        case VBG_IOCTL_WAIT_FOR_EVENTS:
                return vbg_ioctl_wait_for_events(gdev, session, data);
        case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
                return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
        case VBG_IOCTL_CHANGE_FILTER_MASK:
                return vbg_ioctl_change_filter_mask(gdev, session, data);
        case VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
                return vbg_ioctl_acquire_guest_capabilities(gdev, session, data);
        case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
                return vbg_ioctl_change_guest_capabilities(gdev, session, data);
        case VBG_IOCTL_CHECK_BALLOON:
                return vbg_ioctl_check_balloon(gdev, data);
        case VBG_IOCTL_WRITE_CORE_DUMP:
                return vbg_ioctl_write_core_dump(gdev, session, data);

        /* Variable sized requests. */
        switch (req_no_size) {
#ifdef CONFIG_COMPAT
        case VBG_IOCTL_HGCM_CALL_32(0):

        case VBG_IOCTL_HGCM_CALL(0):
                return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
        case VBG_IOCTL_LOG(0):
        case VBG_IOCTL_LOG_ALT(0):
                return vbg_ioctl_log(data);

        vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req);
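/*
 * Illustrative userspace sketch (not part of the driver; assumes the uapi
 * definitions for the VBG_IOCTL_* ioctls and the struct layouts, and that
 * hdr.version/hdr.type are also filled in as the uapi header describes):
 * version negotiation is normally the first request a client makes.
 *
 *      struct vbg_ioctl_driver_version_info info = { 0 };
 *      int fd = open("/dev/vboxguest", O_RDWR);
 *
 *      info.hdr.size_in = sizeof(info.hdr) + sizeof(info.u.in);
 *      info.hdr.size_out = sizeof(info.hdr) + sizeof(info.u.out);
 *      info.u.in.req_version = VBG_IOC_VERSION;
 *      info.u.in.min_version = VBG_IOC_VERSION & 0xffff0000;
 *      ioctl(fd, VBG_IOCTL_DRIVER_VERSION_INFO, &info);
 */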
/**
 * Report guest supported mouse-features to the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @features:		The set of features to report to the host.
 */
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
{
        struct vmmdev_mouse_status *req;

        req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
                            VBG_KERNEL_REQUEST);

        req->mouse_features = features;
        req->pointer_pos_x = 0;
        req->pointer_pos_y = 0;

        rc = vbg_req_perform(gdev, req);
                vbg_err("%s error, rc: %d\n", __func__, rc);

        vbg_req_free(req, sizeof(*req));
        return vbg_status_code_to_errno(rc);
/** Core interrupt service routine. */
irqreturn_t vbg_core_isr(int irq, void *dev_id)
{
        struct vbg_dev *gdev = dev_id;
        struct vmmdev_events *req = gdev->ack_events_req;
        bool mouse_position_changed = false;
        unsigned long flags;

        if (!gdev->mmio->V.V1_04.have_events)

        /* Get and acknowledge events. */
        req->header.rc = VERR_INTERNAL_ERROR;

        rc = vbg_req_perform(gdev, req);
                vbg_err("Error performing events req, rc: %d\n", rc);

        events = req->events;

        if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
                mouse_position_changed = true;
                events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;

        if (events & VMMDEV_EVENT_HGCM) {
                wake_up(&gdev->hgcm_wq);
                events &= ~VMMDEV_EVENT_HGCM;

        if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
                schedule_work(&gdev->mem_balloon.work);
                events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;

        spin_lock_irqsave(&gdev->event_spinlock, flags);
        gdev->pending_events |= events;
        spin_unlock_irqrestore(&gdev->event_spinlock, flags);

        wake_up(&gdev->event_wq);
        if (mouse_position_changed)
                vbg_linux_mouse_event(gdev);