drivers/virt/vboxguest/vboxguest_core.c
1 /* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
2 /*
3 * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
5 * Copyright (C) 2007-2016 Oracle Corporation
6 */
8 #include <linux/device.h>
9 #include <linux/mm.h>
10 #include <linux/sched.h>
11 #include <linux/sizes.h>
12 #include <linux/slab.h>
13 #include <linux/vbox_err.h>
14 #include <linux/vbox_utils.h>
15 #include <linux/vmalloc.h>
16 #include "vboxguest_core.h"
17 #include "vboxguest_version.h"
19 /* Get the pointer to the first HGCM parameter. */
20 #define VBG_IOCTL_HGCM_CALL_PARMS(a) \
21 ((struct vmmdev_hgcm_function_parameter *)( \
22 (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
23 /* Get the pointer to the first HGCM parameter in a 32-bit request. */
24 #define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
25 ((struct vmmdev_hgcm_function_parameter32 *)( \
26 (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
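/*
 * Illustrative sketch (not part of the driver): these macros assume the
 * buffer layout used by vbg_ioctl_hgcm_call() further down, i.e. a
 * struct vbg_ioctl_hgcm_call header immediately followed by parm_count
 * HGCM function parameters:
 *
 *	struct vmmdev_hgcm_function_parameter *parm =
 *		VBG_IOCTL_HGCM_CALL_PARMS(call);
 *	for (i = 0; i < call->parm_count; i++)
 *		handle_parm(&parm[i]);	// handle_parm() is hypothetical
 */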
28 #define GUEST_MAPPINGS_TRIES 5
30 /**
31 * Reserves memory in which the VMM can relocate any guest mappings
32 * that are floating around.
34 * This operation is a little bit tricky since the VMM might not accept
35 * just any address because of address clashes between the three contexts
36 * it operates in, so we try several times.
38 * Failure to reserve the guest mappings is ignored.
40 * @gdev: The Guest extension device.
42 static void vbg_guest_mappings_init(struct vbg_dev *gdev)
44 struct vmmdev_hypervisorinfo *req;
45 void *guest_mappings[GUEST_MAPPINGS_TRIES];
46 struct page **pages = NULL;
47 u32 size, hypervisor_size;
48 int i, rc;
50 /* Query the required space. */
51 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
52 if (!req)
53 return;
55 req->hypervisor_start = 0;
56 req->hypervisor_size = 0;
57 rc = vbg_req_perform(gdev, req);
58 if (rc < 0)
59 goto out;
62 * The VMM will report back if there is nothing it wants to map, like
63 * for instance in VT-x and AMD-V mode.
65 if (req->hypervisor_size == 0)
66 goto out;
68 hypervisor_size = req->hypervisor_size;
69 /* Add 4M so that we can align the vmap to 4MiB as the host requires. */
70 size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
72 pages = kmalloc(sizeof(*pages) * (size >> PAGE_SHIFT), GFP_KERNEL);
73 if (!pages)
74 goto out;
76 gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
77 if (!gdev->guest_mappings_dummy_page)
78 goto out;
80 for (i = 0; i < (size >> PAGE_SHIFT); i++)
81 pages[i] = gdev->guest_mappings_dummy_page;
84 * Try several times; the VMM might not accept some addresses because
85 * of address clashes between the three contexts.
87 for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
88 guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
89 VM_MAP, PAGE_KERNEL_RO);
90 if (!guest_mappings[i])
91 break;
93 req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
94 req->header.rc = VERR_INTERNAL_ERROR;
95 req->hypervisor_size = hypervisor_size;
96 req->hypervisor_start =
97 (unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
99 rc = vbg_req_perform(gdev, req);
100 if (rc >= 0) {
101 gdev->guest_mappings = guest_mappings[i];
102 break;
106 /* Free vmap's from failed attempts. */
107 while (--i >= 0)
108 vunmap(guest_mappings[i]);
110 /* On failure free the dummy-page backing the vmap */
111 if (!gdev->guest_mappings) {
112 __free_page(gdev->guest_mappings_dummy_page);
113 gdev->guest_mappings_dummy_page = NULL;
116 out:
117 kfree(req);
118 kfree(pages);
122 * Undo what vbg_guest_mappings_init did.
124 * @gdev: The Guest extension device.
126 static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
128 struct vmmdev_hypervisorinfo *req;
129 int rc;
131 if (!gdev->guest_mappings)
132 return;
135 * Tell the host that we're going to free the memory we reserved for
136 * it, then free it up. (Leak the memory if anything goes wrong here.)
138 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
139 if (!req)
140 return;
142 req->hypervisor_start = 0;
143 req->hypervisor_size = 0;
145 rc = vbg_req_perform(gdev, req);
147 kfree(req);
149 if (rc < 0) {
150 vbg_err("%s error: %d\n", __func__, rc);
151 return;
154 vunmap(gdev->guest_mappings);
155 gdev->guest_mappings = NULL;
157 __free_page(gdev->guest_mappings_dummy_page);
158 gdev->guest_mappings_dummy_page = NULL;
162 * Report the guest information to the host.
163 * Return: 0 or negative errno value.
164 * @gdev: The Guest extension device.
166 static int vbg_report_guest_info(struct vbg_dev *gdev)
169 * Allocate and fill in the two guest info reports.
171 struct vmmdev_guest_info *req1 = NULL;
172 struct vmmdev_guest_info2 *req2 = NULL;
173 int rc, ret = -ENOMEM;
175 req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
176 req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
177 if (!req1 || !req2)
178 goto out_free;
180 req1->interface_version = VMMDEV_VERSION;
181 req1->os_type = VMMDEV_OSTYPE_LINUX26;
182 #if __BITS_PER_LONG == 64
183 req1->os_type |= VMMDEV_OSTYPE_X64;
184 #endif
186 req2->additions_major = VBG_VERSION_MAJOR;
187 req2->additions_minor = VBG_VERSION_MINOR;
188 req2->additions_build = VBG_VERSION_BUILD;
189 req2->additions_revision = VBG_SVN_REV;
190 /* (no features defined yet) */
191 req2->additions_features = 0;
192 strlcpy(req2->name, VBG_VERSION_STRING,
193 sizeof(req2->name));
196 * There are two protocols here:
197 * 1. INFO2 + INFO1. Supported by >=3.2.51.
198 * 2. INFO1 and optionally INFO2. The old protocol.
200 * We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED
201 * if not supported by the VMMDev (message ordering requirement).
203 rc = vbg_req_perform(gdev, req2);
204 if (rc >= 0) {
205 rc = vbg_req_perform(gdev, req1);
206 } else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
207 rc = vbg_req_perform(gdev, req1);
208 if (rc >= 0) {
209 rc = vbg_req_perform(gdev, req2);
210 if (rc == VERR_NOT_IMPLEMENTED)
211 rc = VINF_SUCCESS;
214 ret = vbg_status_code_to_errno(rc);
216 out_free:
217 kfree(req2);
218 kfree(req1);
219 return ret;
223 * Report the guest driver status to the host.
224 * Return: 0 or negative errno value.
225 * @gdev: The Guest extension device.
226 * @active: Flag whether the driver is now active or not.
228 static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
230 struct vmmdev_guest_status *req;
231 int rc;
233 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
234 if (!req)
235 return -ENOMEM;
237 req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
238 if (active)
239 req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
240 else
241 req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
242 req->flags = 0;
244 rc = vbg_req_perform(gdev, req);
245 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
246 rc = VINF_SUCCESS;
248 kfree(req);
250 return vbg_status_code_to_errno(rc);
254 * Inflate the balloon by one chunk. The caller owns the balloon mutex.
255 * Return: 0 or negative errno value.
256 * @gdev: The Guest extension device.
257 * @chunk_idx: Index of the chunk.
259 static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
261 struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
262 struct page **pages;
263 int i, rc, ret;
265 pages = kmalloc(sizeof(*pages) * VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
266 GFP_KERNEL | __GFP_NOWARN);
267 if (!pages)
268 return -ENOMEM;
270 req->header.size = sizeof(*req);
271 req->inflate = true;
272 req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
274 for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
275 pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
276 if (!pages[i]) {
277 ret = -ENOMEM;
278 goto out_error;
281 req->phys_page[i] = page_to_phys(pages[i]);
284 rc = vbg_req_perform(gdev, req);
285 if (rc < 0) {
286 vbg_err("%s error, rc: %d\n", __func__, rc);
287 ret = vbg_status_code_to_errno(rc);
288 goto out_error;
291 gdev->mem_balloon.pages[chunk_idx] = pages;
293 return 0;
295 out_error:
296 while (--i >= 0)
297 __free_page(pages[i]);
298 kfree(pages);
300 return ret;
304 * Deflate the balloon by one chunk. The caller owns the balloon mutex.
305 * Return: 0 or negative errno value.
306 * @gdev: The Guest extension device.
307 * @chunk_idx: Index of the chunk.
309 static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
311 struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
312 struct page **pages = gdev->mem_balloon.pages[chunk_idx];
313 int i, rc;
315 req->header.size = sizeof(*req);
316 req->inflate = false;
317 req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
319 for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
320 req->phys_page[i] = page_to_phys(pages[i]);
322 rc = vbg_req_perform(gdev, req);
323 if (rc < 0) {
324 vbg_err("%s error, rc: %d\n", __func__, rc);
325 return vbg_status_code_to_errno(rc);
328 for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
329 __free_page(pages[i]);
330 kfree(pages);
331 gdev->mem_balloon.pages[chunk_idx] = NULL;
333 return 0;
337 * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
338 * the host wants the balloon to be and adjust accordingly.
340 static void vbg_balloon_work(struct work_struct *work)
342 struct vbg_dev *gdev =
343 container_of(work, struct vbg_dev, mem_balloon.work);
344 struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
345 u32 i, chunks;
346 int rc, ret;
349 * Setting this bit means that we request the value from the host and
350 * change the guest memory balloon according to the returned value.
352 req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
353 rc = vbg_req_perform(gdev, req);
354 if (rc < 0) {
355 vbg_err("%s error, rc: %d\n", __func__, rc);
356 return;
360 * The host always returns the same maximum number of chunks, so
361 * we do this once.
363 if (!gdev->mem_balloon.max_chunks) {
364 gdev->mem_balloon.pages =
365 devm_kcalloc(gdev->dev, req->phys_mem_chunks,
366 sizeof(struct page **), GFP_KERNEL);
367 if (!gdev->mem_balloon.pages)
368 return;
370 gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
373 chunks = req->balloon_chunks;
374 if (chunks > gdev->mem_balloon.max_chunks) {
375 vbg_err("%s: illegal balloon size %u (max=%u)\n",
376 __func__, chunks, gdev->mem_balloon.max_chunks);
377 return;
380 if (chunks > gdev->mem_balloon.chunks) {
381 /* inflate */
382 for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
383 ret = vbg_balloon_inflate(gdev, i);
384 if (ret < 0)
385 return;
387 gdev->mem_balloon.chunks++;
389 } else {
390 /* deflate */
391 for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
392 ret = vbg_balloon_deflate(gdev, i);
393 if (ret < 0)
394 return;
396 gdev->mem_balloon.chunks--;
402 * Callback for heartbeat timer.
404 static void vbg_heartbeat_timer(struct timer_list *t)
406 struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);
408 vbg_req_perform(gdev, gdev->guest_heartbeat_req);
409 mod_timer(&gdev->heartbeat_timer,
410 msecs_to_jiffies(gdev->heartbeat_interval_ms));
414 * Configure the host to check guest's heartbeat
415 * and get heartbeat interval from the host.
416 * Return: 0 or negative errno value.
417 * @gdev: The Guest extension device.
418 * @enabled: Set true to enable guest heartbeat checks on host.
420 static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
422 struct vmmdev_heartbeat *req;
423 int rc;
425 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
426 if (!req)
427 return -ENOMEM;
429 req->enabled = enabled;
430 req->interval_ns = 0;
431 rc = vbg_req_perform(gdev, req);
432 do_div(req->interval_ns, 1000000); /* ns -> ms */
433 gdev->heartbeat_interval_ms = req->interval_ns;
434 kfree(req);
436 return vbg_status_code_to_errno(rc);
440 * Initializes the heartbeat timer. This feature may be disabled by the host.
441 * Return: 0 or negative errno value.
442 * @gdev: The Guest extension device.
444 static int vbg_heartbeat_init(struct vbg_dev *gdev)
446 int ret;
448 /* Make sure that heartbeat checking is disabled if we fail. */
449 ret = vbg_heartbeat_host_config(gdev, false);
450 if (ret < 0)
451 return ret;
453 ret = vbg_heartbeat_host_config(gdev, true);
454 if (ret < 0)
455 return ret;
458 * Preallocate the request to use it from the timer callback because:
459 * 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
460 * and the timer callback runs at DISPATCH_LEVEL;
461 * 2) avoid repeated allocations.
463 gdev->guest_heartbeat_req = vbg_req_alloc(
464 sizeof(*gdev->guest_heartbeat_req),
465 VMMDEVREQ_GUEST_HEARTBEAT);
466 if (!gdev->guest_heartbeat_req)
467 return -ENOMEM;
469 vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
470 __func__, gdev->heartbeat_interval_ms);
471 mod_timer(&gdev->heartbeat_timer, 0);
473 return 0;
477 * Clean up heartbeat code: stop the heartbeat timer and disable host heartbeat checking.
478 * @gdev: The Guest extension device.
480 static void vbg_heartbeat_exit(struct vbg_dev *gdev)
482 del_timer_sync(&gdev->heartbeat_timer);
483 vbg_heartbeat_host_config(gdev, false);
484 kfree(gdev->guest_heartbeat_req);
489 * Applies a change to the bit usage tracker.
490 * Return: true if the mask changed, false if not.
491 * @tracker: The bit usage tracker.
492 * @changed: The bits to change.
493 * @previous: The previous value of the bits.
495 static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
496 u32 changed, u32 previous)
498 bool global_change = false;
500 while (changed) {
501 u32 bit = ffs(changed) - 1;
502 u32 bitmask = BIT(bit);
504 if (bitmask & previous) {
505 tracker->per_bit_usage[bit] -= 1;
506 if (tracker->per_bit_usage[bit] == 0) {
507 global_change = true;
508 tracker->mask &= ~bitmask;
510 } else {
511 tracker->per_bit_usage[bit] += 1;
512 if (tracker->per_bit_usage[bit] == 1) {
513 global_change = true;
514 tracker->mask |= bitmask;
518 changed &= ~bitmask;
521 return global_change;
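/*
 * Illustrative sketch (not part of the driver): callers such as
 * vbg_set_session_event_filter() below derive the arguments from the old
 * and new per-session mask:
 *
 *	changed = previous ^ session->event_filter;
 *	vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
 */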
525 * Init and termination worker for resetting the event filter on the host.
526 * Return: 0 or negative errno value.
527 * @gdev: The Guest extension device.
528 * @fixed_events: Fixed events (init time).
530 static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
531 u32 fixed_events)
533 struct vmmdev_mask *req;
534 int rc;
536 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
537 if (!req)
538 return -ENOMEM;
540 req->not_mask = U32_MAX & ~fixed_events;
541 req->or_mask = fixed_events;
542 rc = vbg_req_perform(gdev, req);
543 if (rc < 0)
544 vbg_err("%s error, rc: %d\n", __func__, rc);
546 kfree(req);
547 return vbg_status_code_to_errno(rc);
551 * Changes the event filter mask for the given session.
553 * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
554 * do session cleanup. Takes the session mutex.
556 * Return: 0 or negative errno value.
557 * @gdev: The Guest extension device.
558 * @session: The session.
559 * @or_mask: The events to add.
560 * @not_mask: The events to remove.
561 * @session_termination: Set if we're called by the session cleanup code.
562 * This tweaks the error handling so we perform
563 * proper session cleanup even if the host
564 * misbehaves.
566 static int vbg_set_session_event_filter(struct vbg_dev *gdev,
567 struct vbg_session *session,
568 u32 or_mask, u32 not_mask,
569 bool session_termination)
571 struct vmmdev_mask *req;
572 u32 changed, previous;
573 int rc, ret = 0;
575 /* Allocate a request buffer before taking the session mutex */
576 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
577 if (!req) {
578 if (!session_termination)
579 return -ENOMEM;
580 /* Ignore allocation failure, we must do session cleanup. */
583 mutex_lock(&gdev->session_mutex);
585 /* Apply the changes to the session mask. */
586 previous = session->event_filter;
587 session->event_filter |= or_mask;
588 session->event_filter &= ~not_mask;
590 /* If anything actually changed, update the global usage counters. */
591 changed = previous ^ session->event_filter;
592 if (!changed)
593 goto out;
595 vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
596 or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;
598 if (gdev->event_filter_host == or_mask || !req)
599 goto out;
601 gdev->event_filter_host = or_mask;
602 req->or_mask = or_mask;
603 req->not_mask = ~or_mask;
604 rc = vbg_req_perform(gdev, req);
605 if (rc < 0) {
606 ret = vbg_status_code_to_errno(rc);
608 /* Failed, roll back (unless it's session termination time). */
609 gdev->event_filter_host = U32_MAX;
610 if (session_termination)
611 goto out;
613 vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
614 session->event_filter);
615 session->event_filter = previous;
618 out:
619 mutex_unlock(&gdev->session_mutex);
620 kfree(req);
622 return ret;
626 * Init and termination worker for setting the guest capabilities to zero on the host.
627 * Return: 0 or negative errno value.
628 * @gdev: The Guest extension device.
630 static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
632 struct vmmdev_mask *req;
633 int rc;
635 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
636 if (!req)
637 return -ENOMEM;
639 req->not_mask = U32_MAX;
640 req->or_mask = 0;
641 rc = vbg_req_perform(gdev, req);
642 if (rc < 0)
643 vbg_err("%s error, rc: %d\n", __func__, rc);
645 kfree(req);
646 return vbg_status_code_to_errno(rc);
650 * Sets the guest capabilities for a session. Takes the session mutex.
651 * Return: 0 or negative errno value.
652 * @gdev: The Guest extension device.
653 * @session: The session.
654 * @or_mask: The capabilities to add.
655 * @not_mask: The capabilities to remove.
656 * @session_termination: Set if we're called by the session cleanup code.
657 * This tweaks the error handling so we perform
658 * proper session cleanup even if the host
659 * misbehaves.
661 static int vbg_set_session_capabilities(struct vbg_dev *gdev,
662 struct vbg_session *session,
663 u32 or_mask, u32 not_mask,
664 bool session_termination)
666 struct vmmdev_mask *req;
667 u32 changed, previous;
668 int rc, ret = 0;
671 /* Allocate a request buffer before taking the session mutex */
671 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
672 if (!req) {
673 if (!session_termination)
674 return -ENOMEM;
675 /* Ignore allocation failure, we must do session cleanup. */
678 mutex_lock(&gdev->session_mutex);
680 /* Apply the changes to the session mask. */
681 previous = session->guest_caps;
682 session->guest_caps |= or_mask;
683 session->guest_caps &= ~not_mask;
685 /* If anything actually changed, update the global usage counters. */
686 changed = previous ^ session->guest_caps;
687 if (!changed)
688 goto out;
690 vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
691 or_mask = gdev->guest_caps_tracker.mask;
693 if (gdev->guest_caps_host == or_mask || !req)
694 goto out;
696 gdev->guest_caps_host = or_mask;
697 req->or_mask = or_mask;
698 req->not_mask = ~or_mask;
699 rc = vbg_req_perform(gdev, req);
700 if (rc < 0) {
701 ret = vbg_status_code_to_errno(rc);
703 /* Failed, roll back (unless it's session termination time). */
704 gdev->guest_caps_host = U32_MAX;
705 if (session_termination)
706 goto out;
708 vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
709 session->guest_caps);
710 session->guest_caps = previous;
713 out:
714 mutex_unlock(&gdev->session_mutex);
715 kfree(req);
717 return ret;
721 * vbg_query_host_version - get the host feature mask and version information.
722 * Return: 0 or negative errno value.
723 * @gdev: The Guest extension device.
725 static int vbg_query_host_version(struct vbg_dev *gdev)
727 struct vmmdev_host_version *req;
728 int rc, ret;
730 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
731 if (!req)
732 return -ENOMEM;
734 rc = vbg_req_perform(gdev, req);
735 ret = vbg_status_code_to_errno(rc);
736 if (ret)
737 goto out;
739 snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
740 req->major, req->minor, req->build, req->revision);
741 gdev->host_features = req->features;
743 vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
744 gdev->host_features);
746 if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
747 vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
748 ret = -ENODEV;
751 out:
752 kfree(req);
753 return ret;
757 * Initializes the VBoxGuest device extension when the
758 * device driver is loaded.
760 * The native code locates the VMMDev on the PCI bus and retrieves
761 * the MMIO and I/O port ranges; this function will take care of
762 * mapping the MMIO memory (if present). Upon successful return
763 * the native code should set up the interrupt handler.
765 * Return: 0 or negative errno value.
767 * @gdev: The Guest extension device.
768 * @fixed_events: Events that will be enabled upon init and no client
769 * will ever be allowed to mask.
771 int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
773 int ret = -ENOMEM;
775 gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
776 gdev->event_filter_host = U32_MAX; /* forces a report */
777 gdev->guest_caps_host = U32_MAX; /* forces a report */
779 init_waitqueue_head(&gdev->event_wq);
780 init_waitqueue_head(&gdev->hgcm_wq);
781 spin_lock_init(&gdev->event_spinlock);
782 mutex_init(&gdev->session_mutex);
783 mutex_init(&gdev->cancel_req_mutex);
784 timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
785 INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
787 gdev->mem_balloon.get_req =
788 vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
789 VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
790 gdev->mem_balloon.change_req =
791 vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
792 VMMDEVREQ_CHANGE_MEMBALLOON);
793 gdev->cancel_req =
794 vbg_req_alloc(sizeof(*(gdev->cancel_req)),
795 VMMDEVREQ_HGCM_CANCEL2);
796 gdev->ack_events_req =
797 vbg_req_alloc(sizeof(*gdev->ack_events_req),
798 VMMDEVREQ_ACKNOWLEDGE_EVENTS);
799 gdev->mouse_status_req =
800 vbg_req_alloc(sizeof(*gdev->mouse_status_req),
801 VMMDEVREQ_GET_MOUSE_STATUS);
803 if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
804 !gdev->cancel_req || !gdev->ack_events_req ||
805 !gdev->mouse_status_req)
806 goto err_free_reqs;
808 ret = vbg_query_host_version(gdev);
809 if (ret)
810 goto err_free_reqs;
812 ret = vbg_report_guest_info(gdev);
813 if (ret) {
814 vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
815 goto err_free_reqs;
818 ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
819 if (ret) {
820 vbg_err("vboxguest: Error setting fixed event filter: %d\n",
821 ret);
822 goto err_free_reqs;
825 ret = vbg_reset_host_capabilities(gdev);
826 if (ret) {
827 vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
828 ret);
829 goto err_free_reqs;
832 ret = vbg_core_set_mouse_status(gdev, 0);
833 if (ret) {
834 vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
835 goto err_free_reqs;
838 /* These may fail without requiring the driver init to fail. */
839 vbg_guest_mappings_init(gdev);
840 vbg_heartbeat_init(gdev);
842 /* All Done! */
843 ret = vbg_report_driver_status(gdev, true);
844 if (ret < 0)
845 vbg_err("vboxguest: Error reporting driver status: %d\n", ret);
847 return 0;
849 err_free_reqs:
850 kfree(gdev->mouse_status_req);
851 kfree(gdev->ack_events_req);
852 kfree(gdev->cancel_req);
853 kfree(gdev->mem_balloon.change_req);
854 kfree(gdev->mem_balloon.get_req);
855 return ret;
859 * Call this on exit to clean-up vboxguest-core managed resources.
861 * The native code should call this before the driver is unloaded,
862 * but don't call this on shutdown.
863 * @gdev: The Guest extension device.
865 void vbg_core_exit(struct vbg_dev *gdev)
867 vbg_heartbeat_exit(gdev);
868 vbg_guest_mappings_exit(gdev);
870 /* Clear the host flags (mouse status etc). */
871 vbg_reset_host_event_filter(gdev, 0);
872 vbg_reset_host_capabilities(gdev);
873 vbg_core_set_mouse_status(gdev, 0);
875 kfree(gdev->mouse_status_req);
876 kfree(gdev->ack_events_req);
877 kfree(gdev->cancel_req);
878 kfree(gdev->mem_balloon.change_req);
879 kfree(gdev->mem_balloon.get_req);
883 * Creates a VBoxGuest user session.
885 * vboxguest_linux.c calls this when userspace opens the char-device.
886 * Return: A pointer to the new session or an ERR_PTR on error.
887 * @gdev: The Guest extension device.
888 * @user: Set if this is a session for the vboxuser device.
890 struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
892 struct vbg_session *session;
894 session = kzalloc(sizeof(*session), GFP_KERNEL);
895 if (!session)
896 return ERR_PTR(-ENOMEM);
898 session->gdev = gdev;
899 session->user_session = user;
901 return session;
905 * Closes a VBoxGuest session.
906 * @session: The session to close (and free).
908 void vbg_core_close_session(struct vbg_session *session)
910 struct vbg_dev *gdev = session->gdev;
911 int i, rc;
913 vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
914 vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
916 for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
917 if (!session->hgcm_client_ids[i])
918 continue;
920 vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
923 kfree(session);
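/**
 * Check that a fixed-size ioctl payload has the expected in- and out-sizes.
 * Return: 0 on success, -EINVAL on a size mismatch.
 * @hdr: The ioctl header to check.
 * @in_size: Expected input payload size (excluding the header).
 * @out_size: Expected output payload size (excluding the header).
 */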
926 static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
927 size_t out_size)
929 if (hdr->size_in != (sizeof(*hdr) + in_size) ||
930 hdr->size_out != (sizeof(*hdr) + out_size))
931 return -EINVAL;
933 return 0;
936 static int vbg_ioctl_driver_version_info(
937 struct vbg_ioctl_driver_version_info *info)
939 const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
940 u16 min_maj_version, req_maj_version;
942 if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
943 return -EINVAL;
945 req_maj_version = info->u.in.req_version >> 16;
946 min_maj_version = info->u.in.min_version >> 16;
948 if (info->u.in.min_version > info->u.in.req_version ||
949 min_maj_version != req_maj_version)
950 return -EINVAL;
952 if (info->u.in.min_version <= VBG_IOC_VERSION &&
953 min_maj_version == vbg_maj_version) {
954 info->u.out.session_version = VBG_IOC_VERSION;
955 } else {
956 info->u.out.session_version = U32_MAX;
957 info->hdr.rc = VERR_VERSION_MISMATCH;
960 info->u.out.driver_version = VBG_IOC_VERSION;
961 info->u.out.driver_revision = 0;
962 info->u.out.reserved1 = 0;
963 info->u.out.reserved2 = 0;
965 return 0;
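/**
 * Wake-up condition for vbg_ioctl_wait_for_events(): true when one of the
 * requested events is pending, or when the session's waiters are cancelled.
 * The events themselves are consumed later, under the lock, by the caller.
 */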
968 static bool vbg_wait_event_cond(struct vbg_dev *gdev,
969 struct vbg_session *session,
970 u32 event_mask)
972 unsigned long flags;
973 bool wakeup;
974 u32 events;
976 spin_lock_irqsave(&gdev->event_spinlock, flags);
978 events = gdev->pending_events & event_mask;
979 wakeup = events || session->cancel_waiters;
981 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
983 return wakeup;
986 /* Must be called with the event_lock held */
987 static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
988 struct vbg_session *session,
989 u32 event_mask)
991 u32 events = gdev->pending_events & event_mask;
993 gdev->pending_events &= ~events;
994 return events;
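/**
 * Handle VBG_IOCTL_WAIT_FOR_EVENTS: sleep (interruptibly, with timeout) until
 * one of the requested events is pending, then consume and return it.
 * Return: 0, -EINTR, -ETIMEDOUT or another negative errno value.
 */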
997 static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
998 struct vbg_session *session,
999 struct vbg_ioctl_wait_for_events *wait)
1001 u32 timeout_ms = wait->u.in.timeout_ms;
1002 u32 event_mask = wait->u.in.events;
1003 unsigned long flags;
1004 long timeout;
1005 int ret = 0;
1007 if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
1008 return -EINVAL;
1010 if (timeout_ms == U32_MAX)
1011 timeout = MAX_SCHEDULE_TIMEOUT;
1012 else
1013 timeout = msecs_to_jiffies(timeout_ms);
1015 wait->u.out.events = 0;
1016 do {
1017 timeout = wait_event_interruptible_timeout(
1018 gdev->event_wq,
1019 vbg_wait_event_cond(gdev, session, event_mask),
1020 timeout);
1022 spin_lock_irqsave(&gdev->event_spinlock, flags);
1024 if (timeout < 0 || session->cancel_waiters) {
1025 ret = -EINTR;
1026 } else if (timeout == 0) {
1027 ret = -ETIMEDOUT;
1028 } else {
1029 wait->u.out.events =
1030 vbg_consume_events_locked(gdev, session, event_mask);
1033 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1036 * Someone else may have consumed the event(s) first, in
1037 * which case we go back to waiting.
1039 } while (ret == 0 && wait->u.out.events == 0);
1041 return ret;
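/**
 * Handle VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS: mark the session's waiters
 * as cancelled and wake up everybody sleeping on the event waitqueue.
 */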
1044 static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
1045 struct vbg_session *session,
1046 struct vbg_ioctl_hdr *hdr)
1048 unsigned long flags;
1050 if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
1051 return -EINVAL;
1053 spin_lock_irqsave(&gdev->event_spinlock, flags);
1054 session->cancel_waiters = true;
1055 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1057 wake_up(&gdev->event_wq);
1059 return 0;
1063 * Checks if the VMM request is allowed in the context of the given session.
1064 * Return: 0 or negative errno value.
1065 * @gdev: The Guest extension device.
1066 * @session: The calling session.
1067 * @req: The request.
1069 static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
1070 const struct vmmdev_request_header *req)
1072 const struct vmmdev_guest_status *guest_status;
1073 bool trusted_apps_only;
1075 switch (req->request_type) {
1076 /* Trusted users apps only. */
1077 case VMMDEVREQ_QUERY_CREDENTIALS:
1078 case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
1079 case VMMDEVREQ_REGISTER_SHARED_MODULE:
1080 case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
1081 case VMMDEVREQ_WRITE_COREDUMP:
1082 case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
1083 case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
1084 case VMMDEVREQ_CHECK_SHARED_MODULES:
1085 case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
1086 case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
1087 case VMMDEVREQ_REPORT_GUEST_STATS:
1088 case VMMDEVREQ_REPORT_GUEST_USER_STATE:
1089 case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
1090 trusted_apps_only = true;
1091 break;
1093 /* Anyone. */
1094 case VMMDEVREQ_GET_MOUSE_STATUS:
1095 case VMMDEVREQ_SET_MOUSE_STATUS:
1096 case VMMDEVREQ_SET_POINTER_SHAPE:
1097 case VMMDEVREQ_GET_HOST_VERSION:
1098 case VMMDEVREQ_IDLE:
1099 case VMMDEVREQ_GET_HOST_TIME:
1100 case VMMDEVREQ_SET_POWER_STATUS:
1101 case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
1102 case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
1103 case VMMDEVREQ_REPORT_GUEST_STATUS:
1104 case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
1105 case VMMDEVREQ_VIDEMODE_SUPPORTED:
1106 case VMMDEVREQ_GET_HEIGHT_REDUCTION:
1107 case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
1108 case VMMDEVREQ_VIDEMODE_SUPPORTED2:
1109 case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
1110 case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
1111 case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
1112 case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
1113 case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
1114 case VMMDEVREQ_GET_VRDPCHANGE_REQ:
1115 case VMMDEVREQ_LOG_STRING:
1116 case VMMDEVREQ_GET_SESSION_ID:
1117 trusted_apps_only = false;
1118 break;
1120 /* Depends on the request parameters... */
1121 case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
1122 guest_status = (const struct vmmdev_guest_status *)req;
1123 switch (guest_status->facility) {
1124 case VBOXGUEST_FACILITY_TYPE_ALL:
1125 case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
1126 vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
1127 guest_status->facility);
1128 return -EPERM;
1129 case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
1130 trusted_apps_only = true;
1131 break;
1132 case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
1133 case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
1134 case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
1135 default:
1136 trusted_apps_only = false;
1137 break;
1139 break;
1141 /* Anything else is not allowed. */
1142 default:
1143 vbg_err("Denying userspace vmm call type %#08x\n",
1144 req->request_type);
1145 return -EPERM;
1148 if (trusted_apps_only && session->user_session) {
1149 vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
1150 req->request_type);
1151 return -EPERM;
1154 return 0;
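/**
 * Handle VBG_IOCTL_VMMDEV_REQUEST: forward a raw VMMDev request from
 * userspace to the host, after vbg_req_allowed() has checked that the
 * session may issue this request type.
 * Return: 0 or negative errno value.
 */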
1157 static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
1158 struct vbg_session *session, void *data)
1160 struct vbg_ioctl_hdr *hdr = data;
1161 int ret;
1163 if (hdr->size_in != hdr->size_out)
1164 return -EINVAL;
1166 if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
1167 return -E2BIG;
1169 if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
1170 return -EINVAL;
1172 ret = vbg_req_allowed(gdev, session, data);
1173 if (ret < 0)
1174 return ret;
1176 vbg_req_perform(gdev, data);
1177 WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);
1179 return 0;
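/**
 * Handle VBG_IOCTL_HGCM_CONNECT: connect to an HGCM service and track the
 * returned client id in the session so it can be cleaned up on close.
 * Return: 0 or negative errno value.
 */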
1182 static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
1183 struct vbg_session *session,
1184 struct vbg_ioctl_hgcm_connect *conn)
1186 u32 client_id;
1187 int i, ret;
1189 if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
1190 return -EINVAL;
1192 /* Find a free place in the session's client id array and claim it */
1193 mutex_lock(&gdev->session_mutex);
1194 for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1195 if (!session->hgcm_client_ids[i]) {
1196 session->hgcm_client_ids[i] = U32_MAX;
1197 break;
1200 mutex_unlock(&gdev->session_mutex);
1202 if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1203 return -EMFILE;
1205 ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
1206 &conn->hdr.rc);
1208 mutex_lock(&gdev->session_mutex);
1209 if (ret == 0 && conn->hdr.rc >= 0) {
1210 conn->u.out.client_id = client_id;
1211 session->hgcm_client_ids[i] = client_id;
1212 } else {
1213 conn->u.out.client_id = 0;
1214 session->hgcm_client_ids[i] = 0;
1216 mutex_unlock(&gdev->session_mutex);
1218 return ret;
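/**
 * Handle VBG_IOCTL_HGCM_DISCONNECT: disconnect an HGCM client id owned by
 * this session and release its slot in the session's client-id array.
 * Return: 0 or negative errno value.
 */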
1221 static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
1222 struct vbg_session *session,
1223 struct vbg_ioctl_hgcm_disconnect *disconn)
1225 u32 client_id;
1226 int i, ret;
1228 if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
1229 return -EINVAL;
1231 client_id = disconn->u.in.client_id;
1232 if (client_id == 0 || client_id == U32_MAX)
1233 return -EINVAL;
1235 mutex_lock(&gdev->session_mutex);
1236 for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
1237 if (session->hgcm_client_ids[i] == client_id) {
1238 session->hgcm_client_ids[i] = U32_MAX;
1239 break;
1242 mutex_unlock(&gdev->session_mutex);
1244 if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1245 return -EINVAL;
1247 ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
1249 mutex_lock(&gdev->session_mutex);
1250 if (ret == 0 && disconn->hdr.rc >= 0)
1251 session->hgcm_client_ids[i] = 0;
1252 else
1253 session->hgcm_client_ids[i] = client_id;
1254 mutex_unlock(&gdev->session_mutex);
1256 return ret;
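/**
 * Handle VBG_IOCTL_HGCM_CALL(_32): validate the sizes, parameter count and
 * client id, then perform the HGCM call on behalf of the session.
 * Return: 0 or negative errno value.
 */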
1259 static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
1260 struct vbg_session *session, bool f32bit,
1261 struct vbg_ioctl_hgcm_call *call)
1263 size_t actual_size;
1264 u32 client_id;
1265 int i, ret;
1267 if (call->hdr.size_in < sizeof(*call))
1268 return -EINVAL;
1270 if (call->hdr.size_in != call->hdr.size_out)
1271 return -EINVAL;
1273 if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
1274 return -E2BIG;
1276 client_id = call->client_id;
1277 if (client_id == 0 || client_id == U32_MAX)
1278 return -EINVAL;
1280 actual_size = sizeof(*call);
1281 if (f32bit)
1282 actual_size += call->parm_count *
1283 sizeof(struct vmmdev_hgcm_function_parameter32);
1284 else
1285 actual_size += call->parm_count *
1286 sizeof(struct vmmdev_hgcm_function_parameter);
1287 if (call->hdr.size_in < actual_size) {
1288 vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
1289 call->hdr.size_in, actual_size);
1290 return -EINVAL;
1292 call->hdr.size_out = actual_size;
1295 * Validate the client id.
1297 mutex_lock(&gdev->session_mutex);
1298 for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
1299 if (session->hgcm_client_ids[i] == client_id)
1300 break;
1301 mutex_unlock(&gdev->session_mutex);
1302 if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
1303 vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
1304 client_id);
1305 return -EINVAL;
1308 if (f32bit)
1309 ret = vbg_hgcm_call32(gdev, client_id,
1310 call->function, call->timeout_ms,
1311 VBG_IOCTL_HGCM_CALL_PARMS32(call),
1312 call->parm_count, &call->hdr.rc);
1313 else
1314 ret = vbg_hgcm_call(gdev, client_id,
1315 call->function, call->timeout_ms,
1316 VBG_IOCTL_HGCM_CALL_PARMS(call),
1317 call->parm_count, &call->hdr.rc);
1319 if (ret == -E2BIG) {
1320 /* E2BIG needs to be reported through the hdr.rc field. */
1321 call->hdr.rc = VERR_OUT_OF_RANGE;
1322 ret = 0;
1325 if (ret && ret != -EINTR && ret != -ETIMEDOUT)
1326 vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);
1328 return ret;
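/**
 * Handle VBG_IOCTL_LOG: write the message passed in by userspace to the
 * kernel log.
 * Return: 0 or negative errno value.
 */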
1331 static int vbg_ioctl_log(struct vbg_ioctl_log *log)
1333 if (log->hdr.size_out != sizeof(log->hdr))
1334 return -EINVAL;
1336 vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
1337 log->u.in.msg);
1339 return 0;
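/**
 * Handle VBG_IOCTL_CHANGE_FILTER_MASK: adjust which host events this
 * session is interested in.
 * Return: 0 or negative errno value.
 */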
1342 static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
1343 struct vbg_session *session,
1344 struct vbg_ioctl_change_filter *filter)
1346 u32 or_mask, not_mask;
1348 if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
1349 return -EINVAL;
1351 or_mask = filter->u.in.or_mask;
1352 not_mask = filter->u.in.not_mask;
1354 if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
1355 return -EINVAL;
1357 return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
1358 false);
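/**
 * Handle VBG_IOCTL_CHANGE_GUEST_CAPABILITIES: adjust the guest capabilities
 * reported for this session and return the resulting session and global caps.
 * Return: 0 or negative errno value.
 */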
1361 static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
1362 struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
1364 u32 or_mask, not_mask;
1365 int ret;
1367 if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
1368 return -EINVAL;
1370 or_mask = caps->u.in.or_mask;
1371 not_mask = caps->u.in.not_mask;
1373 if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
1374 return -EINVAL;
1376 ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
1377 false);
1378 if (ret)
1379 return ret;
1381 caps->u.out.session_caps = session->guest_caps;
1382 caps->u.out.global_caps = gdev->guest_caps_host;
1384 return 0;
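/**
 * Handle VBG_IOCTL_CHECK_BALLOON: report the current balloon size in chunks
 * and that balloon changes are handled in the kernel rather than userspace.
 * Return: 0 or negative errno value.
 */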
1387 static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
1388 struct vbg_ioctl_check_balloon *balloon_info)
1390 if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
1391 return -EINVAL;
1393 balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
1395 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
1396 * events entirely in the kernel, see vbg_core_isr().
1398 balloon_info->u.out.handle_in_r3 = false;
1400 return 0;
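/**
 * Handle VBG_IOCTL_WRITE_CORE_DUMP: ask the host to write a core dump of
 * the guest.
 * Return: 0 or negative errno value.
 */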
1403 static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
1404 struct vbg_ioctl_write_coredump *dump)
1406 struct vmmdev_write_core_dump *req;
1408 if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
1409 return -EINVAL;
1411 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
1412 if (!req)
1413 return -ENOMEM;
1415 req->flags = dump->u.in.flags;
1416 dump->hdr.rc = vbg_req_perform(gdev, req);
1418 kfree(req);
1419 return 0;
1423 * Common IOCtl for user to kernel communication.
1424 * Return: 0 or negative errno value.
1425 * @session: The client session.
1426 * @req: The requested function.
1427 * @data: The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
1429 int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
1431 unsigned int req_no_size = req & ~IOCSIZE_MASK;
1432 struct vbg_dev *gdev = session->gdev;
1433 struct vbg_ioctl_hdr *hdr = data;
1434 bool f32bit = false;
1436 hdr->rc = VINF_SUCCESS;
1437 if (!hdr->size_out)
1438 hdr->size_out = hdr->size_in;
1441 * hdr->version and hdr->size_in / hdr->size_out minimum size are
1442 * already checked by vbg_misc_device_ioctl().
1445 /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
1446 if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
1447 req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
1448 return vbg_ioctl_vmmrequest(gdev, session, data);
1450 if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
1451 return -EINVAL;
1453 /* Fixed size requests. */
1454 switch (req) {
1455 case VBG_IOCTL_DRIVER_VERSION_INFO:
1456 return vbg_ioctl_driver_version_info(data);
1457 case VBG_IOCTL_HGCM_CONNECT:
1458 return vbg_ioctl_hgcm_connect(gdev, session, data);
1459 case VBG_IOCTL_HGCM_DISCONNECT:
1460 return vbg_ioctl_hgcm_disconnect(gdev, session, data);
1461 case VBG_IOCTL_WAIT_FOR_EVENTS:
1462 return vbg_ioctl_wait_for_events(gdev, session, data);
1463 case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
1464 return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
1465 case VBG_IOCTL_CHANGE_FILTER_MASK:
1466 return vbg_ioctl_change_filter_mask(gdev, session, data);
1467 case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
1468 return vbg_ioctl_change_guest_capabilities(gdev, session, data);
1469 case VBG_IOCTL_CHECK_BALLOON:
1470 return vbg_ioctl_check_balloon(gdev, data);
1471 case VBG_IOCTL_WRITE_CORE_DUMP:
1472 return vbg_ioctl_write_core_dump(gdev, data);
1475 /* Variable sized requests. */
1476 switch (req_no_size) {
1477 #ifdef CONFIG_COMPAT
1478 case VBG_IOCTL_HGCM_CALL_32(0):
1479 f32bit = true;
1480 /* Fall through */
1481 #endif
1482 case VBG_IOCTL_HGCM_CALL(0):
1483 return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
1484 case VBG_IOCTL_LOG(0):
1485 return vbg_ioctl_log(data);
1488 vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
1489 return -ENOTTY;
1493 * Report guest supported mouse-features to the host.
1495 * Return: 0 or negative errno value.
1496 * @gdev: The Guest extension device.
1497 * @features: The set of features to report to the host.
1499 int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
1501 struct vmmdev_mouse_status *req;
1502 int rc;
1504 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
1505 if (!req)
1506 return -ENOMEM;
1508 req->mouse_features = features;
1509 req->pointer_pos_x = 0;
1510 req->pointer_pos_y = 0;
1512 rc = vbg_req_perform(gdev, req);
1513 if (rc < 0)
1514 vbg_err("%s error, rc: %d\n", __func__, rc);
1516 kfree(req);
1517 return vbg_status_code_to_errno(rc);
1520 /** Core interrupt service routine. */
1521 irqreturn_t vbg_core_isr(int irq, void *dev_id)
1523 struct vbg_dev *gdev = dev_id;
1524 struct vmmdev_events *req = gdev->ack_events_req;
1525 bool mouse_position_changed = false;
1526 unsigned long flags;
1527 u32 events = 0;
1528 int rc;
1530 if (!gdev->mmio->V.V1_04.have_events)
1531 return IRQ_NONE;
533 /* Get and acknowledge events. */
1534 req->header.rc = VERR_INTERNAL_ERROR;
1535 req->events = 0;
1536 rc = vbg_req_perform(gdev, req);
1537 if (rc < 0) {
1538 vbg_err("Error performing events req, rc: %d\n", rc);
1539 return IRQ_NONE;
1542 events = req->events;
1544 if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
1545 mouse_position_changed = true;
1546 events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
1549 if (events & VMMDEV_EVENT_HGCM) {
1550 wake_up(&gdev->hgcm_wq);
1551 events &= ~VMMDEV_EVENT_HGCM;
1554 if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
1555 schedule_work(&gdev->mem_balloon.work);
1556 events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
1559 if (events) {
1560 spin_lock_irqsave(&gdev->event_spinlock, flags);
1561 gdev->pending_events |= events;
1562 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
1564 wake_up(&gdev->event_wq);
1567 if (mouse_position_changed)
1568 vbg_linux_mouse_event(gdev);
1570 return IRQ_HANDLED;