/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
 *
 * Copyright (C) 2007-2016 Oracle Corporation
 */

#include <linux/device.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include <linux/vmalloc.h>
#include "vboxguest_core.h"
#include "vboxguest_version.h"

/* Get the pointer to the first HGCM parameter. */
#define VBG_IOCTL_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
/* Get the pointer to the first HGCM parameter in a 32-bit request. */
#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
	((struct vmmdev_hgcm_function_parameter32 *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))

#define GUEST_MAPPINGS_TRIES	5

#define VBG_KERNEL_REQUEST \
	(VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
	 VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
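/*
 * Note: requests issued with VBG_KERNEL_REQUEST originate from the driver
 * itself rather than from a userspace session, so no usermode process,
 * console or trust information is attached to them.
 */
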
/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so we try several times.
 *
 * Failure to reserve the guest mappings is ignored.
 *
 * @gdev:		The Guest extension device.
 */
static void vbg_guest_mappings_init(struct vbg_dev *gdev)
{
	struct vmmdev_hypervisorinfo *req;
	void *guest_mappings[GUEST_MAPPINGS_TRIES];
	struct page **pages = NULL;
	u32 size, hypervisor_size;
	int i, rc;

	/* Query the required space. */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		goto out;

	/*
	 * The VMM will report back if there is nothing it wants to map, like
	 * for instance in VT-x and AMD-V mode.
	 */
	if (req->hypervisor_size == 0)
		goto out;

	hypervisor_size = req->hypervisor_size;
	/* Add 4M so that we can align the vmap to 4MiB as the host requires. */
	size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;

	pages = kmalloc_array(size >> PAGE_SHIFT, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		goto out;

	gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
	if (!gdev->guest_mappings_dummy_page)
		goto out;
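
	/*
	 * Note: every page of the mapping below is backed by the same dummy
	 * page; the vmap only has to reserve a range of kernel address space
	 * for the host, the backing memory is never touched by the guest.
	 */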
	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = gdev->guest_mappings_dummy_page;

	/*
	 * Try several times, the VMM might not accept some addresses because
	 * of address clashes between the three contexts.
	 */
	for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
		guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
					 VM_MAP, PAGE_KERNEL_RO);
		if (!guest_mappings[i])
			break;

		req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
		req->header.rc = VERR_INTERNAL_ERROR;
		req->hypervisor_size = hypervisor_size;
		req->hypervisor_start =
			(unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);

		rc = vbg_req_perform(gdev, req);
		if (rc >= 0) {
			gdev->guest_mappings = guest_mappings[i];
			break;
		}
	}

	/* Free vmap's from failed attempts. */
	while (--i >= 0)
		vunmap(guest_mappings[i]);

	/* On failure free the dummy-page backing the vmap */
	if (!gdev->guest_mappings) {
		__free_page(gdev->guest_mappings_dummy_page);
		gdev->guest_mappings_dummy_page = NULL;
	}

out:
	vbg_req_free(req, sizeof(*req));
	kfree(pages);
}

/**
 * Undo what vbg_guest_mappings_init did.
 *
 * @gdev:		The Guest extension device.
 */
static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
{
	struct vmmdev_hypervisorinfo *req;
	int rc;

	if (!gdev->guest_mappings)
		return;

	/*
	 * Tell the host that we're going to free the memory we reserved for
	 * it, then free it up. (Leak the memory if anything goes wrong here.)
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;

	rc = vbg_req_perform(gdev, req);

	vbg_req_free(req, sizeof(*req));

	if (rc < 0) {
		vbg_err("%s error: %d\n", __func__, rc);
		return;
	}

	vunmap(gdev->guest_mappings);
	gdev->guest_mappings = NULL;

	__free_page(gdev->guest_mappings_dummy_page);
	gdev->guest_mappings_dummy_page = NULL;
}

/**
 * Report the guest information to the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_report_guest_info(struct vbg_dev *gdev)
{
	/*
	 * Allocate and fill in the two guest info reports.
	 */
	struct vmmdev_guest_info *req1 = NULL;
	struct vmmdev_guest_info2 *req2 = NULL;
	int rc, ret = -ENOMEM;

	req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
			     VBG_KERNEL_REQUEST);
	req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
			     VBG_KERNEL_REQUEST);
	if (!req1 || !req2)
		goto out_free;

	req1->interface_version = VMMDEV_VERSION;
	req1->os_type = VMMDEV_OSTYPE_LINUX26;
#if __BITS_PER_LONG == 64
	req1->os_type |= VMMDEV_OSTYPE_X64;
#endif

	req2->additions_major = VBG_VERSION_MAJOR;
	req2->additions_minor = VBG_VERSION_MINOR;
	req2->additions_build = VBG_VERSION_BUILD;
	req2->additions_revision = VBG_SVN_REV;
	req2->additions_features =
		VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
	strlcpy(req2->name, VBG_VERSION_STRING,
		sizeof(req2->name));

	/*
	 * There are two protocols here:
	 *      1. INFO2 + INFO1. Supported by >=3.2.51.
	 *      2. INFO1 and optionally INFO2. The old protocol.
	 *
	 * We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED
	 * if not supported by the VMMDev (message ordering requirement).
	 */
	rc = vbg_req_perform(gdev, req2);
	if (rc >= 0) {
		rc = vbg_req_perform(gdev, req1);
	} else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
		rc = vbg_req_perform(gdev, req1);
		if (rc >= 0) {
			rc = vbg_req_perform(gdev, req2);
			if (rc == VERR_NOT_IMPLEMENTED)
				rc = VINF_SUCCESS;
		}
	}
	ret = vbg_status_code_to_errno(rc);

out_free:
	vbg_req_free(req2, sizeof(*req2));
	vbg_req_free(req1, sizeof(*req1));
	return ret;
}

/**
 * Report the guest driver status to the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @active:	Flag whether the driver is now active or not.
 */
static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
{
	struct vmmdev_guest_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
	if (active)
		req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
	else
		req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
	req->flags = 0;

	rc = vbg_req_perform(gdev, req);
	if (rc == VERR_NOT_IMPLEMENTED)	/* Compatibility with older hosts. */
		rc = VINF_SUCCESS;

	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}

/**
 * Inflate the balloon by one chunk. The caller owns the balloon mutex.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @chunk_idx:	Index of the chunk.
 */
static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages;
	int i, rc, ret;

	pages = kmalloc_array(VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
			      sizeof(*pages),
			      GFP_KERNEL | __GFP_NOWARN);
	if (!pages)
		return -ENOMEM;

	req->header.size = sizeof(*req);
	req->inflate = true;
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto out_error;
		}

		req->phys_page[i] = page_to_phys(pages[i]);
	}

	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		ret = vbg_status_code_to_errno(rc);
		goto out_error;
	}

	gdev->mem_balloon.pages[chunk_idx] = pages;

	return 0;

out_error:
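	/*
	 * 'i' is the number of pages successfully allocated above, so this
	 * frees exactly the pages [0, i) and never an unallocated slot.
	 */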
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);

	return ret;
}

/**
 * Deflate the balloon by one chunk. The caller owns the balloon mutex.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @chunk_idx:	Index of the chunk.
 */
static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages = gdev->mem_balloon.pages[chunk_idx];
	int i, rc;

	req->header.size = sizeof(*req);
	req->inflate = false;
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		req->phys_page[i] = page_to_phys(pages[i]);

	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		return vbg_status_code_to_errno(rc);
	}

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		__free_page(pages[i]);
	kfree(pages);
	gdev->mem_balloon.pages[chunk_idx] = NULL;

	return 0;
}

/**
 * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
 * the host wants the balloon to be and adjust accordingly.
 */
static void vbg_balloon_work(struct work_struct *work)
{
	struct vbg_dev *gdev =
		container_of(work, struct vbg_dev, mem_balloon.work);
	struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
	u32 i, chunks;
	int rc, ret;

	/*
	 * Setting this bit means that we request the value from the host and
	 * change the guest memory balloon according to the returned value.
	 */
	req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		return;
	}

	/*
	 * The host always returns the same maximum amount of chunks, so
	 * we do this once.
	 */
	if (!gdev->mem_balloon.max_chunks) {
		gdev->mem_balloon.pages =
			devm_kcalloc(gdev->dev, req->phys_mem_chunks,
				     sizeof(struct page **), GFP_KERNEL);
		if (!gdev->mem_balloon.pages)
			return;

		gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
	}

	chunks = req->balloon_chunks;
	if (chunks > gdev->mem_balloon.max_chunks) {
		vbg_err("%s: illegal balloon size %u (max=%u)\n",
			__func__, chunks, gdev->mem_balloon.max_chunks);
		return;
	}

	if (chunks > gdev->mem_balloon.chunks) {
		/* inflate */
		for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
			ret = vbg_balloon_inflate(gdev, i);
			if (ret < 0)
				return;

			gdev->mem_balloon.chunks++;
		}
	} else {
		/* deflate */
		for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
			ret = vbg_balloon_deflate(gdev, i);
			if (ret < 0)
				return;

			gdev->mem_balloon.chunks--;
		}
	}
}

/**
 * Callback for heartbeat timer.
 */
static void vbg_heartbeat_timer(struct timer_list *t)
{
	struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);

	vbg_req_perform(gdev, gdev->guest_heartbeat_req);
	mod_timer(&gdev->heartbeat_timer,
		  msecs_to_jiffies(gdev->heartbeat_interval_ms));
}

/**
 * Configure the host to check guest's heartbeat
 * and get heartbeat interval from the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @enabled:	Set true to enable guest heartbeat checks on host.
 */
static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
{
	struct vmmdev_heartbeat *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->enabled = enabled;
	req->interval_ns = 0;
	rc = vbg_req_perform(gdev, req);
	do_div(req->interval_ns, 1000000); /* ns -> ms */
	gdev->heartbeat_interval_ms = req->interval_ns;
	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}

/**
 * Initializes the heartbeat timer. This feature may be disabled by the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_heartbeat_init(struct vbg_dev *gdev)
{
	int ret;

	/* Make sure that heartbeat checking is disabled if we fail. */
	ret = vbg_heartbeat_host_config(gdev, false);
	if (ret < 0)
		return ret;
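
	/*
	 * Enabling also reports the host's heartbeat interval back;
	 * vbg_heartbeat_host_config() stores it in
	 * gdev->heartbeat_interval_ms for the timer below.
	 */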
	ret = vbg_heartbeat_host_config(gdev, true);
	if (ret < 0)
		return ret;

	gdev->guest_heartbeat_req = vbg_req_alloc(
					sizeof(*gdev->guest_heartbeat_req),
					VMMDEVREQ_GUEST_HEARTBEAT,
					VBG_KERNEL_REQUEST);
	if (!gdev->guest_heartbeat_req)
		return -ENOMEM;

	vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
		 __func__, gdev->heartbeat_interval_ms);
	mod_timer(&gdev->heartbeat_timer, 0);

	return 0;
}

/**
 * Cleanup heartbeat code, stop the heartbeat timer and disable host
 * heartbeat checking.
 * @gdev:		The Guest extension device.
 */
static void vbg_heartbeat_exit(struct vbg_dev *gdev)
{
	del_timer_sync(&gdev->heartbeat_timer);
	vbg_heartbeat_host_config(gdev, false);
	vbg_req_free(gdev->guest_heartbeat_req,
		     sizeof(*gdev->guest_heartbeat_req));
}

/**
 * Applies a change to the bit usage tracker.
 * Return: true if the mask changed, false if not.
 * @tracker:	The bit usage tracker.
 * @changed:	The bits to change.
 * @previous:	The previous value of the bits.
 */
static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
				u32 changed, u32 previous)
{
	bool global_change = false;
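
	/*
	 * Walk the bits set in 'changed'; for each one, 'previous' tells us
	 * whether a reference is being dropped (bit was set) or added (bit
	 * was clear). The global mask only changes when a bit's usage count
	 * goes to or from zero.
	 */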
	while (changed) {
		u32 bit = ffs(changed) - 1;
		u32 bitmask = BIT(bit);

		if (bitmask & previous) {
			tracker->per_bit_usage[bit] -= 1;
			if (tracker->per_bit_usage[bit] == 0) {
				global_change = true;
				tracker->mask &= ~bitmask;
			}
		} else {
			tracker->per_bit_usage[bit] += 1;
			if (tracker->per_bit_usage[bit] == 1) {
				global_change = true;
				tracker->mask |= bitmask;
			}
		}

		changed &= ~bitmask;
	}

	return global_change;
}

/**
 * Init and termination worker for resetting the event filter on the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @fixed_events:	Fixed events (init time).
 */
static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
				       u32 fixed_events)
{
	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->not_mask = U32_MAX & ~fixed_events;
	req->or_mask = fixed_events;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}

/**
 * Changes the event filter mask for the given session.
 *
 * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
 * do session cleanup. Takes the session mutex.
 *
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The events to add.
 * @not_mask:			The events to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 */
static int vbg_set_session_event_filter(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	struct vmmdev_mask *req;
	u32 changed, previous;
	int rc, ret = 0;

	/*
	 * Allocate a request buffer before taking the mutex; when the
	 * session is being terminated the requestor is the kernel, as
	 * we're cleaning up.
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
			    session_termination ? VBG_KERNEL_REQUEST :
						  session->requestor);
	if (!req) {
		if (!session_termination)
			return -ENOMEM;
		/* Ignore allocation failure, we must do session cleanup. */
	}

	mutex_lock(&gdev->session_mutex);

	/* Apply the changes to the session mask. */
	previous = session->event_filter;
	session->event_filter |= or_mask;
	session->event_filter &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->event_filter;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
	or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;
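
	/*
	 * Note: req may be NULL here when we're doing session termination
	 * and the allocation above failed; the !req check below then skips
	 * telling the host while the session bookkeeping still ran.
	 */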
	if (gdev->event_filter_host == or_mask || !req)
		goto out;

	gdev->event_filter_host = or_mask;
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		ret = vbg_status_code_to_errno(rc);

		/* Failed, roll back (unless it's session termination time). */
		gdev->event_filter_host = U32_MAX;
		if (session_termination)
			goto out;

		vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
				    session->event_filter);
		session->event_filter = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);
	vbg_req_free(req, sizeof(*req));

	return ret;
}

/**
 * Init and termination worker for setting guest capabilities to zero on the
 * host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
{
	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->not_mask = U32_MAX;
	req->or_mask = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}

/**
 * Sets the guest capabilities for a session. Takes the session mutex.
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The capabilities to add.
 * @not_mask:			The capabilities to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 */
static int vbg_set_session_capabilities(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	struct vmmdev_mask *req;
	u32 changed, previous;
	int rc, ret = 0;

	/*
	 * Allocate a request buffer before taking the mutex; when the
	 * session is being terminated the requestor is the kernel, as
	 * we're cleaning up.
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
			    session_termination ? VBG_KERNEL_REQUEST :
						  session->requestor);
	if (!req) {
		if (!session_termination)
			return -ENOMEM;
		/* Ignore allocation failure, we must do session cleanup. */
	}

	mutex_lock(&gdev->session_mutex);

	/* Apply the changes to the session mask. */
	previous = session->guest_caps;
	session->guest_caps |= or_mask;
	session->guest_caps &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->guest_caps;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
	or_mask = gdev->guest_caps_tracker.mask;
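
	/*
	 * As above: req may be NULL during session termination, in which
	 * case the host update is skipped but the bookkeeping still ran.
	 */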
	if (gdev->guest_caps_host == or_mask || !req)
		goto out;

	gdev->guest_caps_host = or_mask;
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		ret = vbg_status_code_to_errno(rc);

		/* Failed, roll back (unless it's session termination time). */
		gdev->guest_caps_host = U32_MAX;
		if (session_termination)
			goto out;

		vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
				    session->guest_caps);
		session->guest_caps = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);
	vbg_req_free(req, sizeof(*req));

	return ret;
}

/**
 * vbg_query_host_version - gets the host feature mask and version information.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_query_host_version(struct vbg_dev *gdev)
{
	struct vmmdev_host_version *req;
	int rc, ret;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	rc = vbg_req_perform(gdev, req);
	ret = vbg_status_code_to_errno(rc);
	if (ret) {
		vbg_err("%s error: %d\n", __func__, rc);
		goto out;
	}

	snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
		 req->major, req->minor, req->build, req->revision);
	gdev->host_features = req->features;

	vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
		 gdev->host_features);

	if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
		vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
		ret = -ENODEV;
	}

out:
	vbg_req_free(req, sizeof(*req));
	return ret;
}

/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges; this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * Return: 0 or negative errno value.
 *
 * @gdev:		The Guest extension device.
 * @fixed_events:	Events that will be enabled upon init and no client
 *			will ever be allowed to mask.
 */
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
{
	int ret = -ENOMEM;

	gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
	gdev->event_filter_host = U32_MAX;	/* forces a report */
	gdev->guest_caps_host = U32_MAX;	/* forces a report */

	init_waitqueue_head(&gdev->event_wq);
	init_waitqueue_head(&gdev->hgcm_wq);
	spin_lock_init(&gdev->event_spinlock);
	mutex_init(&gdev->session_mutex);
	mutex_init(&gdev->cancel_req_mutex);
	timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
	INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
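
	/*
	 * Pre-allocate the request buffers that are used over and over
	 * again, e.g. event acknowledgement from the ISR, so those paths
	 * never have to allocate memory themselves.
	 */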
	gdev->mem_balloon.get_req =
		vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
			      VBG_KERNEL_REQUEST);
	gdev->mem_balloon.change_req =
		vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
			      VMMDEVREQ_CHANGE_MEMBALLOON,
			      VBG_KERNEL_REQUEST);
	gdev->cancel_req =
		vbg_req_alloc(sizeof(*(gdev->cancel_req)),
			      VMMDEVREQ_HGCM_CANCEL2,
			      VBG_KERNEL_REQUEST);
	gdev->ack_events_req =
		vbg_req_alloc(sizeof(*gdev->ack_events_req),
			      VMMDEVREQ_ACKNOWLEDGE_EVENTS,
			      VBG_KERNEL_REQUEST);
	gdev->mouse_status_req =
		vbg_req_alloc(sizeof(*gdev->mouse_status_req),
			      VMMDEVREQ_GET_MOUSE_STATUS,
			      VBG_KERNEL_REQUEST);

	if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
	    !gdev->cancel_req || !gdev->ack_events_req ||
	    !gdev->mouse_status_req)
		goto err_free_reqs;

	ret = vbg_query_host_version(gdev);
	if (ret)
		goto err_free_reqs;

	ret = vbg_report_guest_info(gdev);
	if (ret) {
		vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
		goto err_free_reqs;
	}

	ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
	if (ret) {
		vbg_err("vboxguest: Error setting fixed event filter: %d\n",
			ret);
		goto err_free_reqs;
	}

	ret = vbg_reset_host_capabilities(gdev);
	if (ret) {
		vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
			ret);
		goto err_free_reqs;
	}

	ret = vbg_core_set_mouse_status(gdev, 0);
	if (ret) {
		vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
		goto err_free_reqs;
	}

	/* These may fail without requiring the driver init to fail. */
	vbg_guest_mappings_init(gdev);
	vbg_heartbeat_init(gdev);

	/* All Done! */
	ret = vbg_report_driver_status(gdev, true);
	if (ret < 0)
		vbg_err("vboxguest: Error reporting driver status: %d\n", ret);

	return 0;

err_free_reqs:
	vbg_req_free(gdev->mouse_status_req,
		     sizeof(*gdev->mouse_status_req));
	vbg_req_free(gdev->ack_events_req,
		     sizeof(*gdev->ack_events_req));
	vbg_req_free(gdev->cancel_req,
		     sizeof(*gdev->cancel_req));
	vbg_req_free(gdev->mem_balloon.change_req,
		     sizeof(*gdev->mem_balloon.change_req));
	vbg_req_free(gdev->mem_balloon.get_req,
		     sizeof(*gdev->mem_balloon.get_req));
	return ret;
}

/**
 * Call this on exit to clean-up vboxguest-core managed resources.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 * @gdev:		The Guest extension device.
 */
void vbg_core_exit(struct vbg_dev *gdev)
{
	vbg_heartbeat_exit(gdev);
	vbg_guest_mappings_exit(gdev);

	/* Clear the host flags (mouse status etc). */
	vbg_reset_host_event_filter(gdev, 0);
	vbg_reset_host_capabilities(gdev);
	vbg_core_set_mouse_status(gdev, 0);

	vbg_req_free(gdev->mouse_status_req,
		     sizeof(*gdev->mouse_status_req));
	vbg_req_free(gdev->ack_events_req,
		     sizeof(*gdev->ack_events_req));
	vbg_req_free(gdev->cancel_req,
		     sizeof(*gdev->cancel_req));
	vbg_req_free(gdev->mem_balloon.change_req,
		     sizeof(*gdev->mem_balloon.change_req));
	vbg_req_free(gdev->mem_balloon.get_req,
		     sizeof(*gdev->mem_balloon.get_req));
}

/**
 * Creates a VBoxGuest user session.
 *
 * vboxguest_linux.c calls this when userspace opens the char-device.
 * Return: A pointer to the new session or an ERR_PTR on error.
 * @gdev:		The Guest extension device.
 * @requestor:		VMMDEV_REQUESTOR_* flags
 */
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
{
	struct vbg_session *session;

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return ERR_PTR(-ENOMEM);

	session->gdev = gdev;
	session->requestor = requestor;

	return session;
}

/**
 * Closes a VBoxGuest session.
 * @session:		The session to close (and free).
 */
void vbg_core_close_session(struct vbg_session *session)
{
	struct vbg_dev *gdev = session->gdev;
	int i, rc;

	vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
	vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);

	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i])
			continue;

		/* requestor is kernel here, as we're cleaning up. */
		vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
				    session->hgcm_client_ids[i], &rc);
	}

	kfree(session);
}

static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
			 size_t out_size)
{
	if (hdr->size_in != (sizeof(*hdr) + in_size) ||
	    hdr->size_out != (sizeof(*hdr) + out_size))
		return -EINVAL;

	return 0;
}

static int vbg_ioctl_driver_version_info(
	struct vbg_ioctl_driver_version_info *info)
{
	const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
	u16 min_maj_version, req_maj_version;

	if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
		return -EINVAL;

	req_maj_version = info->u.in.req_version >> 16;
	min_maj_version = info->u.in.min_version >> 16;

	if (info->u.in.min_version > info->u.in.req_version ||
	    min_maj_version != req_maj_version)
		return -EINVAL;

	if (info->u.in.min_version <= VBG_IOC_VERSION &&
	    min_maj_version == vbg_maj_version) {
		info->u.out.session_version = VBG_IOC_VERSION;
	} else {
		info->u.out.session_version = U32_MAX;
		info->hdr.rc = VERR_VERSION_MISMATCH;
	}

	info->u.out.driver_version = VBG_IOC_VERSION;
	info->u.out.driver_revision = 0;
	info->u.out.reserved1 = 0;
	info->u.out.reserved2 = 0;

	return 0;
}

static bool vbg_wait_event_cond(struct vbg_dev *gdev,
				struct vbg_session *session,
				u32 event_mask)
{
	unsigned long flags;
	bool wakeup;
	u32 events;

	spin_lock_irqsave(&gdev->event_spinlock, flags);

	events = gdev->pending_events & event_mask;
	wakeup = events || session->cancel_waiters;

	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return wakeup;
}

/* Must be called with the event_spinlock held */
static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     u32 event_mask)
{
	u32 events = gdev->pending_events & event_mask;

	gdev->pending_events &= ~events;
	return events;
}

static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_wait_for_events *wait)
{
	u32 timeout_ms = wait->u.in.timeout_ms;
	u32 event_mask = wait->u.in.events;
	unsigned long flags;
	long timeout;
	int ret = 0;

	if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
		return -EINVAL;

	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	wait->u.out.events = 0;
	do {
		timeout = wait_event_interruptible_timeout(
				gdev->event_wq,
				vbg_wait_event_cond(gdev, session, event_mask),
				timeout);

		spin_lock_irqsave(&gdev->event_spinlock, flags);

		if (timeout < 0 || session->cancel_waiters) {
			ret = -EINTR;
		} else if (timeout == 0) {
			ret = -ETIMEDOUT;
		} else {
			wait->u.out.events =
			   vbg_consume_events_locked(gdev, session, event_mask);
		}

		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		/*
		 * Someone else may have consumed the event(s) first, in
		 * which case we go back to waiting.
		 */
	} while (ret == 0 && wait->u.out.events == 0);

	return ret;
}

static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
					       struct vbg_session *session,
					       struct vbg_ioctl_hdr *hdr)
{
	unsigned long flags;

	if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
		return -EINVAL;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	session->cancel_waiters = true;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	wake_up(&gdev->event_wq);

	return 0;
}

/**
 * Checks if the VMM request is allowed in the context of the given session.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @session:		The calling session.
 * @req:		The request.
 */
static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
			   const struct vmmdev_request_header *req)
{
	const struct vmmdev_guest_status *guest_status;
	bool trusted_apps_only;

	switch (req->request_type) {
	/* Trusted user apps only. */
	case VMMDEVREQ_QUERY_CREDENTIALS:
	case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
	case VMMDEVREQ_REGISTER_SHARED_MODULE:
	case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
	case VMMDEVREQ_WRITE_COREDUMP:
	case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
	case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
	case VMMDEVREQ_CHECK_SHARED_MODULES:
	case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
	case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
	case VMMDEVREQ_REPORT_GUEST_STATS:
	case VMMDEVREQ_REPORT_GUEST_USER_STATE:
	case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
		trusted_apps_only = true;
		break;

	/* Anyone. */
	case VMMDEVREQ_GET_MOUSE_STATUS:
	case VMMDEVREQ_SET_MOUSE_STATUS:
	case VMMDEVREQ_SET_POINTER_SHAPE:
	case VMMDEVREQ_GET_HOST_VERSION:
	case VMMDEVREQ_IDLE:
	case VMMDEVREQ_GET_HOST_TIME:
	case VMMDEVREQ_SET_POWER_STATUS:
	case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
	case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
	case VMMDEVREQ_REPORT_GUEST_STATUS:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
	case VMMDEVREQ_VIDEMODE_SUPPORTED:
	case VMMDEVREQ_GET_HEIGHT_REDUCTION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
	case VMMDEVREQ_VIDEMODE_SUPPORTED2:
	case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
	case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
	case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
	case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
	case VMMDEVREQ_GET_VRDPCHANGE_REQ:
	case VMMDEVREQ_LOG_STRING:
	case VMMDEVREQ_GET_SESSION_ID:
		trusted_apps_only = false;
		break;

	/* Depends on the request parameters... */
	case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
		guest_status = (const struct vmmdev_guest_status *)req;
		switch (guest_status->facility) {
		case VBOXGUEST_FACILITY_TYPE_ALL:
		case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
			vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
				guest_status->facility);
			return -EPERM;
		case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
			trusted_apps_only = true;
			break;
		case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
		case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
		case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
		default:
			trusted_apps_only = false;
			break;
		}
		break;

	/* Anything else is not allowed. */
	default:
		vbg_err("Denying userspace vmm call type %#08x\n",
			req->request_type);
		return -EPERM;
	}

	if (trusted_apps_only &&
	    (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
			req->request_type);
		return -EPERM;
	}

	return 0;
}

static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
				struct vbg_session *session, void *data)
{
	struct vbg_ioctl_hdr *hdr = data;
	int ret;

	if (hdr->size_in != hdr->size_out)
		return -EINVAL;

	if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
		return -E2BIG;

	if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	ret = vbg_req_allowed(gdev, session, data);
	if (ret < 0)
		return ret;

	vbg_req_perform(gdev, data);
	WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);

	return 0;
}

static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
				  struct vbg_session *session,
				  struct vbg_ioctl_hgcm_connect *conn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
		return -EINVAL;

	/* Find a free place in the sessions clients array and claim it */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i]) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);
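
	/*
	 * U32_MAX claims the slot while the connect below runs without the
	 * mutex held; it is replaced with the real client id on success or
	 * reset to 0 (free) on failure.
	 */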
	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EMFILE;

	ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
			       &client_id, &conn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && conn->hdr.rc >= 0) {
		conn->u.out.client_id = client_id;
		session->hgcm_client_ids[i] = client_id;
	} else {
		conn->u.out.client_id = 0;
		session->hgcm_client_ids[i] = 0;
	}
	mutex_unlock(&gdev->session_mutex);

	return ret;
}

static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_hgcm_disconnect *disconn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
		return -EINVAL;

	client_id = disconn->u.in.client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (session->hgcm_client_ids[i] == client_id) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EINVAL;

	ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
				  &disconn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && disconn->hdr.rc >= 0)
		session->hgcm_client_ids[i] = 0;
	else
		session->hgcm_client_ids[i] = client_id;
	mutex_unlock(&gdev->session_mutex);

	return ret;
}

static bool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
{
	switch (type) {
	case VMMDEV_HGCM_PARM_TYPE_32BIT:
	case VMMDEV_HGCM_PARM_TYPE_64BIT:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		return true;
	default:
		return false;
	}
}

static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
			       struct vbg_session *session, bool f32bit,
			       struct vbg_ioctl_hgcm_call *call)
{
	size_t actual_size;
	u32 client_id;
	int i, ret;

	if (call->hdr.size_in < sizeof(*call))
		return -EINVAL;

	if (call->hdr.size_in != call->hdr.size_out)
		return -EINVAL;

	if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
		return -E2BIG;
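
	/*
	 * 0 means "not connected" and U32_MAX is the temporary placeholder
	 * used by vbg_ioctl_hgcm_connect(), so neither is ever a valid
	 * client id for a call.
	 */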
	client_id = call->client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	actual_size = sizeof(*call);
	if (f32bit)
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter32);
	else
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter);
	if (call->hdr.size_in < actual_size) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
			  call->hdr.size_in, actual_size);
		return -EINVAL;
	}
	call->hdr.size_out = actual_size;

	/* Validate parameter types */
	if (f32bit) {
		struct vmmdev_hgcm_function_parameter32 *parm =
			VBG_IOCTL_HGCM_CALL_PARMS32(call);

		for (i = 0; i < call->parm_count; i++)
			if (!vbg_param_valid(parm[i].type))
				return -EINVAL;
	} else {
		struct vmmdev_hgcm_function_parameter *parm =
			VBG_IOCTL_HGCM_CALL_PARMS(call);

		for (i = 0; i < call->parm_count; i++)
			if (!vbg_param_valid(parm[i].type))
				return -EINVAL;
	}

	/*
	 * Validate the client id.
	 */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
		if (session->hgcm_client_ids[i] == client_id)
			break;
	mutex_unlock(&gdev->session_mutex);
	if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
			  client_id);
		return -EINVAL;
	}

	if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
		ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
				      call->function, call->timeout_ms,
				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
				      call->parm_count, &call->hdr.rc);
	else
		ret = vbg_hgcm_call(gdev, session->requestor, client_id,
				    call->function, call->timeout_ms,
				    VBG_IOCTL_HGCM_CALL_PARMS(call),
				    call->parm_count, &call->hdr.rc);

	if (ret == -E2BIG) {
		/* E2BIG needs to be reported through the hdr.rc field. */
		call->hdr.rc = VERR_OUT_OF_RANGE;
		ret = 0;
	}

	if (ret && ret != -EINTR && ret != -ETIMEDOUT)
		vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);

	return ret;
}

static int vbg_ioctl_log(struct vbg_ioctl_log *log)
{
	if (log->hdr.size_out != sizeof(log->hdr))
		return -EINVAL;

	vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
		 log->u.in.msg);

	return 0;
}

static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
					struct vbg_session *session,
					struct vbg_ioctl_change_filter *filter)
{
	u32 or_mask, not_mask;

	if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
		return -EINVAL;

	or_mask = filter->u.in.or_mask;
	not_mask = filter->u.in.not_mask;

	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
		return -EINVAL;

	return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
					    false);
}

static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
	     struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
{
	u32 or_mask, not_mask;
	int ret;

	if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
		return -EINVAL;

	or_mask = caps->u.in.or_mask;
	not_mask = caps->u.in.not_mask;

	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
		return -EINVAL;

	ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
					   false);
	if (ret)
		return ret;

	caps->u.out.session_caps = session->guest_caps;
	caps->u.out.global_caps = gdev->guest_caps_host;

	return 0;
}

static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
				   struct vbg_ioctl_check_balloon *balloon_info)
{
	if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
		return -EINVAL;

	balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
	/*
	 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
	 * events entirely in the kernel, see vbg_core_isr().
	 */
	balloon_info->u.out.handle_in_r3 = false;

	return 0;
}

static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_write_coredump *dump)
{
	struct vmmdev_write_core_dump *req;

	if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
		return -EINVAL;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
			    session->requestor);
	if (!req)
		return -ENOMEM;

	req->flags = dump->u.in.flags;
	dump->hdr.rc = vbg_req_perform(gdev, req);

	vbg_req_free(req, sizeof(*req));
	return 0;
}

/**
 * Common IOCtl for user to kernel communication.
 * Return: 0 or negative errno value.
 * @session:	The client session.
 * @req:	The requested function.
 * @data:	The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
 */
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
{
	unsigned int req_no_size = req & ~IOCSIZE_MASK;
	struct vbg_dev *gdev = session->gdev;
	struct vbg_ioctl_hdr *hdr = data;
	bool f32bit = false;

	hdr->rc = VINF_SUCCESS;
	if (!hdr->size_out)
		hdr->size_out = hdr->size_in;

	/*
	 * hdr->version and hdr->size_in / hdr->size_out minimum size are
	 * already checked by vbg_misc_device_ioctl().
	 */

	/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
	if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
		return vbg_ioctl_vmmrequest(gdev, session, data);

	if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	/* Fixed size requests. */
	switch (req) {
	case VBG_IOCTL_DRIVER_VERSION_INFO:
		return vbg_ioctl_driver_version_info(data);
	case VBG_IOCTL_HGCM_CONNECT:
		return vbg_ioctl_hgcm_connect(gdev, session, data);
	case VBG_IOCTL_HGCM_DISCONNECT:
		return vbg_ioctl_hgcm_disconnect(gdev, session, data);
	case VBG_IOCTL_WAIT_FOR_EVENTS:
		return vbg_ioctl_wait_for_events(gdev, session, data);
	case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
		return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
	case VBG_IOCTL_CHANGE_FILTER_MASK:
		return vbg_ioctl_change_filter_mask(gdev, session, data);
	case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
		return vbg_ioctl_change_guest_capabilities(gdev, session, data);
	case VBG_IOCTL_CHECK_BALLOON:
		return vbg_ioctl_check_balloon(gdev, data);
	case VBG_IOCTL_WRITE_CORE_DUMP:
		return vbg_ioctl_write_core_dump(gdev, session, data);
	}

	/* Variable sized requests. */
	switch (req_no_size) {
#ifdef CONFIG_COMPAT
	case VBG_IOCTL_HGCM_CALL_32(0):
		f32bit = true;
#endif
		/* Fall through */
	case VBG_IOCTL_HGCM_CALL(0):
		return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
	case VBG_IOCTL_LOG(0):
		return vbg_ioctl_log(data);
	}

	vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
	return -ENOTTY;
}

/**
 * Report guest supported mouse-features to the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @features:	The set of features to report to the host.
 */
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
{
	struct vmmdev_mouse_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	req->mouse_features = features;
	req->pointer_pos_x = 0;
	req->pointer_pos_y = 0;

	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}

/** Core interrupt service routine. */
irqreturn_t vbg_core_isr(int irq, void *dev_id)
{
	struct vbg_dev *gdev = dev_id;
	struct vmmdev_events *req = gdev->ack_events_req;
	bool mouse_position_changed = false;
	unsigned long flags;
	u32 events = 0;
	int rc;

	if (!gdev->mmio->V.V1_04.have_events)
		return IRQ_NONE;

	/* Get and acknowledge events. */
	req->header.rc = VERR_INTERNAL_ERROR;
	req->events = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("Error performing events req, rc: %d\n", rc);
		return IRQ_NONE;
	}

	events = req->events;
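
	/*
	 * Dispatch: mouse-position, HGCM and balloon events are handled
	 * directly in the kernel below; whatever remains is posted to
	 * pending_events for sessions waiting in VBG_IOCTL_WAIT_FOR_EVENTS.
	 */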
	if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
		mouse_position_changed = true;
		events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
	}

	if (events & VMMDEV_EVENT_HGCM) {
		wake_up(&gdev->hgcm_wq);
		events &= ~VMMDEV_EVENT_HGCM;
	}

	if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
		schedule_work(&gdev->mem_balloon.work);
		events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	}

	if (events) {
		spin_lock_irqsave(&gdev->event_spinlock, flags);
		gdev->pending_events |= events;
		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		wake_up(&gdev->event_wq);
	}

	if (mouse_position_changed)
		vbg_linux_mouse_event(gdev);

	return IRQ_HANDLED;
}