// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for exiting into userspace on registered MSRs
 */

#define _GNU_SOURCE /* for program_invocation_short_name */
#include <sys/ioctl.h>

#include "test_util.h"
#include "kvm_util.h"
#include "vmx.h"
/* Forced emulation prefix, used to invoke the emulator unconditionally. */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
#define KVM_FEP_LENGTH 5
static int fep_available = 1;
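/*
 * Note: fep_available assumes the forced emulation prefix is usable until
 * guest_ud_handler() proves otherwise by fielding the #UD it raises.
 */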
#define VCPU_ID		 1
#define MSR_NON_EXISTENT 0x474f4f00
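/*
 * MSR_NON_EXISTENT is a fabricated index that neither hardware nor KVM
 * defines (the bytes appear to spell "GOO\0").
 */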
static u64 deny_bits = 0;
struct kvm_msr_filter filter_allow = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test an MSR the kernel knows about. */
			.base = MSR_IA32_XSS,
			.bitmap = (uint8_t*)&deny_bits,
		}, {
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test an MSR the kernel doesn't know about. */
			.base = MSR_IA32_FLUSH_CMD,
			.bitmap = (uint8_t*)&deny_bits,
		}, {
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test a fabricated MSR that no one knows about. */
			.base = MSR_NON_EXISTENT,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};
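/*
 * deny_bits is all zeroes, and a clear bit in a filter bitmap denies the
 * access, so every MSR named in the ranges above exits to userspace on
 * both reads and writes despite the default-allow policy.
 */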
struct kvm_msr_filter filter_fs = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.nmsrs = 1,
			.base = MSR_FS_BASE,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};
struct kvm_msr_filter filter_gs = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.nmsrs = 1,
			.base = MSR_GS_BASE,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};
static uint64_t msr_non_existent_data;
static int guest_exception_count;
static u32 msr_reads, msr_writes;

static u8 bitmap_00000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_00000000_write[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_40000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_deadbeef[1] = { 0x1 };
static void deny_msr(uint8_t *bitmap, u32 msr)
{
	u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);

	bitmap[idx / 8] &= ~(1 << (idx % 8));
}
static void prepare_bitmaps(void)
{
	memset(bitmap_00000000, 0xff, sizeof(bitmap_00000000));
	memset(bitmap_00000000_write, 0xff, sizeof(bitmap_00000000_write));
	memset(bitmap_40000000, 0xff, sizeof(bitmap_40000000));
	memset(bitmap_c0000000, 0xff, sizeof(bitmap_c0000000));
	memset(bitmap_c0000000_read, 0xff, sizeof(bitmap_c0000000_read));

	deny_msr(bitmap_00000000_write, MSR_IA32_POWER_CTL);
	deny_msr(bitmap_c0000000_read, MSR_SYSCALL_MASK);
	deny_msr(bitmap_c0000000_read, MSR_GS_BASE);
}
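/*
 * filter_deny inverts the scheme used above: the default action is deny,
 * the all-ones bitmaps re-allow entire ranges, and only the handful of
 * bits cleared by prepare_bitmaps() still trap to userspace with
 * KVM_MSR_EXIT_REASON_FILTER.
 */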
struct kvm_msr_filter filter_deny = {
	.flags = KVM_MSR_FILTER_DEFAULT_DENY,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.base = 0x00000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_00000000,
		}, {
			.flags = KVM_MSR_FILTER_WRITE,
			.base = 0x00000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_00000000_write,
		}, {
			.flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
			.base = 0x40000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_40000000,
		}, {
			.flags = KVM_MSR_FILTER_READ,
			.base = 0xc0000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_c0000000_read,
		}, {
			.flags = KVM_MSR_FILTER_WRITE,
			.base = 0xc0000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_c0000000,
		}, {
			.flags = KVM_MSR_FILTER_WRITE | KVM_MSR_FILTER_READ,
			.base = 0xdeadbeef,
			.nmsrs = 1,
			.bitmap = bitmap_deadbeef,
		},
	},
};
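/*
 * Note: bitmap_deadbeef sets bit 0, so the filter itself allows 0xdeadbeef;
 * the access still exits because KVM does not recognize the MSR and
 * KVM_MSR_EXIT_REASON_UNKNOWN exits are enabled in test_msr_filter_deny().
 */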
struct kvm_msr_filter no_filter_deny = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
};
/*
 * Note: Force test_rdmsr() to not be inlined to prevent the labels,
 * rdmsr_start and rdmsr_end, from being defined multiple times.
 */
static noinline uint64_t test_rdmsr(uint32_t msr)
{
	uint32_t a, d;

	guest_exception_count = 0;

	__asm__ __volatile__("rdmsr_start: rdmsr; rdmsr_end:" :
			"=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}
/*
 * Note: Force test_wrmsr() to not be inlined to prevent the labels,
 * wrmsr_start and wrmsr_end, from being defined multiple times.
 */
static noinline void test_wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	guest_exception_count = 0;

	__asm__ __volatile__("wrmsr_start: wrmsr; wrmsr_end:" ::
			"a"(a), "d"(d), "c"(msr) : "memory");
}

extern char rdmsr_start, rdmsr_end;
extern char wrmsr_start, wrmsr_end;
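/*
 * The start/end labels bracket each RDMSR/WRMSR so the #GP handlers can
 * match the faulting RIP and skip past the instruction instead of dying.
 */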
/*
 * Note: Force test_em_rdmsr() to not be inlined to prevent the labels,
 * em_rdmsr_start and em_rdmsr_end, from being defined multiple times.
 */
static noinline uint64_t test_em_rdmsr(uint32_t msr)
{
	uint32_t a, d;

	guest_exception_count = 0;

	__asm__ __volatile__(KVM_FEP "em_rdmsr_start: rdmsr; em_rdmsr_end:" :
			"=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}
/*
 * Note: Force test_em_wrmsr() to not be inlined to prevent the labels,
 * em_wrmsr_start and em_wrmsr_end, from being defined multiple times.
 */
static noinline void test_em_wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	guest_exception_count = 0;

	__asm__ __volatile__(KVM_FEP "em_wrmsr_start: wrmsr; em_wrmsr_end:" ::
			"a"(a), "d"(d), "c"(msr) : "memory");
}

extern char em_rdmsr_start, em_rdmsr_end;
extern char em_wrmsr_start, em_wrmsr_end;
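/*
 * The em_* variants differ only by the KVM_FEP prefix, which forces KVM
 * to run the instruction through its emulator rather than relying on
 * hardware-accelerated RDMSR/WRMSR interception.
 */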
static void guest_code_filter_allow(void)
{
	uint64_t data;

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_XSS.
	 *
	 * A GP is thrown if anything other than 0 is written to
	 * MSR_IA32_XSS.
	 */
	data = test_rdmsr(MSR_IA32_XSS);
	GUEST_ASSERT(data == 0);
	GUEST_ASSERT(guest_exception_count == 0);

	test_wrmsr(MSR_IA32_XSS, 0);
	GUEST_ASSERT(guest_exception_count == 0);

	test_wrmsr(MSR_IA32_XSS, 1);
	GUEST_ASSERT(guest_exception_count == 1);
	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_FLUSH_CMD.
	 *
	 * A GP is thrown if MSR_IA32_FLUSH_CMD is read
	 * from or if a value other than 1 is written to it.
	 */
	test_rdmsr(MSR_IA32_FLUSH_CMD);
	GUEST_ASSERT(guest_exception_count == 1);

	test_wrmsr(MSR_IA32_FLUSH_CMD, 0);
	GUEST_ASSERT(guest_exception_count == 1);

	test_wrmsr(MSR_IA32_FLUSH_CMD, 1);
	GUEST_ASSERT(guest_exception_count == 0);
	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_NON_EXISTENT.
	 *
	 * Test that a fabricated MSR can pass through the kernel
	 * and be handled in userspace.
	 */
	test_wrmsr(MSR_NON_EXISTENT, 2);
	GUEST_ASSERT(guest_exception_count == 0);

	data = test_rdmsr(MSR_NON_EXISTENT);
	GUEST_ASSERT(data == 2);
	GUEST_ASSERT(guest_exception_count == 0);
	/*
	 * Test to see if the instruction emulator is available (ie: the module
	 * parameter 'kvm.force_emulation_prefix=1' is set).  This instruction
	 * will #UD if it isn't available.
	 */
	__asm__ __volatile__(KVM_FEP "nop");

	if (fep_available) {
		/* Let userspace know we aren't done. */
		GUEST_SYNC(0);

		/*
		 * Now run the same tests with the instruction emulator.
		 */
		data = test_em_rdmsr(MSR_IA32_XSS);
		GUEST_ASSERT(data == 0);
		GUEST_ASSERT(guest_exception_count == 0);
		test_em_wrmsr(MSR_IA32_XSS, 0);
		GUEST_ASSERT(guest_exception_count == 0);
		test_em_wrmsr(MSR_IA32_XSS, 1);
		GUEST_ASSERT(guest_exception_count == 1);

		test_em_rdmsr(MSR_IA32_FLUSH_CMD);
		GUEST_ASSERT(guest_exception_count == 1);
		test_em_wrmsr(MSR_IA32_FLUSH_CMD, 0);
		GUEST_ASSERT(guest_exception_count == 1);
		test_em_wrmsr(MSR_IA32_FLUSH_CMD, 1);
		GUEST_ASSERT(guest_exception_count == 0);

		test_em_wrmsr(MSR_NON_EXISTENT, 2);
		GUEST_ASSERT(guest_exception_count == 0);
		data = test_em_rdmsr(MSR_NON_EXISTENT);
		GUEST_ASSERT(data == 2);
		GUEST_ASSERT(guest_exception_count == 0);
	}

	GUEST_DONE();
}
static void guest_msr_calls(bool trapped)
{
	/* This goes into the in-kernel emulation */
	wrmsr(MSR_SYSCALL_MASK, 0);

	if (trapped) {
		/* This goes into user space emulation */
		GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) == MSR_SYSCALL_MASK);
		GUEST_ASSERT(rdmsr(MSR_GS_BASE) == MSR_GS_BASE);
	} else {
		GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) != MSR_SYSCALL_MASK);
		GUEST_ASSERT(rdmsr(MSR_GS_BASE) != MSR_GS_BASE);
	}

	/* If trapped == true, this goes into user space emulation */
	wrmsr(MSR_IA32_POWER_CTL, 0x1234);

	/* This goes into the in-kernel emulation */
	rdmsr(MSR_IA32_POWER_CTL);

	/* Invalid MSR, should always be handled by user space exit */
	GUEST_ASSERT(rdmsr(0xdeadbeef) == 0xdeadbeef);
	wrmsr(0xdeadbeef, 0x1234);
}
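/*
 * The trapped-read assertions above work because handle_rdmsr() below
 * echoes the MSR index back as the read data; once the filter is dropped,
 * the real (different) register contents come back instead.
 */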
static void guest_code_filter_deny(void)
{
	guest_msr_calls(true);

	/*
	 * Disable msr filtering, so that the kernel
	 * handles everything in the next round
	 */
	GUEST_SYNC(0);

	guest_msr_calls(false);

	GUEST_DONE();
}
static void guest_code_permission_bitmap(void)
{
	uint64_t data;

	data = test_rdmsr(MSR_FS_BASE);
	GUEST_ASSERT(data == MSR_FS_BASE);
	data = test_rdmsr(MSR_GS_BASE);
	GUEST_ASSERT(data != MSR_GS_BASE);

	/* Let userspace know to switch the filter */
	GUEST_SYNC(0);

	data = test_rdmsr(MSR_FS_BASE);
	GUEST_ASSERT(data != MSR_FS_BASE);
	data = test_rdmsr(MSR_GS_BASE);
	GUEST_ASSERT(data == MSR_GS_BASE);

	GUEST_DONE();
}
static void __guest_gp_handler(struct ex_regs *regs,
			       char *r_start, char *r_end,
			       char *w_start, char *w_end)
{
	if (regs->rip == (uintptr_t)r_start) {
		regs->rip = (uintptr_t)r_end;
		regs->rax = 0;
		regs->rdx = 0;
	} else if (regs->rip == (uintptr_t)w_start) {
		regs->rip = (uintptr_t)w_end;
	} else {
		GUEST_ASSERT(!"RIP is at an unknown location!");
	}

	++guest_exception_count;
}
static void guest_gp_handler(struct ex_regs *regs)
{
	__guest_gp_handler(regs, &rdmsr_start, &rdmsr_end,
			   &wrmsr_start, &wrmsr_end);
}

static void guest_fep_gp_handler(struct ex_regs *regs)
{
	__guest_gp_handler(regs, &em_rdmsr_start, &em_rdmsr_end,
			   &em_wrmsr_start, &em_wrmsr_end);
}
static void guest_ud_handler(struct ex_regs *regs)
{
	fep_available = 0;
	regs->rip += KVM_FEP_LENGTH;
}
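/*
 * KVM_FEP_LENGTH is 5: two bytes for the ud2 opcode (0x0f 0x0b) plus the
 * three 'k', 'v', 'm' marker bytes, so advancing RIP by 5 lands on the
 * instruction the prefix would have guarded.
 */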
static void run_guest(struct kvm_vm *vm)
{
	int rc;

	rc = _vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
}
static void check_for_guest_assert(struct kvm_vm *vm)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
	struct ucall uc;

	if (run->exit_reason == KVM_EXIT_IO &&
	    get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) {
		TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
			  __FILE__, uc.args[1]);
	}
}
static void process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);

	check_for_guest_assert(vm);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_RDMSR,
		    "Unexpected exit reason: %u (%s),\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->msr.index == msr_index,
		    "Unexpected msr (0x%04x), expected 0x%04x",
		    run->msr.index, msr_index);

	switch (run->msr.index) {
	case MSR_IA32_XSS:
		run->msr.data = 0;
		break;
	case MSR_IA32_FLUSH_CMD:
		run->msr.error = 1;
		break;
	case MSR_NON_EXISTENT:
		run->msr.data = msr_non_existent_data;
		break;
	case MSR_FS_BASE:
		run->msr.data = MSR_FS_BASE;
		break;
	case MSR_GS_BASE:
		run->msr.data = MSR_GS_BASE;
		break;
	default:
		TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
	}
}
static void process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);

	check_for_guest_assert(vm);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_WRMSR,
		    "Unexpected exit reason: %u (%s),\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));
	TEST_ASSERT(run->msr.index == msr_index,
		    "Unexpected msr (0x%04x), expected 0x%04x",
		    run->msr.index, msr_index);

	switch (run->msr.index) {
	case MSR_IA32_XSS:
		if (run->msr.data != 0)
			run->msr.error = 1;
		break;
	case MSR_IA32_FLUSH_CMD:
		if (run->msr.data != 1)
			run->msr.error = 1;
		break;
	case MSR_NON_EXISTENT:
		msr_non_existent_data = run->msr.data;
		break;
	default:
		TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
	}
}
static void process_ucall_done(struct kvm_vm *vm)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
	struct ucall uc;

	check_for_guest_assert(vm);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

	TEST_ASSERT(get_ucall(vm, VCPU_ID, &uc) == UCALL_DONE,
		    "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
		    uc.cmd, UCALL_DONE);
}
static uint64_t process_ucall(struct kvm_vm *vm)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
	struct ucall uc = {};

	check_for_guest_assert(vm);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

	switch (get_ucall(vm, VCPU_ID, &uc)) {
	case UCALL_SYNC:
		break;
	case UCALL_ABORT:
		check_for_guest_assert(vm);
		break;
	case UCALL_DONE:
		process_ucall_done(vm);
		break;
	default:
		TEST_ASSERT(false, "Unexpected ucall");
	}

	return uc.cmd;
}
static void run_guest_then_process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
{
	run_guest(vm);
	process_rdmsr(vm, msr_index);
}

static void run_guest_then_process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
{
	run_guest(vm);
	process_wrmsr(vm, msr_index);
}

static uint64_t run_guest_then_process_ucall(struct kvm_vm *vm)
{
	run_guest(vm);
	return process_ucall(vm);
}

static void run_guest_then_process_ucall_done(struct kvm_vm *vm)
{
	run_guest(vm);
	process_ucall_done(vm);
}
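/*
 * Each wrapper above drives exactly one guest exit: resume the vCPU, then
 * consume and validate the single userspace exit it produces.
 */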
static void test_msr_filter_allow(void) {
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_X86_USER_SPACE_MSR,
		.args[0] = KVM_MSR_EXIT_REASON_FILTER,
	};
	struct kvm_vm *vm;
	int rc;

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code_filter_allow);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, &cap);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vm, VCPU_ID);

	vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);
	/* Process guest code userspace exits. */
	run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
	run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
	run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);

	run_guest_then_process_rdmsr(vm, MSR_IA32_FLUSH_CMD);
	run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
	run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);

	run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT);
	run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT);
	vm_handle_exception(vm, UD_VECTOR, guest_ud_handler);
	run_guest(vm);
	vm_handle_exception(vm, UD_VECTOR, NULL);

	if (process_ucall(vm) != UCALL_DONE) {
		vm_handle_exception(vm, GP_VECTOR, guest_fep_gp_handler);

		/* Process emulated rdmsr and wrmsr instructions. */
		run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
		run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
		run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);

		run_guest_then_process_rdmsr(vm, MSR_IA32_FLUSH_CMD);
		run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
		run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);

		run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT);
		run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT);

		/* Confirm the guest completed without issues. */
		run_guest_then_process_ucall_done(vm);
	} else {
		printf("To run the instruction emulated tests set the module parameter 'kvm.force_emulation_prefix=1'\n");
	}

	kvm_vm_free(vm);
}
static int handle_ucall(struct kvm_vm *vm)
{
	struct ucall uc;

	switch (get_ucall(vm, VCPU_ID, &uc)) {
	case UCALL_ABORT:
		TEST_FAIL("Guest assertion not met");
		break;
	case UCALL_SYNC:
		vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &no_filter_deny);
		break;
	case UCALL_DONE:
		return 1;
	default:
		TEST_FAIL("Unknown ucall %lu", uc.cmd);
	}

	return 0;
}
static void handle_rdmsr(struct kvm_run *run)
{
	run->msr.data = run->msr.index;
	msr_reads++;

	if (run->msr.index == MSR_SYSCALL_MASK ||
	    run->msr.index == MSR_GS_BASE) {
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
			    "MSR read trap w/o access fault");
	}

	if (run->msr.index == 0xdeadbeef) {
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
			    "MSR deadbeef read trap w/o inval fault");
	}
}
static void handle_wrmsr(struct kvm_run *run)
{
	/* ignore */
	msr_writes++;

	if (run->msr.index == MSR_IA32_POWER_CTL) {
		TEST_ASSERT(run->msr.data == 0x1234,
			    "MSR data for MSR_IA32_POWER_CTL incorrect");
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
			    "MSR_IA32_POWER_CTL trap w/o access fault");
	}

	if (run->msr.index == 0xdeadbeef) {
		TEST_ASSERT(run->msr.data == 0x1234,
			    "MSR data for deadbeef incorrect");
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
			    "deadbeef trap w/o inval fault");
	}
}
static void test_msr_filter_deny(void) {
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_X86_USER_SPACE_MSR,
		.args[0] = KVM_MSR_EXIT_REASON_INVAL |
			   KVM_MSR_EXIT_REASON_UNKNOWN |
			   KVM_MSR_EXIT_REASON_FILTER,
	};
	struct kvm_vm *vm;
	struct kvm_run *run;
	int rc;

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code_filter_deny);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
	run = vcpu_state(vm, VCPU_ID);

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, &cap);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	prepare_bitmaps();
	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny);

	while (1) {
		rc = _vcpu_run(vm, VCPU_ID);

		TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);

		switch (run->exit_reason) {
		case KVM_EXIT_X86_RDMSR:
			handle_rdmsr(run);
			break;
		case KVM_EXIT_X86_WRMSR:
			handle_wrmsr(run);
			break;
		case KVM_EXIT_IO:
			if (handle_ucall(vm))
				goto done;
			break;
		}
	}

done:
	TEST_ASSERT(msr_reads == 4, "Handled 4 rdmsr in user space");
	TEST_ASSERT(msr_writes == 3, "Handled 3 wrmsr in user space");

	kvm_vm_free(vm);
}
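/*
 * test_msr_filter_deny() expects 4 reads and 3 writes: the trapped round
 * exits for rdmsr of MSR_SYSCALL_MASK, MSR_GS_BASE and 0xdeadbeef plus
 * wrmsr of MSR_IA32_POWER_CTL and 0xdeadbeef; after the filter is dropped,
 * only the unknown 0xdeadbeef accesses (one read, one write) still exit.
 */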
static void test_msr_permission_bitmap(void) {
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_X86_USER_SPACE_MSR,
		.args[0] = KVM_MSR_EXIT_REASON_FILTER,
	};
	struct kvm_vm *vm;
	int rc;

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code_permission_bitmap);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, &cap);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_fs);
	run_guest_then_process_rdmsr(vm, MSR_FS_BASE);
	TEST_ASSERT(run_guest_then_process_ucall(vm) == UCALL_SYNC,
		    "Expected ucall state to be UCALL_SYNC.");
	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
	run_guest_then_process_rdmsr(vm, MSR_GS_BASE);
	run_guest_then_process_ucall_done(vm);

	kvm_vm_free(vm);
}
int main(int argc, char *argv[])
{
	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

	test_msr_filter_allow();

	test_msr_filter_deny();

	test_msr_permission_bitmap();

	return 0;
}