// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for exiting into userspace on registered MSRs
 */
#include <sys/ioctl.h>

#include "kvm_test_harness.h"
#include "test_util.h"
#include "kvm_util.h"
#include "vmx.h"

#define MSR_NON_EXISTENT 0x474f4f00
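/*
 * MSR_NON_EXISTENT sits in an index range nothing defines, so neither KVM
 * nor hardware recognizes it; the bytes 0x47,0x4f,0x4f spell "GOO",
 * presumably a Google-ism. Any index in an unused range would do.
 */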
static u64 deny_bits = 0;
struct kvm_msr_filter filter_allow = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test an MSR the kernel knows about. */
			.base = MSR_IA32_XSS,
			.bitmap = (uint8_t*)&deny_bits,
		}, {
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test an MSR the kernel doesn't know about. */
			.base = MSR_IA32_FLUSH_CMD,
			.bitmap = (uint8_t*)&deny_bits,
		}, {
			.flags = KVM_MSR_FILTER_READ |
				 KVM_MSR_FILTER_WRITE,
			.nmsrs = 1,
			/* Test a fabricated MSR that no one knows about. */
			.base = MSR_NON_EXISTENT,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};
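/*
 * Per the KVM_X86_SET_MSR_FILTER ABI, a set bit in a range's bitmap allows
 * the access and a clear bit denies it. deny_bits is all zeroes, so each
 * range above denies exactly its one MSR: accesses bounce to userspace as
 * KVM_EXIT_X86_RDMSR/WRMSR once KVM_MSR_EXIT_REASON_FILTER is enabled.
 */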
struct kvm_msr_filter filter_fs = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.nmsrs = 1,
			.base = MSR_FS_BASE,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};
struct kvm_msr_filter filter_gs = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.nmsrs = 1,
			.base = MSR_GS_BASE,
			.bitmap = (uint8_t*)&deny_bits,
		},
	},
};
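/*
 * filter_fs and filter_gs each deny reads of a single MSR, so the
 * permission bitmap test can swap filters at runtime and verify that only
 * the currently-denied MSR exits to userspace.
 */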
static uint64_t msr_non_existent_data;
static int guest_exception_count;
static u32 msr_reads, msr_writes;
static u8 bitmap_00000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_00000000_write[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_40000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_deadbeef[1] = { 0x1 };
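/*
 * Each bitmap backs one filter_deny range; the name encodes that range's
 * base MSR index. The bitmaps start life all-ones (allow everything in
 * range) and deny_msr() clears individual bits. bitmap_deadbeef keeps bit 0
 * set so MSR 0xdeadbeef passes the filter, which lets the test exercise
 * KVM_MSR_EXIT_REASON_UNKNOWN rather than _FILTER for that index.
 */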
static void deny_msr(uint8_t *bitmap, u32 msr)
{
	u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);

	bitmap[idx / 8] &= ~(1 << (idx % 8));
}
static void prepare_bitmaps(void)
{
	memset(bitmap_00000000, 0xff, sizeof(bitmap_00000000));
	memset(bitmap_00000000_write, 0xff, sizeof(bitmap_00000000_write));
	memset(bitmap_40000000, 0xff, sizeof(bitmap_40000000));
	memset(bitmap_c0000000, 0xff, sizeof(bitmap_c0000000));
	memset(bitmap_c0000000_read, 0xff, sizeof(bitmap_c0000000_read));

	deny_msr(bitmap_00000000_write, MSR_IA32_POWER_CTL);
	deny_msr(bitmap_c0000000_read, MSR_SYSCALL_MASK);
	deny_msr(bitmap_c0000000_read, MSR_GS_BASE);
}
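/*
 * The three deny_msr() calls punch the only holes in otherwise all-allow
 * bitmaps: writes to MSR_IA32_POWER_CTL and reads of MSR_SYSCALL_MASK and
 * MSR_GS_BASE. These are exactly the accesses guest_msr_calls() expects
 * to trap to userspace while filter_deny is installed.
 */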
struct kvm_msr_filter filter_deny = {
	.flags = KVM_MSR_FILTER_DEFAULT_DENY,
	.ranges = {
		{
			.flags = KVM_MSR_FILTER_READ,
			.base = 0x00000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_00000000,
		}, {
			.flags = KVM_MSR_FILTER_WRITE,
			.base = 0x00000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_00000000_write,
		}, {
			.flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
			.base = 0x40000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_40000000,
		}, {
			.flags = KVM_MSR_FILTER_READ,
			.base = 0xc0000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_c0000000_read,
		}, {
			.flags = KVM_MSR_FILTER_WRITE,
			.base = 0xc0000000,
			.nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
			.bitmap = bitmap_c0000000,
		}, {
			.flags = KVM_MSR_FILTER_WRITE | KVM_MSR_FILTER_READ,
			.base = 0xdeadbeef,
			.nmsrs = 1,
			.bitmap = bitmap_deadbeef,
		},
	},
};
struct kvm_msr_filter no_filter_deny = {
	.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
};
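/*
 * An empty KVM_MSR_FILTER_DEFAULT_ALLOW filter removes all filtering:
 * installing no_filter_deny mid-test returns MSR handling to the kernel,
 * which the second half of the deny test relies on.
 */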
/*
 * Note: Force test_rdmsr() to not be inlined to prevent the labels,
 * rdmsr_start and rdmsr_end, from being defined multiple times.
 */
static noinline uint64_t test_rdmsr(uint32_t msr)
{
	uint32_t a, d;

	guest_exception_count = 0;

	__asm__ __volatile__("rdmsr_start: rdmsr; rdmsr_end:" :
			"=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}
/*
 * Note: Force test_wrmsr() to not be inlined to prevent the labels,
 * wrmsr_start and wrmsr_end, from being defined multiple times.
 */
static noinline void test_wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	guest_exception_count = 0;

	__asm__ __volatile__("wrmsr_start: wrmsr; wrmsr_end:" ::
			"a"(a), "d"(d), "c"(msr) : "memory");
}

extern char rdmsr_start, rdmsr_end;
extern char wrmsr_start, wrmsr_end;
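/*
 * The asm labels above bound each potentially-faulting instruction so the
 * guest #GP handler can recognize the fault site and skip past it instead
 * of retrying and faulting forever.
 */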
/*
 * Note: Force test_em_rdmsr() to not be inlined to prevent the labels,
 * em_rdmsr_start and em_rdmsr_end, from being defined multiple times.
 */
static noinline uint64_t test_em_rdmsr(uint32_t msr)
{
	uint32_t a, d;

	guest_exception_count = 0;

	__asm__ __volatile__(KVM_FEP "em_rdmsr_start: rdmsr; em_rdmsr_end:" :
			"=a"(a), "=d"(d) : "c"(msr) : "memory");

	return a | ((uint64_t) d << 32);
}
/*
 * Note: Force test_em_wrmsr() to not be inlined to prevent the labels,
 * em_wrmsr_start and em_wrmsr_end, from being defined multiple times.
 */
static noinline void test_em_wrmsr(uint32_t msr, uint64_t value)
{
	uint32_t a = value;
	uint32_t d = value >> 32;

	guest_exception_count = 0;

	__asm__ __volatile__(KVM_FEP "em_wrmsr_start: wrmsr; em_wrmsr_end:" ::
			"a"(a), "d"(d), "c"(msr) : "memory");
}

extern char em_rdmsr_start, em_rdmsr_end;
extern char em_wrmsr_start, em_wrmsr_end;
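/*
 * KVM_FEP is the forced-emulation prefix: with the module parameter
 * kvm.force_emulation_prefix=1, KVM routes the prefixed rdmsr/wrmsr
 * through its instruction emulator, so the _em_ variants cover the
 * emulated MSR access path in addition to the hardware-exit path.
 */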
static void guest_code_filter_allow(void)
{
	uint64_t data;

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_XSS.
	 *
	 * A GP is thrown if anything other than 0 is written to
	 * MSR_IA32_XSS.
	 */
	data = test_rdmsr(MSR_IA32_XSS);
	GUEST_ASSERT(data == 0);
	GUEST_ASSERT(guest_exception_count == 0);

	test_wrmsr(MSR_IA32_XSS, 0);
	GUEST_ASSERT(guest_exception_count == 0);

	test_wrmsr(MSR_IA32_XSS, 1);
	GUEST_ASSERT(guest_exception_count == 1);

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_FLUSH_CMD.
	 *
	 * A GP is thrown if MSR_IA32_FLUSH_CMD is read
	 * from or if a value other than 1 is written to it.
	 */
	test_rdmsr(MSR_IA32_FLUSH_CMD);
	GUEST_ASSERT(guest_exception_count == 1);

	test_wrmsr(MSR_IA32_FLUSH_CMD, 0);
	GUEST_ASSERT(guest_exception_count == 1);

	test_wrmsr(MSR_IA32_FLUSH_CMD, 1);
	GUEST_ASSERT(guest_exception_count == 0);

	/*
	 * Test userspace intercepting rdmsr / wrmsr for MSR_NON_EXISTENT.
	 *
	 * Test that a fabricated MSR can pass through the kernel
	 * and be handled in userspace.
	 */
	test_wrmsr(MSR_NON_EXISTENT, 2);
	GUEST_ASSERT(guest_exception_count == 0);

	data = test_rdmsr(MSR_NON_EXISTENT);
	GUEST_ASSERT(data == 2);
	GUEST_ASSERT(guest_exception_count == 0);

	if (is_forced_emulation_enabled) {
		/* Let userspace know we aren't done. */
		GUEST_SYNC(0);

		/*
		 * Now run the same tests with the instruction emulator.
		 */
		data = test_em_rdmsr(MSR_IA32_XSS);
		GUEST_ASSERT(data == 0);
		GUEST_ASSERT(guest_exception_count == 0);
		test_em_wrmsr(MSR_IA32_XSS, 0);
		GUEST_ASSERT(guest_exception_count == 0);
		test_em_wrmsr(MSR_IA32_XSS, 1);
		GUEST_ASSERT(guest_exception_count == 1);

		test_em_rdmsr(MSR_IA32_FLUSH_CMD);
		GUEST_ASSERT(guest_exception_count == 1);
		test_em_wrmsr(MSR_IA32_FLUSH_CMD, 0);
		GUEST_ASSERT(guest_exception_count == 1);
		test_em_wrmsr(MSR_IA32_FLUSH_CMD, 1);
		GUEST_ASSERT(guest_exception_count == 0);

		test_em_wrmsr(MSR_NON_EXISTENT, 2);
		GUEST_ASSERT(guest_exception_count == 0);
		data = test_em_rdmsr(MSR_NON_EXISTENT);
		GUEST_ASSERT(data == 2);
		GUEST_ASSERT(guest_exception_count == 0);
	}

	GUEST_DONE();
}
static void guest_msr_calls(bool trapped)
{
	/* This goes into the in-kernel emulation */
	wrmsr(MSR_SYSCALL_MASK, 0);

	if (trapped) {
		/* This goes into user space emulation */
		GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) == MSR_SYSCALL_MASK);
		GUEST_ASSERT(rdmsr(MSR_GS_BASE) == MSR_GS_BASE);
	} else {
		GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) != MSR_SYSCALL_MASK);
		GUEST_ASSERT(rdmsr(MSR_GS_BASE) != MSR_GS_BASE);
	}

	/* If trapped == true, this goes into user space emulation */
	wrmsr(MSR_IA32_POWER_CTL, 0x1234);

	/* This goes into the in-kernel emulation */
	rdmsr(MSR_IA32_POWER_CTL);

	/* Invalid MSR, should always be handled by user space exit */
	GUEST_ASSERT(rdmsr(0xdeadbeef) == 0xdeadbeef);
	wrmsr(0xdeadbeef, 0x1234);
}
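/*
 * Exit accounting for the deny test: the trapped round traps two reads
 * (MSR_SYSCALL_MASK, MSR_GS_BASE) and one write (MSR_IA32_POWER_CTL) on
 * the filter, plus one read and one write of 0xdeadbeef as unknown MSRs;
 * the untrapped round only traps the two 0xdeadbeef accesses. That totals
 * the 4 rdmsr / 3 wrmsr exits asserted in msr_filter_deny.
 */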
static void guest_code_filter_deny(void)
{
	guest_msr_calls(true);

	/*
	 * Disable msr filtering, so that the kernel
	 * handles everything in the next round
	 */
	GUEST_SYNC(0);

	guest_msr_calls(false);

	GUEST_DONE();
}
static void guest_code_permission_bitmap(void)
{
	uint64_t data;

	data = test_rdmsr(MSR_FS_BASE);
	GUEST_ASSERT(data == MSR_FS_BASE);
	data = test_rdmsr(MSR_GS_BASE);
	GUEST_ASSERT(data != MSR_GS_BASE);

	/* Let userspace know to switch the filter */
	GUEST_SYNC(0);

	data = test_rdmsr(MSR_FS_BASE);
	GUEST_ASSERT(data != MSR_FS_BASE);
	data = test_rdmsr(MSR_GS_BASE);
	GUEST_ASSERT(data == MSR_GS_BASE);

	GUEST_DONE();
}
static void __guest_gp_handler(struct ex_regs *regs,
			       char *r_start, char *r_end,
			       char *w_start, char *w_end)
{
	if (regs->rip == (uintptr_t)r_start) {
		regs->rip = (uintptr_t)r_end;
		regs->rax = 0;
		regs->rdx = 0;
	} else if (regs->rip == (uintptr_t)w_start) {
		regs->rip = (uintptr_t)w_end;
	} else {
		GUEST_ASSERT(!"RIP is at an unknown location!");
	}

	++guest_exception_count;
}
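/*
 * For a faulting rdmsr the handler zeroes rax/rdx as well as skipping the
 * instruction, so a trapped read surfaces as a return value of 0 from
 * test_rdmsr() while guest_exception_count records that a #GP fired.
 */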
static void guest_gp_handler(struct ex_regs *regs)
{
	__guest_gp_handler(regs, &rdmsr_start, &rdmsr_end,
			   &wrmsr_start, &wrmsr_end);
}
static void guest_fep_gp_handler(struct ex_regs *regs)
{
	__guest_gp_handler(regs, &em_rdmsr_start, &em_rdmsr_end,
			   &em_wrmsr_start, &em_wrmsr_end);
}
static void check_for_guest_assert(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (vcpu->run->exit_reason == KVM_EXIT_IO &&
	    get_ucall(vcpu, &uc) == UCALL_ABORT) {
		REPORT_GUEST_ASSERT(uc);
	}
}
static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
{
	struct kvm_run *run = vcpu->run;

	check_for_guest_assert(vcpu);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_RDMSR);
	TEST_ASSERT(run->msr.index == msr_index,
		    "Unexpected msr (0x%04x), expected 0x%04x",
		    run->msr.index, msr_index);

	switch (run->msr.index) {
	case MSR_IA32_XSS:
		run->msr.data = 0;
		break;
	case MSR_IA32_FLUSH_CMD:
		run->msr.error = 1;
		break;
	case MSR_NON_EXISTENT:
		run->msr.data = msr_non_existent_data;
		break;
	case MSR_FS_BASE:
		run->msr.data = MSR_FS_BASE;
		break;
	case MSR_GS_BASE:
		run->msr.data = MSR_GS_BASE;
		break;
	default:
		TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
	}
}
static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
{
	struct kvm_run *run = vcpu->run;

	check_for_guest_assert(vcpu);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_WRMSR);
	TEST_ASSERT(run->msr.index == msr_index,
		    "Unexpected msr (0x%04x), expected 0x%04x",
		    run->msr.index, msr_index);

	switch (run->msr.index) {
	case MSR_IA32_XSS:
		if (run->msr.data != 0)
			run->msr.error = 1;
		break;
	case MSR_IA32_FLUSH_CMD:
		if (run->msr.data != 1)
			run->msr.error = 1;
		break;
	case MSR_NON_EXISTENT:
		msr_non_existent_data = run->msr.data;
		break;
	default:
		TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
	}
}
static void process_ucall_done(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	check_for_guest_assert(vcpu);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
		    "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
		    uc.cmd, UCALL_DONE);
}
static uint64_t process_ucall(struct kvm_vcpu *vcpu)
{
	struct ucall uc = {};

	check_for_guest_assert(vcpu);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		break;
	case UCALL_ABORT:
		check_for_guest_assert(vcpu);
		break;
	case UCALL_DONE:
		process_ucall_done(vcpu);
		break;
	default:
		TEST_ASSERT(false, "Unexpected ucall");
	}

	return uc.cmd;
}
static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu,
					 uint32_t msr_index)
{
	vcpu_run(vcpu);
	process_rdmsr(vcpu, msr_index);
}

static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu,
					 uint32_t msr_index)
{
	vcpu_run(vcpu);
	process_wrmsr(vcpu, msr_index);
}

static uint64_t run_guest_then_process_ucall(struct kvm_vcpu *vcpu)
{
	vcpu_run(vcpu);
	return process_ucall(vcpu);
}

static void run_guest_then_process_ucall_done(struct kvm_vcpu *vcpu)
{
	vcpu_run(vcpu);
	process_ucall_done(vcpu);
}
KVM_ONE_VCPU_TEST_SUITE(user_msr);
KVM_ONE_VCPU_TEST(user_msr, msr_filter_allow, guest_code_filter_allow)
{
	struct kvm_vm *vm = vcpu->vm;
	uint64_t cmd;
	int rc;

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);

	vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);

	/* Process guest code userspace exits. */
	run_guest_then_process_rdmsr(vcpu, MSR_IA32_XSS);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);

	run_guest_then_process_rdmsr(vcpu, MSR_IA32_FLUSH_CMD);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
	run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);

	run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
	run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);

	vcpu_run(vcpu);
	cmd = process_ucall(vcpu);

	if (is_forced_emulation_enabled) {
		TEST_ASSERT_EQ(cmd, UCALL_SYNC);
		vm_install_exception_handler(vm, GP_VECTOR, guest_fep_gp_handler);

		/* Process emulated rdmsr and wrmsr instructions. */
		run_guest_then_process_rdmsr(vcpu, MSR_IA32_XSS);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);

		run_guest_then_process_rdmsr(vcpu, MSR_IA32_FLUSH_CMD);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
		run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);

		run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
		run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);

		/* Confirm the guest completed without issues. */
		run_guest_then_process_ucall_done(vcpu);
	} else {
		TEST_ASSERT_EQ(cmd, UCALL_DONE);
		printf("To run the instruction emulated tests set the module parameter 'kvm.force_emulation_prefix=1'\n");
	}
}
static int handle_ucall(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	case UCALL_SYNC:
		vm_ioctl(vcpu->vm, KVM_X86_SET_MSR_FILTER, &no_filter_deny);
		break;
	case UCALL_DONE:
		return 1;
	default:
		TEST_FAIL("Unknown ucall %lu", uc.cmd);
	}

	return 0;
}
static void handle_rdmsr(struct kvm_run *run)
{
	/* Reflect the MSR index back as the read value. */
	run->msr.data = run->msr.index;
	msr_reads++;

	if (run->msr.index == MSR_SYSCALL_MASK ||
	    run->msr.index == MSR_GS_BASE) {
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
			    "MSR read trap w/o access fault");
	}

	if (run->msr.index == 0xdeadbeef) {
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
			    "MSR deadbeef read trap w/o inval fault");
	}
}
static void handle_wrmsr(struct kvm_run *run)
{
	/* The written value is ignored; just count the exit. */
	msr_writes++;

	if (run->msr.index == MSR_IA32_POWER_CTL) {
		TEST_ASSERT(run->msr.data == 0x1234,
			    "MSR data for MSR_IA32_POWER_CTL incorrect");
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
			    "MSR_IA32_POWER_CTL trap w/o access fault");
	}

	if (run->msr.index == 0xdeadbeef) {
		TEST_ASSERT(run->msr.data == 0x1234,
			    "MSR data for deadbeef incorrect");
		TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
			    "deadbeef trap w/o inval fault");
	}
}
KVM_ONE_VCPU_TEST(user_msr, msr_filter_deny, guest_code_filter_deny)
{
	struct kvm_vm *vm = vcpu->vm;
	struct kvm_run *run = vcpu->run;
	int rc;

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_INVAL |
						      KVM_MSR_EXIT_REASON_UNKNOWN |
						      KVM_MSR_EXIT_REASON_FILTER);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	prepare_bitmaps();
	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny);

	while (1) {
		vcpu_run(vcpu);

		switch (run->exit_reason) {
		case KVM_EXIT_X86_RDMSR:
			handle_rdmsr(run);
			break;
		case KVM_EXIT_X86_WRMSR:
			handle_wrmsr(run);
			break;
		case KVM_EXIT_IO:
			if (handle_ucall(vcpu))
				goto done;
			break;
		}
	}

done:
	TEST_ASSERT(msr_reads == 4, "Handled 4 rdmsr in user space");
	TEST_ASSERT(msr_writes == 3, "Handled 3 wrmsr in user space");
}
KVM_ONE_VCPU_TEST(user_msr, msr_permission_bitmap, guest_code_permission_bitmap)
{
	struct kvm_vm *vm = vcpu->vm;
	int rc;

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
	vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_fs);
	run_guest_then_process_rdmsr(vcpu, MSR_FS_BASE);
	TEST_ASSERT(run_guest_then_process_ucall(vcpu) == UCALL_SYNC,
		    "Expected ucall state to be UCALL_SYNC.");
	vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
	run_guest_then_process_rdmsr(vcpu, MSR_GS_BASE);
	run_guest_then_process_ucall_done(vcpu);
}
#define test_user_exit_msr_ioctl(vm, cmd, arg, flag, valid_mask)	\
({									\
	int r = __vm_ioctl(vm, cmd, arg);				\
									\
	if (flag & valid_mask)						\
		TEST_ASSERT(!r, __KVM_IOCTL_ERROR(#cmd, r));		\
	else								\
		TEST_ASSERT(r == -1 && errno == EINVAL,			\
			    "Wanted EINVAL for %s with flag = 0x%llx, got rc: %i errno: %i (%s)", \
			    #cmd, flag, r, errno, strerror(errno));	\
})
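/*
 * The macro probes one flag bit per invocation: a bit inside valid_mask
 * must make the ioctl succeed, while any other single bit must be rejected
 * with EINVAL, proving KVM validates reserved flag bits.
 */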
static void run_user_space_msr_flag_test(struct kvm_vm *vm)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_X86_USER_SPACE_MSR };
	int nflags = sizeof(cap.args[0]) * BITS_PER_BYTE;
	int rc;
	int i;

	rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
	TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");

	for (i = 0; i < nflags; i++) {
		cap.args[0] = BIT_ULL(i);
		test_user_exit_msr_ioctl(vm, KVM_ENABLE_CAP, &cap,
					 BIT_ULL(i), KVM_MSR_EXIT_REASON_VALID_MASK);
	}
}
static void run_msr_filter_flag_test(struct kvm_vm *vm)
{
	u64 deny_bits = 0;
	struct kvm_msr_filter filter = {
		.flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
		.ranges = {
			{
				.flags = KVM_MSR_FILTER_READ,
				.nmsrs = 1,
				.base = 0,
				.bitmap = (uint8_t *)&deny_bits,
			},
		},
	};
	int nflags;
	int rc;
	int i;

	rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
	TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");

	nflags = sizeof(filter.flags) * BITS_PER_BYTE;
	for (i = 0; i < nflags; i++) {
		filter.flags = BIT_ULL(i);
		test_user_exit_msr_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter,
					 BIT_ULL(i), KVM_MSR_FILTER_VALID_MASK);
	}

	filter.flags = KVM_MSR_FILTER_DEFAULT_ALLOW;
	nflags = sizeof(filter.ranges[0].flags) * BITS_PER_BYTE;
	for (i = 0; i < nflags; i++) {
		filter.ranges[0].flags = BIT_ULL(i);
		test_user_exit_msr_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter,
					 BIT_ULL(i), KVM_MSR_FILTER_RANGE_VALID_MASK);
	}
}
/* Test that attempts to write to the unused bits in a flag fail. */
KVM_ONE_VCPU_TEST(user_msr, user_exit_msr_flags, NULL)
{
	struct kvm_vm *vm = vcpu->vm;

	/* Test flags for KVM_CAP_X86_USER_SPACE_MSR. */
	run_user_space_msr_flag_test(vm);

	/* Test flags and range flags for KVM_X86_SET_MSR_FILTER. */
	run_msr_filter_flag_test(vm);
}
int main(int argc, char *argv[])
{
	return test_harness_run(argc, argv);
}