/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity  <avi@qumranet.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>

static const char *vmx_msr_name[] = {
        "MSR_EFER", "MSR_STAR", "MSR_CSTAR",
        "MSR_KERNEL_GS_BASE", "MSR_SYSCALL_MASK", "MSR_LSTAR"
};

#define NR_VMX_MSR (sizeof(vmx_msr_name) / sizeof(char *))
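
/*
 * NR_VMX_MSR must stay in sync with the layout of vcpu->guest_msrs[];
 * with ARRAY_SIZE() available one would normally write
 * ARRAY_SIZE(vmx_msr_name) instead of the open-coded sizeof division.
 */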

static unsigned long vmcs_readl(unsigned long field)
{
        unsigned long value;

        asm volatile (ASM_VMX_VMREAD_RDX_RAX
                      : "=a"(value) : "d"(field) : "cc");
        return value;
}

static u16 vmcs_read16(unsigned long field)
{
        return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
        return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
        return vmcs_readl(field);
#else
        return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}
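
/*
 * On 32-bit hosts a 64-bit VMCS field is read as two VMREADs: the
 * companion encoding at field+1 holds the high 32 bits of the value.
 */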

void show_msrs(struct kvm_vcpu *vcpu)
{
        int i;

        for (i = 0; i < NR_VMX_MSR; ++i) {
                vcpu_printf(vcpu, "%s: %s=0x%llx\n",
                            __FUNCTION__,
                            vmx_msr_name[i],
                            vcpu->guest_msrs[i].data);
        }
}

void show_code(struct kvm_vcpu *vcpu)
{
        gva_t rip = vmcs_readl(GUEST_RIP);
        u8 code[50];            /* dump length here is arbitrary */
        char buf[30 + 3 * sizeof code];
        int i;

        if (!is_long_mode(vcpu))
                rip += vmcs_readl(GUEST_CS_BASE);

        kvm_read_guest(vcpu, rip, sizeof code, code);
        for (i = 0; i < sizeof code; ++i)
                sprintf(buf + i * 3, " %02x", code[i]);
        vcpu_printf(vcpu, "code: %lx%s\n", rip, buf);
}
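
/*
 * Note: the return value of kvm_read_guest() is ignored above, so a
 * partially failed read dumps whatever was left on the stack; this is a
 * best-effort debug helper, not a correctness path.
 */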

struct gate_struct {
        u16 offset_low;
        u16 segment;
        unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
        u16 offset_middle;
        u32 offset_high;
        u32 zero1;
} __attribute__((packed));
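
/*
 * Layout of a 64-bit IDT gate descriptor: the handler address is split
 * across offset_low (bits 15:0), offset_middle (bits 31:16) and
 * offset_high (bits 63:32); ist selects an interrupt stack, dpl the
 * required privilege, and p marks the gate present.
 */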

void show_irq(struct kvm_vcpu *vcpu, int irq)
{
        unsigned long idt_base = vmcs_readl(GUEST_IDTR_BASE);
        unsigned long idt_limit = vmcs_readl(GUEST_IDTR_LIMIT);
        struct gate_struct gate;

        if (!is_long_mode(vcpu))
                vcpu_printf(vcpu, "%s: not in long mode\n", __FUNCTION__);

        if (!is_long_mode(vcpu) || idt_limit < irq * sizeof(gate)) {
                vcpu_printf(vcpu, "%s: 0x%x read_guest err\n",
                            __FUNCTION__, irq);
                return;
        }

        if (kvm_read_guest(vcpu, idt_base + irq * sizeof(gate),
                           sizeof(gate), &gate) != sizeof(gate)) {
                vcpu_printf(vcpu, "%s: 0x%x read_guest err\n",
                            __FUNCTION__, irq);
                return;
        }

        vcpu_printf(vcpu, "%s: 0x%x handler 0x%llx\n",
                    __FUNCTION__, irq,
                    ((u64)gate.offset_high << 32) |
                    ((u64)gate.offset_middle << 16) |
                    gate.offset_low);
}

void show_page(struct kvm_vcpu *vcpu,
               gva_t addr)
{
        int i, j;
        u64 *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

        if (!buf)
                return;

        if (kvm_read_guest(vcpu, addr, PAGE_SIZE, buf)) {
                for (i = 0; i < PAGE_SIZE / sizeof(u64); i++) {
                        u8 *ptr = (u8 *)&buf[i];

                        vcpu_printf(vcpu, " 0x%16.16lx:",
                                    addr + i * sizeof(u64));
                        for (j = 0; j < sizeof(u64); j++)
                                vcpu_printf(vcpu, " 0x%2.2x", ptr[j]);
                        vcpu_printf(vcpu, "\n");
                }
        }
        kfree(buf);
}

void show_u64(struct kvm_vcpu *vcpu, gva_t addr)
{
        u64 buf;
        u8 *ptr = (u8 *)&buf;
        int j;

        if (kvm_read_guest(vcpu, addr, sizeof(u64), &buf) == sizeof(u64)) {
                vcpu_printf(vcpu, " 0x%16.16lx:", addr);
                for (j = 0; j < sizeof(u64); j++)
                        vcpu_printf(vcpu, " 0x%2.2x", ptr[j]);
                vcpu_printf(vcpu, "\n");
        }
}

#define IA32_DEBUGCTL_RESERVED_BITS 0xfffffffffffffe3cULL

static int is_canonical(unsigned long addr)
{
        return addr == ((long)addr << 16) >> 16;
}
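
/*
 * A 64-bit virtual address is canonical when bits 63:47 are all copies
 * of bit 47: 0x00007fffffffffff and 0xffff800000000000 are canonical,
 * 0x0000800000000000 is not.  The shift pair sign-extends bit 47 and
 * compares the result with the original address.
 */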

int vm_entry_test_guest(struct kvm_vcpu *vcpu)
{
        unsigned long cr0, cr3, cr4, dr7;
        u64 ia32_debugctl;
        unsigned long sysenter_esp;
        unsigned long sysenter_eip;
        unsigned long rflags;
        int long_mode;
        int virtual8086;
        int r = 1;
        int i;

#define RFLAGS_VM (1 << 17)
#define RFLAGS_IF (1 << 9)

#define VIR8086_SEG_BASE_TEST(seg)\
        if (vmcs_readl(GUEST_##seg##_BASE) != \
            (unsigned long)vmcs_read16(GUEST_##seg##_SELECTOR) << 4) {\
                vcpu_printf(vcpu, "%s: "#seg" base 0x%lx in "\
                            "virtual8086 is not "#seg" selector 0x%x"\
                            " shifted left 4 bits\n",\
                            __FUNCTION__,\
                            vmcs_readl(GUEST_##seg##_BASE),\
                            vmcs_read16(GUEST_##seg##_SELECTOR));\
                r = 0;\
        }

#define VIR8086_SEG_LIMIT_TEST(seg)\
        if (vmcs_readl(GUEST_##seg##_LIMIT) != 0x0ffff) { \
                vcpu_printf(vcpu, "%s: "#seg" limit 0x%lx in "\
                            "virtual8086 is not 0xffff\n",\
                            __FUNCTION__,\
                            vmcs_readl(GUEST_##seg##_LIMIT));\
                r = 0;\
        }

#define VIR8086_SEG_AR_TEST(seg)\
        if (vmcs_read32(GUEST_##seg##_AR_BYTES) != 0x0f3) { \
                vcpu_printf(vcpu, "%s: "#seg" AR 0x%x in "\
                            "virtual8086 is not 0xf3\n",\
                            __FUNCTION__,\
                            vmcs_read32(GUEST_##seg##_AR_BYTES));\
                r = 0;\
        }
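
/*
 * 0xf3 is the only access-rights value architecturally allowed for
 * virtual-8086 segments: present, DPL 3, S=1, type 3 (read/write
 * accessed data segment).
 */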

        cr0 = vmcs_readl(GUEST_CR0);

        if (!(cr0 & CR0_PG_MASK)) {
                vcpu_printf(vcpu, "%s: cr0 0x%lx, PG is not set\n",
                            __FUNCTION__, cr0);
                r = 0;
        }

        if (!(cr0 & CR0_PE_MASK)) {
                vcpu_printf(vcpu, "%s: cr0 0x%lx, PE is not set\n",
                            __FUNCTION__, cr0);
                r = 0;
        }

        if (!(cr0 & CR0_NE_MASK)) {
                vcpu_printf(vcpu, "%s: cr0 0x%lx, NE is not set\n",
                            __FUNCTION__, cr0);
                r = 0;
        }

        if (!(cr0 & CR0_WP_MASK)) {
                vcpu_printf(vcpu, "%s: cr0 0x%lx, WP is not set\n",
                            __FUNCTION__, cr0);
                r = 0;
        }

        cr4 = vmcs_readl(GUEST_CR4);

        if (!(cr4 & CR4_VMXE_MASK)) {
                vcpu_printf(vcpu, "%s: cr4 0x%lx, VMXE is not set\n",
                            __FUNCTION__, cr4);
                r = 0;
        }

        if (!(cr4 & CR4_PAE_MASK)) {
                vcpu_printf(vcpu, "%s: cr4 0x%lx, PAE is not set\n",
                            __FUNCTION__, cr4);
                r = 0;
        }

        ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);

        if (ia32_debugctl & IA32_DEBUGCTL_RESERVED_BITS) {
                vcpu_printf(vcpu, "%s: ia32_debugctl 0x%llx, reserved bits\n",
                            __FUNCTION__, ia32_debugctl);
                r = 0;
        }

        long_mode = is_long_mode(vcpu);

        if (long_mode && !(cr4 & CR4_PAE_MASK)) {
                vcpu_printf(vcpu, "%s: long mode and not PAE\n",
                            __FUNCTION__);
                r = 0;
        }

        cr3 = vmcs_readl(GUEST_CR3);

        if (cr3 & CR3_L_MODE_RESEVED_BITS) {
                vcpu_printf(vcpu, "%s: cr3 0x%lx, reserved bits\n",
                            __FUNCTION__, cr3);
                r = 0;
        }

        if (!long_mode && (cr4 & CR4_PAE_MASK)) {
                /* check the 4 PDPTEs for reserved bits */
                unsigned long pdpt_pfn = cr3 >> PAGE_SHIFT;
                /* 32-byte-aligned table; scale the byte offset to a u64 index */
                unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
                u64 pdpte = 0;
                u64 *pdpt = kmap_atomic(pfn_to_page(pdpt_pfn), KM_USER0);

                for (i = 0; i < 4; ++i) {
                        pdpte = pdpt[offset + i];
                        if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull))
                                break;
                }

                kunmap_atomic(pdpt, KM_USER0);

                if (i != 4) {
                        vcpu_printf(vcpu, "%s: pae cr3[%d] 0x%llx, reserved bits\n",
                                    __FUNCTION__, i, pdpte);
                        r = 0;
                }
        }
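
        /*
         * The PDPTE mask 0xfffffff0000001e6 covers the bits the SDM reserves
         * in PAE page-directory-pointer-table entries: bits 1-2, bits 5-8,
         * and bits 63:36 (beyond the 36-bit physical range of the processors
         * this was written for); a present entry (bit 0 set) with any of
         * them set would fail VM entry.
         */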

        dr7 = vmcs_readl(GUEST_DR7);

        if (dr7 & ~((1ULL << 32) - 1)) {
                vcpu_printf(vcpu, "%s: dr7 0x%lx, reserved bits\n",
                            __FUNCTION__, dr7);
                r = 0;
        }

        sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);

        if (!is_canonical(sysenter_esp)) {
                vcpu_printf(vcpu, "%s: sysenter_esp 0x%lx, not canonical\n",
                            __FUNCTION__, sysenter_esp);
                r = 0;
        }

        sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);

        if (!is_canonical(sysenter_eip)) {
                vcpu_printf(vcpu, "%s: sysenter_eip 0x%lx, not canonical\n",
                            __FUNCTION__, sysenter_eip);
                r = 0;
        }

        rflags = vmcs_readl(GUEST_RFLAGS);
        virtual8086 = rflags & RFLAGS_VM;

        if (vmcs_read16(GUEST_TR_SELECTOR) & SELECTOR_TI_MASK) {
                vcpu_printf(vcpu, "%s: tr selector 0x%x, TI is set\n",
                            __FUNCTION__, vmcs_read16(GUEST_TR_SELECTOR));
                r = 0;
        }

        if (!(vmcs_read32(GUEST_LDTR_AR_BYTES) & AR_UNUSABLE_MASK) &&
            vmcs_read16(GUEST_LDTR_SELECTOR) & SELECTOR_TI_MASK) {
                vcpu_printf(vcpu, "%s: ldtr selector 0x%x,"
                            " is usable and TI is set\n",
                            __FUNCTION__, vmcs_read16(GUEST_LDTR_SELECTOR));
                r = 0;
        }

        if ((vmcs_read16(GUEST_SS_SELECTOR) & SELECTOR_RPL_MASK) !=
            (vmcs_read16(GUEST_CS_SELECTOR) & SELECTOR_RPL_MASK)) {
                vcpu_printf(vcpu, "%s: ss selector 0x%x cs selector 0x%x,"
                            " RPL mismatch\n",
                            __FUNCTION__,
                            vmcs_read16(GUEST_SS_SELECTOR),
                            vmcs_read16(GUEST_CS_SELECTOR));
                r = 0;
        }

        if (virtual8086) {
                VIR8086_SEG_BASE_TEST(CS);
                VIR8086_SEG_BASE_TEST(SS);
                VIR8086_SEG_BASE_TEST(DS);
                VIR8086_SEG_BASE_TEST(ES);
                VIR8086_SEG_BASE_TEST(FS);
                VIR8086_SEG_BASE_TEST(GS);
        }

        if (!is_canonical(vmcs_readl(GUEST_TR_BASE)) ||
            !is_canonical(vmcs_readl(GUEST_FS_BASE)) ||
            !is_canonical(vmcs_readl(GUEST_GS_BASE))) {
                vcpu_printf(vcpu, "%s: TR 0x%lx FS 0x%lx or GS 0x%lx base"
                            " is not canonical\n",
                            __FUNCTION__,
                            vmcs_readl(GUEST_TR_BASE),
                            vmcs_readl(GUEST_FS_BASE),
                            vmcs_readl(GUEST_GS_BASE));
                r = 0;
        }

        if (!(vmcs_read32(GUEST_LDTR_AR_BYTES) & AR_UNUSABLE_MASK) &&
            !is_canonical(vmcs_readl(GUEST_LDTR_BASE))) {
                vcpu_printf(vcpu, "%s: LDTR base 0x%lx, usable and is not"
                            " canonical\n",
                            __FUNCTION__,
                            vmcs_readl(GUEST_LDTR_BASE));
                r = 0;
        }

        if ((vmcs_readl(GUEST_CS_BASE) & ~((1ULL << 32) - 1))) {
                vcpu_printf(vcpu, "%s: CS base 0x%lx, not all bits 63-32"
                            " are zero\n",
                            __FUNCTION__,
                            vmcs_readl(GUEST_CS_BASE));
                r = 0;
        }

#define SEG_BASE_TEST(seg)\
        if (!(vmcs_read32(GUEST_##seg##_AR_BYTES) & AR_UNUSABLE_MASK) &&\
            (vmcs_readl(GUEST_##seg##_BASE) & ~((1ULL << 32) - 1))) {\
                vcpu_printf(vcpu, "%s: "#seg" base 0x%lx, is usable and not"\
                            " all bits 63-32 are zero\n",\
                            __FUNCTION__,\
                            vmcs_readl(GUEST_##seg##_BASE));\
                r = 0;\
        }

        SEG_BASE_TEST(SS);
        SEG_BASE_TEST(DS);
        SEG_BASE_TEST(ES);

        if (virtual8086) {
                VIR8086_SEG_LIMIT_TEST(CS);
                VIR8086_SEG_LIMIT_TEST(SS);
                VIR8086_SEG_LIMIT_TEST(DS);
                VIR8086_SEG_LIMIT_TEST(ES);
                VIR8086_SEG_LIMIT_TEST(FS);
                VIR8086_SEG_LIMIT_TEST(GS);
        }

        if (virtual8086) {
                VIR8086_SEG_AR_TEST(CS);
                VIR8086_SEG_AR_TEST(SS);
                VIR8086_SEG_AR_TEST(DS);
                VIR8086_SEG_AR_TEST(ES);
                VIR8086_SEG_AR_TEST(FS);
                VIR8086_SEG_AR_TEST(GS);
        }

        {
                u32 cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);
                u32 ss_ar = vmcs_read32(GUEST_SS_AR_BYTES);
                u32 tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
                u32 ldtr_ar = vmcs_read32(GUEST_LDTR_AR_BYTES);

#define SEG_G_TEST(seg) { \
        u32 lim = vmcs_read32(GUEST_##seg##_LIMIT); \
        u32 ar = vmcs_read32(GUEST_##seg##_AR_BYTES); \
        int err = 0; \
        if (((lim & ~PAGE_MASK) != ~PAGE_MASK) && (ar & AR_G_MASK)) \
                err = 1; \
        if ((lim & ~((1u << 20) - 1)) && !(ar & AR_G_MASK)) \
                err = 1; \
        if (err) { \
                vcpu_printf(vcpu, "%s: "#seg" AR 0x%x, G err. lim" \
                            " is 0x%x\n", \
                            __FUNCTION__, ar, lim); \
                r = 0; \
        } \
}
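
        /*
         * Granularity rule from the SDM: if any bit in limit[11:0] is
         * clear, the G flag must be 0; if any bit in limit[31:20] is set,
         * G must be 1.  The two tests above flag exactly those
         * combinations.
         */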

                if (!(cs_ar & AR_TYPE_ACCESSES_MASK)) {
                        vcpu_printf(vcpu, "%s: cs AR 0x%x, accessed bit is clear\n",
                                    __FUNCTION__, cs_ar);
                        r = 0;
                }

                if (!(cs_ar & AR_TYPE_CODE_MASK)) {
                        vcpu_printf(vcpu, "%s: cs AR 0x%x, code is clear\n",
                                    __FUNCTION__, cs_ar);
                        r = 0;
                }

                if (!(cs_ar & AR_S_MASK)) {
                        vcpu_printf(vcpu, "%s: cs AR 0x%x, type is sys\n",
                                    __FUNCTION__, cs_ar);
                        r = 0;
                }

                if ((cs_ar & AR_TYPE_MASK) >= 8 && (cs_ar & AR_TYPE_MASK) < 12 &&
                    AR_DPL(cs_ar) !=
                    (vmcs_read16(GUEST_CS_SELECTOR) & SELECTOR_RPL_MASK)) {
                        vcpu_printf(vcpu, "%s: cs AR 0x%x, "
                                    "DPL(0x%x) not as RPL(0x%x)\n",
                                    __FUNCTION__, cs_ar, AR_DPL(cs_ar),
                                    vmcs_read16(GUEST_CS_SELECTOR) & SELECTOR_RPL_MASK);
                        r = 0;
                }

                if ((cs_ar & AR_TYPE_MASK) >= 13 && (cs_ar & AR_TYPE_MASK) < 16 &&
                    AR_DPL(cs_ar) >
                    (vmcs_read16(GUEST_CS_SELECTOR) & SELECTOR_RPL_MASK)) {
                        vcpu_printf(vcpu, "%s: cs AR 0x%x, "
                                    "DPL greater than RPL\n",
                                    __FUNCTION__, cs_ar);
                        r = 0;
                }

                if (!(cs_ar & AR_P_MASK)) {
                        vcpu_printf(vcpu, "%s: CS AR 0x%x, not "
                                    "present\n",
                                    __FUNCTION__, cs_ar);
                        r = 0;
                }

                if ((cs_ar & AR_RESERVD_MASK)) {
                        vcpu_printf(vcpu, "%s: CS AR 0x%x, reserved"
                                    " bits are set\n",
                                    __FUNCTION__, cs_ar);
                        r = 0;
                }

                if (long_mode && (cs_ar & AR_L_MASK) && (cs_ar & AR_DB_MASK)) {
                        vcpu_printf(vcpu, "%s: CS AR 0x%x, DB and L are set"
                                    " in long mode\n",
                                    __FUNCTION__, cs_ar);
                        r = 0;
                }

                if (!(ss_ar & AR_UNUSABLE_MASK)) {
                        if ((ss_ar & AR_TYPE_MASK) != 3 &&
                            (ss_ar & AR_TYPE_MASK) != 7) {
                                vcpu_printf(vcpu, "%s: ss AR 0x%x, usable and type"
                                            " is not 3 or 7\n",
                                            __FUNCTION__, ss_ar);
                                r = 0;
                        }

                        if (!(ss_ar & AR_S_MASK)) {
                                vcpu_printf(vcpu, "%s: ss AR 0x%x, usable and"
                                            " not S\n",
                                            __FUNCTION__, ss_ar);
                                r = 0;
                        }
                        if (!(ss_ar & AR_P_MASK)) {
                                vcpu_printf(vcpu, "%s: SS AR 0x%x, usable"
                                            " and not present\n",
                                            __FUNCTION__, ss_ar);
                                r = 0;
                        }

                        if ((ss_ar & AR_RESERVD_MASK)) {
                                vcpu_printf(vcpu, "%s: SS AR 0x%x, reserved"
                                            " bits are set\n",
                                            __FUNCTION__, ss_ar);
                                r = 0;
                        }
                }

                if (AR_DPL(ss_ar) !=
                    (vmcs_read16(GUEST_SS_SELECTOR) & SELECTOR_RPL_MASK)) {
                        vcpu_printf(vcpu, "%s: SS AR 0x%x, "
                                    "DPL not as RPL\n",
                                    __FUNCTION__, ss_ar);
                        r = 0;
                }

#define SEG_AR_TEST(seg) {\
        u32 ar = vmcs_read32(GUEST_##seg##_AR_BYTES);\
        if (!(ar & AR_UNUSABLE_MASK)) {\
                if (!(ar & AR_TYPE_ACCESSES_MASK)) {\
                        vcpu_printf(vcpu, "%s: "#seg" AR 0x%x, "\
                                    "usable and not accessed\n",\
                                    __FUNCTION__, ar);\
                        r = 0;\
                }\
                if ((ar & AR_TYPE_CODE_MASK) &&\
                    !(ar & AR_TYPE_READABLE_MASK)) {\
                        vcpu_printf(vcpu, "%s: "#seg" AR 0x%x, "\
                                    "code and not readable\n",\
                                    __FUNCTION__, ar);\
                        r = 0;\
                }\
                if (!(ar & AR_S_MASK)) {\
                        vcpu_printf(vcpu, "%s: "#seg" AR 0x%x, usable and"\
                                    " type is sys\n",\
                                    __FUNCTION__, ar);\
                        r = 0;\
                }\
                if ((ar & AR_TYPE_MASK) >= 0 && \
                    (ar & AR_TYPE_MASK) < 12 && \
                    AR_DPL(ar) < (vmcs_read16(GUEST_##seg##_SELECTOR) & \
                                  SELECTOR_RPL_MASK)) {\
                        vcpu_printf(vcpu, "%s: "#seg" AR 0x%x, "\
                                    "DPL less than RPL\n",\
                                    __FUNCTION__, ar);\
                        r = 0;\
                }\
                if (!(ar & AR_P_MASK)) {\
                        vcpu_printf(vcpu, "%s: "#seg" AR 0x%x, usable and"\
                                    " not present\n",\
                                    __FUNCTION__, ar);\
                        r = 0;\
                }\
                if ((ar & AR_RESERVD_MASK)) {\
                        vcpu_printf(vcpu, "%s: "#seg" AR"\
                                    " 0x%x, reserved bits are set\n",\
                                    __FUNCTION__, ar);\
                        r = 0;\
                }\
        }\
}

                SEG_AR_TEST(DS);
                SEG_AR_TEST(ES);
                SEG_AR_TEST(FS);
                SEG_AR_TEST(GS);

                if (long_mode) {
                        if ((tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
                                vcpu_printf(vcpu, "%s: TR AR 0x%x, long"
                                            " mode and not 64bit busy"
                                            " tss\n",
                                            __FUNCTION__, tr_ar);
                                r = 0;
                        }
                } else {
                        if ((tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_32_TSS &&
                            (tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_16_TSS) {
                                vcpu_printf(vcpu, "%s: TR AR 0x%x, legacy"
                                            " mode and not 16/32bit "
                                            "busy tss\n",
                                            __FUNCTION__, tr_ar);
                                r = 0;
                        }
                }

                if ((tr_ar & AR_S_MASK)) {
                        vcpu_printf(vcpu, "%s: TR AR 0x%x, S is set\n",
                                    __FUNCTION__, tr_ar);
                        r = 0;
                }

                if (!(tr_ar & AR_P_MASK)) {
                        vcpu_printf(vcpu, "%s: TR AR 0x%x, P is not set\n",
                                    __FUNCTION__, tr_ar);
                        r = 0;
                }

                if ((tr_ar & (AR_RESERVD_MASK | AR_UNUSABLE_MASK))) {
                        vcpu_printf(vcpu, "%s: TR AR 0x%x, reserved bits are"
                                    " set\n",
                                    __FUNCTION__, tr_ar);
                        r = 0;
                }

                if (!(ldtr_ar & AR_UNUSABLE_MASK)) {
                        if ((ldtr_ar & AR_TYPE_MASK) != AR_TYPE_LDT) {
                                vcpu_printf(vcpu, "%s: LDTR AR 0x%x,"
                                            " type is not LDT\n",
                                            __FUNCTION__, ldtr_ar);
                                r = 0;
                        }

                        if ((ldtr_ar & AR_S_MASK)) {
                                vcpu_printf(vcpu, "%s: LDTR AR 0x%x,"
                                            " S is set\n",
                                            __FUNCTION__, ldtr_ar);
                                r = 0;
                        }

                        if (!(ldtr_ar & AR_P_MASK)) {
                                vcpu_printf(vcpu, "%s: LDTR AR 0x%x,"
                                            " P is not set\n",
                                            __FUNCTION__, ldtr_ar);
                                r = 0;
                        }

                        if ((ldtr_ar & AR_RESERVD_MASK)) {
                                vcpu_printf(vcpu, "%s: LDTR AR 0x%x,"
                                            " reserved bits are set\n",
                                            __FUNCTION__, ldtr_ar);
                                r = 0;
                        }
                }
        }

#define IDT_GDT_TEST(reg)\
        if (!is_canonical(vmcs_readl(GUEST_##reg##_BASE))) {\
                vcpu_printf(vcpu, "%s: "#reg" BASE 0x%lx, not canonical\n",\
                            __FUNCTION__,\
                            vmcs_readl(GUEST_##reg##_BASE));\
                r = 0;\
        }\
        if (vmcs_read32(GUEST_##reg##_LIMIT) >> 16) {\
                vcpu_printf(vcpu, "%s: "#reg" LIMIT 0x%x, size err\n",\
                            __FUNCTION__,\
                            vmcs_read32(GUEST_##reg##_LIMIT));\
                r = 0;\
        }

        IDT_GDT_TEST(GDTR);
        IDT_GDT_TEST(IDTR);

        if ((!long_mode || !(vmcs_read32(GUEST_CS_AR_BYTES) & AR_L_MASK)) &&
            vmcs_readl(GUEST_RIP) & ~((1ULL << 32) - 1)) {
                vcpu_printf(vcpu, "%s: RIP 0x%lx, size err\n",
                            __FUNCTION__,
                            vmcs_readl(GUEST_RIP));
                r = 0;
        }

        if (!is_canonical(vmcs_readl(GUEST_RIP))) {
                vcpu_printf(vcpu, "%s: RIP 0x%lx, not canonical\n",
                            __FUNCTION__,
                            vmcs_readl(GUEST_RIP));
                r = 0;
        }

#define RFLAGS_RESERVED_CLEAR_BITS\
        (~((1ULL << 22) - 1) | (1ULL << 15) | (1ULL << 5) | (1ULL << 3))
#define RFLAGS_RESERVED_SET_BITS (1 << 1)

        if ((rflags & RFLAGS_RESERVED_CLEAR_BITS) ||
            !(rflags & RFLAGS_RESERVED_SET_BITS)) {
                vcpu_printf(vcpu, "%s: RFLAGS 0x%lx, reserved bits 0x%llx 0x%x\n",
                            __FUNCTION__, rflags,
                            RFLAGS_RESERVED_CLEAR_BITS,
                            RFLAGS_RESERVED_SET_BITS);
                r = 0;
        }

        if (long_mode && virtual8086) {
                vcpu_printf(vcpu, "%s: RFLAGS 0x%lx, vm and long mode\n",
                            __FUNCTION__, rflags);
                r = 0;
        }

        if (!(rflags & RFLAGS_IF)) {
                u32 vm_entry_info = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);

                if ((vm_entry_info & INTR_INFO_VALID_MASK) &&
                    (vm_entry_info & INTR_INFO_INTR_TYPE_MASK) ==
                    INTR_TYPE_EXT_INTR) {
                        vcpu_printf(vcpu, "%s: RFLAGS 0x%lx, external"
                                    " interrupt and IF is clear\n",
                                    __FUNCTION__, rflags);
                        r = 0;
                }
        }

        /* to be continued from Checks on Guest Non-Register State (22.3.1.5) */
        return r;
}

static int check_fixed_bits(struct kvm_vcpu *vcpu, const char *reg,
                            unsigned long cr,
                            u32 msr_fixed_0, u32 msr_fixed_1)
{
        u64 fixed_bits_0, fixed_bits_1;

        rdmsrl(msr_fixed_0, fixed_bits_0);
        rdmsrl(msr_fixed_1, fixed_bits_1);
        if ((cr & fixed_bits_0) != fixed_bits_0) {
                vcpu_printf(vcpu, "%s: %s (%lx) has one of %llx unset\n",
                            __FUNCTION__, reg, cr, fixed_bits_0);
                return 0;
        }
        if ((~cr & ~fixed_bits_1) != ~fixed_bits_1) {
                vcpu_printf(vcpu, "%s: %s (%lx) has one of %llx set\n",
                            __FUNCTION__, reg, cr, ~fixed_bits_1);
                return 0;
        }
        return 1;
}
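
/*
 * The VMX fixed-bit MSR pairs encode allowed control-register values:
 * a bit that is 1 in the FIXED0 MSR must be 1 in the register, and a
 * bit that is 0 in the FIXED1 MSR must be 0; everything else is free.
 */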

static int phys_addr_width(void)
{
        unsigned eax, ebx, ecx, edx;

        cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
        return eax & 0xff;      /* CPUID.80000008H:EAX[7:0] = physical address width */
}

static int check_canonical(struct kvm_vcpu *vcpu, const char *name,
                           unsigned long reg)
{
        long x;

        if (sizeof(reg) == 4)
                return 1;
        x = (long)reg >> 47;    /* canonical iff bits 63:47 are all equal */
        if (!(x == 0 || x == ~0UL)) {
                vcpu_printf(vcpu, "%s: %s (%lx) not canonical\n",
                            __FUNCTION__, name, reg);
                return 0;
        }
        return 1;
}

static int check_selector(struct kvm_vcpu *vcpu, const char *name,
                          int rpl_ti, int null,
                          u16 sel)
{
        if (rpl_ti && (sel & 7)) {
                vcpu_printf(vcpu, "%s: %s (%x) nonzero rpl or ti\n",
                            __FUNCTION__, name, sel);
                return 0;
        }
        if (null && !sel) {
                vcpu_printf(vcpu, "%s: %s (%x) zero\n",
                            __FUNCTION__, name, sel);
                return 0;
        }
        return 1;
}

#define MSR_IA32_VMX_CR0_FIXED0 0x486
#define MSR_IA32_VMX_CR0_FIXED1 0x487
#define MSR_IA32_VMX_CR4_FIXED0 0x488
#define MSR_IA32_VMX_CR4_FIXED1 0x489
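
/* MSR indices from the SDM's VMX capability-reporting appendix. */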

int vm_entry_test_host(struct kvm_vcpu *vcpu)
{
        int r = 1;
        unsigned long cr0 = vmcs_readl(HOST_CR0);
        unsigned long cr4 = vmcs_readl(HOST_CR4);
        unsigned long cr3 = vmcs_readl(HOST_CR3);
        int host_64;

        host_64 = vmcs_read32(VM_EXIT_CONTROLS) & VM_EXIT_HOST_ADD_SPACE_SIZE;

        r &= check_fixed_bits(vcpu, "host cr0", cr0, MSR_IA32_VMX_CR0_FIXED0,
                              MSR_IA32_VMX_CR0_FIXED1);
        r &= check_fixed_bits(vcpu, "host cr4", cr4, MSR_IA32_VMX_CR4_FIXED0,
                              MSR_IA32_VMX_CR4_FIXED1);
        if ((u64)cr3 >> phys_addr_width()) {
                vcpu_printf(vcpu, "%s: cr3 (%lx) vs phys addr width\n",
                            __FUNCTION__, cr3);
                r = 0;
        }

        r &= check_canonical(vcpu, "host ia32_sysenter_eip",
                             vmcs_readl(HOST_IA32_SYSENTER_EIP));
        r &= check_canonical(vcpu, "host ia32_sysenter_esp",
                             vmcs_readl(HOST_IA32_SYSENTER_ESP));

        r &= check_selector(vcpu, "host cs", 1, 1,
                            vmcs_read16(HOST_CS_SELECTOR));
        r &= check_selector(vcpu, "host ss", 1, !host_64,
                            vmcs_read16(HOST_SS_SELECTOR));
        r &= check_selector(vcpu, "host ds", 1, 0,
                            vmcs_read16(HOST_DS_SELECTOR));
        r &= check_selector(vcpu, "host es", 1, 0,
                            vmcs_read16(HOST_ES_SELECTOR));
        r &= check_selector(vcpu, "host fs", 1, 0,
                            vmcs_read16(HOST_FS_SELECTOR));
        r &= check_selector(vcpu, "host gs", 1, 0,
                            vmcs_read16(HOST_GS_SELECTOR));
        r &= check_selector(vcpu, "host tr", 1, 1,
                            vmcs_read16(HOST_TR_SELECTOR));

        r &= check_canonical(vcpu, "host fs base",
                             vmcs_readl(HOST_FS_BASE));
        r &= check_canonical(vcpu, "host gs base",
                             vmcs_readl(HOST_GS_BASE));
        r &= check_canonical(vcpu, "host gdtr base",
                             vmcs_readl(HOST_GDTR_BASE));
        r &= check_canonical(vcpu, "host idtr base",
                             vmcs_readl(HOST_IDTR_BASE));

        if (!host_64) {
                vcpu_printf(vcpu, "%s: vm exit controls: !64 bit host\n",
                            __FUNCTION__);
                r = 0;
        }

        if (!(cr4 & CR4_PAE_MASK)) {
                vcpu_printf(vcpu, "%s: cr4 (%lx): !pae\n",
                            __FUNCTION__, cr4);
                r = 0;
        }

        r &= check_canonical(vcpu, "host rip", vmcs_readl(HOST_RIP));

        return r;
}

int vm_entry_test(struct kvm_vcpu *vcpu)
{
        int rg, rh;

        rg = vm_entry_test_guest(vcpu);
        rh = vm_entry_test_host(vcpu);
        return rg && rh;
}

void vmcs_dump(struct kvm_vcpu *vcpu)
{
        vcpu_printf(vcpu, "************************ vmcs_dump ************************\n");
        vcpu_printf(vcpu, "VM_ENTRY_CONTROLS 0x%x\n", vmcs_read32(VM_ENTRY_CONTROLS));
        vcpu_printf(vcpu, "GUEST_CR0 0x%lx\n", vmcs_readl(GUEST_CR0));
        vcpu_printf(vcpu, "GUEST_CR3 0x%lx\n", vmcs_readl(GUEST_CR3));
        vcpu_printf(vcpu, "GUEST_CR4 0x%lx\n", vmcs_readl(GUEST_CR4));
        vcpu_printf(vcpu, "GUEST_SYSENTER_ESP 0x%lx\n", vmcs_readl(GUEST_SYSENTER_ESP));
        vcpu_printf(vcpu, "GUEST_SYSENTER_EIP 0x%lx\n", vmcs_readl(GUEST_SYSENTER_EIP));
        vcpu_printf(vcpu, "GUEST_IA32_DEBUGCTL 0x%llx\n", vmcs_read64(GUEST_IA32_DEBUGCTL));
        vcpu_printf(vcpu, "GUEST_DR7 0x%lx\n", vmcs_readl(GUEST_DR7));
        vcpu_printf(vcpu, "GUEST_RFLAGS 0x%lx\n", vmcs_readl(GUEST_RFLAGS));
        vcpu_printf(vcpu, "GUEST_RIP 0x%lx\n", vmcs_readl(GUEST_RIP));
        vcpu_printf(vcpu, "GUEST_CS_SELECTOR 0x%x\n", vmcs_read16(GUEST_CS_SELECTOR));
        vcpu_printf(vcpu, "GUEST_DS_SELECTOR 0x%x\n", vmcs_read16(GUEST_DS_SELECTOR));
        vcpu_printf(vcpu, "GUEST_ES_SELECTOR 0x%x\n", vmcs_read16(GUEST_ES_SELECTOR));
        vcpu_printf(vcpu, "GUEST_FS_SELECTOR 0x%x\n", vmcs_read16(GUEST_FS_SELECTOR));
        vcpu_printf(vcpu, "GUEST_GS_SELECTOR 0x%x\n", vmcs_read16(GUEST_GS_SELECTOR));
        vcpu_printf(vcpu, "GUEST_SS_SELECTOR 0x%x\n", vmcs_read16(GUEST_SS_SELECTOR));
        vcpu_printf(vcpu, "GUEST_TR_SELECTOR 0x%x\n", vmcs_read16(GUEST_TR_SELECTOR));
        vcpu_printf(vcpu, "GUEST_LDTR_SELECTOR 0x%x\n", vmcs_read16(GUEST_LDTR_SELECTOR));
        vcpu_printf(vcpu, "GUEST_CS_AR_BYTES 0x%x\n", vmcs_read32(GUEST_CS_AR_BYTES));
        vcpu_printf(vcpu, "GUEST_DS_AR_BYTES 0x%x\n", vmcs_read32(GUEST_DS_AR_BYTES));
        vcpu_printf(vcpu, "GUEST_ES_AR_BYTES 0x%x\n", vmcs_read32(GUEST_ES_AR_BYTES));
        vcpu_printf(vcpu, "GUEST_FS_AR_BYTES 0x%x\n", vmcs_read32(GUEST_FS_AR_BYTES));
        vcpu_printf(vcpu, "GUEST_GS_AR_BYTES 0x%x\n", vmcs_read32(GUEST_GS_AR_BYTES));
        vcpu_printf(vcpu, "GUEST_SS_AR_BYTES 0x%x\n", vmcs_read32(GUEST_SS_AR_BYTES));
        vcpu_printf(vcpu, "GUEST_LDTR_AR_BYTES 0x%x\n", vmcs_read32(GUEST_LDTR_AR_BYTES));
        vcpu_printf(vcpu, "GUEST_TR_AR_BYTES 0x%x\n", vmcs_read32(GUEST_TR_AR_BYTES));
        vcpu_printf(vcpu, "GUEST_CS_BASE 0x%lx\n", vmcs_readl(GUEST_CS_BASE));
        vcpu_printf(vcpu, "GUEST_DS_BASE 0x%lx\n", vmcs_readl(GUEST_DS_BASE));
        vcpu_printf(vcpu, "GUEST_ES_BASE 0x%lx\n", vmcs_readl(GUEST_ES_BASE));
        vcpu_printf(vcpu, "GUEST_FS_BASE 0x%lx\n", vmcs_readl(GUEST_FS_BASE));
        vcpu_printf(vcpu, "GUEST_GS_BASE 0x%lx\n", vmcs_readl(GUEST_GS_BASE));
        vcpu_printf(vcpu, "GUEST_SS_BASE 0x%lx\n", vmcs_readl(GUEST_SS_BASE));
        vcpu_printf(vcpu, "GUEST_LDTR_BASE 0x%lx\n", vmcs_readl(GUEST_LDTR_BASE));
        vcpu_printf(vcpu, "GUEST_TR_BASE 0x%lx\n", vmcs_readl(GUEST_TR_BASE));
        vcpu_printf(vcpu, "GUEST_CS_LIMIT 0x%x\n", vmcs_read32(GUEST_CS_LIMIT));
        vcpu_printf(vcpu, "GUEST_DS_LIMIT 0x%x\n", vmcs_read32(GUEST_DS_LIMIT));
        vcpu_printf(vcpu, "GUEST_ES_LIMIT 0x%x\n", vmcs_read32(GUEST_ES_LIMIT));
        vcpu_printf(vcpu, "GUEST_FS_LIMIT 0x%x\n", vmcs_read32(GUEST_FS_LIMIT));
        vcpu_printf(vcpu, "GUEST_GS_LIMIT 0x%x\n", vmcs_read32(GUEST_GS_LIMIT));
        vcpu_printf(vcpu, "GUEST_SS_LIMIT 0x%x\n", vmcs_read32(GUEST_SS_LIMIT));
        vcpu_printf(vcpu, "GUEST_LDTR_LIMIT 0x%x\n", vmcs_read32(GUEST_LDTR_LIMIT));
        vcpu_printf(vcpu, "GUEST_TR_LIMIT 0x%x\n", vmcs_read32(GUEST_TR_LIMIT));
        vcpu_printf(vcpu, "GUEST_GDTR_BASE 0x%lx\n", vmcs_readl(GUEST_GDTR_BASE));
        vcpu_printf(vcpu, "GUEST_IDTR_BASE 0x%lx\n", vmcs_readl(GUEST_IDTR_BASE));
        vcpu_printf(vcpu, "GUEST_GDTR_LIMIT 0x%x\n", vmcs_read32(GUEST_GDTR_LIMIT));
        vcpu_printf(vcpu, "GUEST_IDTR_LIMIT 0x%x\n", vmcs_read32(GUEST_IDTR_LIMIT));
        vcpu_printf(vcpu, "EXCEPTION_BITMAP 0x%x\n", vmcs_read32(EXCEPTION_BITMAP));
        vcpu_printf(vcpu, "***********************************************************\n");
}

void regs_dump(struct kvm_vcpu *vcpu)
{
#define REG_DUMP(reg) \
        vcpu_printf(vcpu, #reg" = 0x%lx(VCPU)\n", vcpu->regs[VCPU_REGS_##reg])
#define VMCS_REG_DUMP(reg) \
        vcpu_printf(vcpu, #reg" = 0x%lx(VMCS)\n", vmcs_readl(GUEST_##reg))

        vcpu_printf(vcpu, "************************ regs_dump ************************\n");
        REG_DUMP(RAX);
        REG_DUMP(RBX);
        REG_DUMP(RCX);
        REG_DUMP(RDX);
        REG_DUMP(RSP);
        REG_DUMP(RBP);
        REG_DUMP(RSI);
        REG_DUMP(RDI);
#ifdef CONFIG_X86_64
        REG_DUMP(R8);
        REG_DUMP(R9);
        REG_DUMP(R10);
        REG_DUMP(R11);
        REG_DUMP(R12);
        REG_DUMP(R13);
        REG_DUMP(R14);
        REG_DUMP(R15);
#endif
        VMCS_REG_DUMP(RSP);
        VMCS_REG_DUMP(RIP);
        VMCS_REG_DUMP(RFLAGS);
        vcpu_printf(vcpu, "***********************************************************\n");
}

void sregs_dump(struct kvm_vcpu *vcpu)
{
        vcpu_printf(vcpu, "************************ sregs_dump ************************\n");
        vcpu_printf(vcpu, "cr0 = 0x%lx\n", vcpu->cr0);
        vcpu_printf(vcpu, "cr2 = 0x%lx\n", vcpu->cr2);
        vcpu_printf(vcpu, "cr3 = 0x%lx\n", vcpu->cr3);
        vcpu_printf(vcpu, "cr4 = 0x%lx\n", vcpu->cr4);
        vcpu_printf(vcpu, "cr8 = 0x%lx\n", vcpu->cr8);
        vcpu_printf(vcpu, "shadow_efer = 0x%llx\n", vcpu->shadow_efer);
        vcpu_printf(vcpu, "***********************************************************\n");
}

void show_pending_interrupts(struct kvm_vcpu *vcpu)
{
        int i;

        vcpu_printf(vcpu, "************************ pending interrupts ****************\n");
        vcpu_printf(vcpu, "summary = 0x%lx\n", vcpu->irq_summary);
        for (i = 0; i < NR_IRQ_WORDS; i++)
                vcpu_printf(vcpu, "%lx ", vcpu->irq_pending[i]);
        vcpu_printf(vcpu, "\n");
        vcpu_printf(vcpu, "************************************************************\n");
}

void vcpu_dump(struct kvm_vcpu *vcpu)
{
        regs_dump(vcpu);
        sregs_dump(vcpu);
        vmcs_dump(vcpu);
        show_msrs(vcpu);
        show_pending_interrupts(vcpu);
}