/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Debug support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>

#include "debug.h"

#ifdef KVM_DEBUG
static const char *vmx_msr_name[] = {
	"MSR_EFER", "MSR_STAR", "MSR_CSTAR",
	"MSR_KERNEL_GS_BASE", "MSR_SYSCALL_MASK", "MSR_LSTAR"
};

#define NR_VMX_MSR (sizeof(vmx_msr_name) / sizeof(char *))
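
/*
 * Raw VMCS accessors.  ASM_VMX_VMREAD_RDX_RAX executes VMREAD on the
 * current VMCS: the field encoding is passed in rdx and the value comes
 * back in rax.  On 32-bit hosts a 64-bit field is read as two adjacent
 * 32-bit halves (field and field+1).
 */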
static unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (ASM_VMX_VMREAD_RDX_RAX
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}
void show_msrs(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < NR_VMX_MSR; ++i) {
		vcpu_printf(vcpu, "%s: %s=0x%llx\n",
			    __FUNCTION__,
			    vmx_msr_name[i],
			    vcpu->guest_msrs[i].data);
	}
}
void show_code(struct kvm_vcpu *vcpu)
{
	gva_t rip = vmcs_readl(GUEST_RIP);
	u8 code[50];
	char buf[30 + 3 * sizeof code];
	int i;

	if (!is_long_mode(vcpu))
		rip += vmcs_readl(GUEST_CS_BASE);

	kvm_read_guest(vcpu, rip, sizeof code, code);
	for (i = 0; i < sizeof code; ++i)
		sprintf(buf + i * 3, " %02x", code[i]);
	vcpu_printf(vcpu, "code: %lx%s\n", rip, buf);
}
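
/*
 * 16-byte interrupt/trap gate descriptor, as laid out in the 64-bit IDT:
 * the handler offset is split into low/middle/high parts, plus the code
 * segment selector, IST index, type, DPL and present bit.
 */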
struct gate_struct {
	u16 offset_low;
	u16 segment;
	unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
	u16 offset_middle;
	u32 offset_high;
	u32 zero1;
} __attribute__((packed));
void show_irq(struct kvm_vcpu *vcpu, int irq)
{
	unsigned long idt_base = vmcs_readl(GUEST_IDTR_BASE);
	unsigned long idt_limit = vmcs_readl(GUEST_IDTR_LIMIT);
	struct gate_struct gate;

	if (!is_long_mode(vcpu))
		vcpu_printf(vcpu, "%s: not in long mode\n", __FUNCTION__);

	if (!is_long_mode(vcpu) || idt_limit < irq * sizeof(gate)) {
		vcpu_printf(vcpu, "%s: 0x%x read_guest err\n",
			    __FUNCTION__, irq);
		return;
	}

	if (kvm_read_guest(vcpu, idt_base + irq * sizeof(gate),
			   sizeof(gate), &gate) != sizeof(gate)) {
		vcpu_printf(vcpu, "%s: 0x%x read_guest err\n",
			    __FUNCTION__, irq);
		return;
	}

	vcpu_printf(vcpu, "%s: 0x%x handler 0x%llx\n",
		    __FUNCTION__, irq,
		    ((u64)gate.offset_high << 32) |
		    ((u64)gate.offset_middle << 16) |
		    gate.offset_low);
}
void show_page(struct kvm_vcpu *vcpu, gva_t addr)
{
	u64 *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return;

	addr &= PAGE_MASK;
	if (kvm_read_guest(vcpu, addr, PAGE_SIZE, buf)) {
		int i;

		for (i = 0; i < PAGE_SIZE / sizeof(u64); i++) {
			u8 *ptr = (u8 *)&buf[i];
			int j;

			vcpu_printf(vcpu, " 0x%16.16lx:",
				    addr + i * sizeof(u64));
			for (j = 0; j < sizeof(u64); j++)
				vcpu_printf(vcpu, " 0x%2.2x", ptr[j]);
			vcpu_printf(vcpu, "\n");
		}
	}
	kfree(buf);
}
void show_u64(struct kvm_vcpu *vcpu, gva_t addr)
{
	u64 buf;

	if (kvm_read_guest(vcpu, addr, sizeof(u64), &buf) == sizeof(u64)) {
		u8 *ptr = (u8 *)&buf;
		int j;

		vcpu_printf(vcpu, " 0x%16.16lx:", addr);
		for (j = 0; j < sizeof(u64); j++)
			vcpu_printf(vcpu, " 0x%2.2x", ptr[j]);
		vcpu_printf(vcpu, "\n");
	}
}
#define IA32_DEBUGCTL_RESERVED_BITS 0xfffffffffffffe3cULL
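
/*
 * A virtual address is canonical when bits 63:48 are copies of bit 47,
 * i.e. when sign-extending it from bit 47 leaves the value unchanged.
 */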
static int is_canonical(unsigned long addr)
{
	return addr == ((long)addr << 16) >> 16;
}
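
/*
 * Checks on the guest state area, mirroring the VM-entry checks of the
 * Intel spec (the "22.3.1.x" section numbers cited below); returns 1 if
 * the guest state in the current VMCS looks valid, 0 otherwise.
 */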
int vm_entry_test_guest(struct kvm_vcpu *vcpu)
{
	unsigned long cr0;
	unsigned long cr4;
	unsigned long cr3;
	unsigned long dr7;
	u64 ia32_debugctl;
	unsigned long sysenter_esp;
	unsigned long sysenter_eip;
	unsigned long rflags;

	int long_mode;
	int virtual8086;

#define RFLAGS_VM (1 << 17)
#define RFLAGS_IF (1 << 9)	/* bit 9 of RFLAGS is the interrupt flag */

/* In virtual-8086 mode each segment base must equal its selector << 4. */
#define VIR8086_SEG_BASE_TEST(seg)\
	if (vmcs_readl(GUEST_##seg##_BASE) != \
	    (unsigned long)vmcs_read16(GUEST_##seg##_SELECTOR) << 4) {\
		vcpu_printf(vcpu, "%s: "#seg" base 0x%lx in "\
			    "virtual8086 is not "#seg" selector 0x%x"\
			    " shifted left 4 bits\n",\
			    __FUNCTION__,\
			    vmcs_readl(GUEST_##seg##_BASE),\
			    vmcs_read16(GUEST_##seg##_SELECTOR));\
		return 0;\
	}

#define VIR8086_SEG_LIMIT_TEST(seg)\
	if (vmcs_readl(GUEST_##seg##_LIMIT) != 0x0ffff) { \
		vcpu_printf(vcpu, "%s: "#seg" limit 0x%lx in "\
			    "virtual8086 is not 0xffff\n",\
			    __FUNCTION__,\
			    vmcs_readl(GUEST_##seg##_LIMIT));\
		return 0;\
	}

#define VIR8086_SEG_AR_TEST(seg)\
	if (vmcs_read32(GUEST_##seg##_AR_BYTES) != 0x0f3) { \
		vcpu_printf(vcpu, "%s: "#seg" AR 0x%x in "\
			    "virtual8086 is not 0xf3\n",\
			    __FUNCTION__,\
			    vmcs_read32(GUEST_##seg##_AR_BYTES));\
		return 0;\
	}
	cr0 = vmcs_readl(GUEST_CR0);

	if (!(cr0 & CR0_PG_MASK)) {
		vcpu_printf(vcpu, "%s: cr0 0x%lx, PG is not set\n",
			    __FUNCTION__, cr0);
		return 0;
	}

	if (!(cr0 & CR0_PE_MASK)) {
		vcpu_printf(vcpu, "%s: cr0 0x%lx, PE is not set\n",
			    __FUNCTION__, cr0);
		return 0;
	}

	if (!(cr0 & CR0_NE_MASK)) {
		vcpu_printf(vcpu, "%s: cr0 0x%lx, NE is not set\n",
			    __FUNCTION__, cr0);
		return 0;
	}

	/* WP: warn but do not fail the test */
	if (!(cr0 & CR0_WP_MASK)) {
		vcpu_printf(vcpu, "%s: cr0 0x%lx, WP is not set\n",
			    __FUNCTION__, cr0);
	}
	cr4 = vmcs_readl(GUEST_CR4);

	if (!(cr4 & CR4_VMXE_MASK)) {
		vcpu_printf(vcpu, "%s: cr4 0x%lx, VMXE is not set\n",
			    __FUNCTION__, cr4);
		return 0;
	}

	/* PAE: warn but do not fail the test */
	if (!(cr4 & CR4_PAE_MASK)) {
		vcpu_printf(vcpu, "%s: cr4 0x%lx, PAE is not set\n",
			    __FUNCTION__, cr4);
	}

	ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);

	if (ia32_debugctl & IA32_DEBUGCTL_RESERVED_BITS) {
		vcpu_printf(vcpu, "%s: ia32_debugctl 0x%llx, reserved bits\n",
			    __FUNCTION__, ia32_debugctl);
		return 0;
	}
	long_mode = is_long_mode(vcpu);

	if (long_mode && !(cr4 & CR4_PAE_MASK)) {
		vcpu_printf(vcpu, "%s: long mode and not PAE\n",
			    __FUNCTION__);
		return 0;
	}

	cr3 = vmcs_readl(GUEST_CR3);

	if (cr3 & CR3_L_MODE_RESEVED_BITS) {
		vcpu_printf(vcpu, "%s: cr3 0x%lx, reserved bits\n",
			    __FUNCTION__, cr3);
		return 0;
	}
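
	/*
	 * With PAE paging (CR4.PAE set, long mode off), cr3 points at a
	 * 32-byte aligned table of four 64-bit PDPTEs; a present entry
	 * with reserved bits set fails the entry test.
	 */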
	if (!long_mode && (cr4 & CR4_PAE_MASK)) {
		/* check the 4 PDPTEs for reserved bits */
		unsigned long pdpt_pfn = cr3 >> PAGE_SHIFT;
		int i;
		u64 pdpte;
		/* index of the first PDPTE within the page, in u64 units */
		unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
		u64 *pdpt = kmap_atomic(pfn_to_page(pdpt_pfn), KM_USER0);

		for (i = 0; i < 4; ++i) {
			pdpte = pdpt[offset + i];
			if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull))
				break;
		}

		kunmap_atomic(pdpt, KM_USER0);

		if (i != 4) {
			vcpu_printf(vcpu, "%s: pae cr3[%d] 0x%llx, reserved bits\n",
				    __FUNCTION__, i, pdpte);
			return 0;
		}
	}
	dr7 = vmcs_readl(GUEST_DR7);

	if (dr7 & ~((1ULL << 32) - 1)) {
		vcpu_printf(vcpu, "%s: dr7 0x%lx, reserved bits\n",
			    __FUNCTION__, dr7);
		return 0;
	}

	sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);

	if (!is_canonical(sysenter_esp)) {
		vcpu_printf(vcpu, "%s: sysenter_esp 0x%lx, not canonical\n",
			    __FUNCTION__, sysenter_esp);
		return 0;
	}

	sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);

	if (!is_canonical(sysenter_eip)) {
		vcpu_printf(vcpu, "%s: sysenter_eip 0x%lx, not canonical\n",
			    __FUNCTION__, sysenter_eip);
		return 0;
	}
	rflags = vmcs_readl(GUEST_RFLAGS);
	virtual8086 = rflags & RFLAGS_VM;

	if (vmcs_read16(GUEST_TR_SELECTOR) & SELECTOR_TI_MASK) {
		vcpu_printf(vcpu, "%s: tr selector 0x%x, TI is set\n",
			    __FUNCTION__, vmcs_read16(GUEST_TR_SELECTOR));
		return 0;
	}

	if (!(vmcs_read32(GUEST_LDTR_AR_BYTES) & AR_UNUSABLE_MASK) &&
	    vmcs_read16(GUEST_LDTR_SELECTOR) & SELECTOR_TI_MASK) {
		vcpu_printf(vcpu, "%s: ldtr selector 0x%x,"
			    " is usable and TI is set\n",
			    __FUNCTION__, vmcs_read16(GUEST_LDTR_SELECTOR));
		return 0;
	}

	if (!virtual8086 &&
	    (vmcs_read16(GUEST_SS_SELECTOR) & SELECTOR_RPL_MASK) !=
	    (vmcs_read16(GUEST_CS_SELECTOR) & SELECTOR_RPL_MASK)) {
		vcpu_printf(vcpu, "%s: ss selector 0x%x cs selector 0x%x,"
			    " not same RPL\n",
			    __FUNCTION__,
			    vmcs_read16(GUEST_SS_SELECTOR),
			    vmcs_read16(GUEST_CS_SELECTOR));
		return 0;
	}
	if (virtual8086) {
		VIR8086_SEG_BASE_TEST(CS);
		VIR8086_SEG_BASE_TEST(SS);
		VIR8086_SEG_BASE_TEST(DS);
		VIR8086_SEG_BASE_TEST(ES);
		VIR8086_SEG_BASE_TEST(FS);
		VIR8086_SEG_BASE_TEST(GS);
	}

	if (!is_canonical(vmcs_readl(GUEST_TR_BASE)) ||
	    !is_canonical(vmcs_readl(GUEST_FS_BASE)) ||
	    !is_canonical(vmcs_readl(GUEST_GS_BASE))) {
		vcpu_printf(vcpu, "%s: TR 0x%lx FS 0x%lx or GS 0x%lx base"
			    " is not canonical\n",
			    __FUNCTION__,
			    vmcs_readl(GUEST_TR_BASE),
			    vmcs_readl(GUEST_FS_BASE),
			    vmcs_readl(GUEST_GS_BASE));
		return 0;
	}

	if (!(vmcs_read32(GUEST_LDTR_AR_BYTES) & AR_UNUSABLE_MASK) &&
	    !is_canonical(vmcs_readl(GUEST_LDTR_BASE))) {
		vcpu_printf(vcpu, "%s: LDTR base 0x%lx, usable and is not"
			    " canonical\n",
			    __FUNCTION__,
			    vmcs_readl(GUEST_LDTR_BASE));
		return 0;
	}

	if ((vmcs_readl(GUEST_CS_BASE) & ~((1ULL << 32) - 1))) {
		vcpu_printf(vcpu, "%s: CS base 0x%lx, not all bits 63-32"
			    " are zero\n",
			    __FUNCTION__,
			    vmcs_readl(GUEST_CS_BASE));
		return 0;
	}

#define SEG_BASE_TEST(seg)\
	if (!(vmcs_read32(GUEST_##seg##_AR_BYTES) & AR_UNUSABLE_MASK) &&\
	    (vmcs_readl(GUEST_##seg##_BASE) & ~((1ULL << 32) - 1))) {\
		vcpu_printf(vcpu, "%s: "#seg" base 0x%lx, is usable and not"\
			    " all bits 63-32 are zero\n",\
			    __FUNCTION__,\
			    vmcs_readl(GUEST_##seg##_BASE));\
		return 0;\
	}

	SEG_BASE_TEST(SS);
	SEG_BASE_TEST(DS);
	SEG_BASE_TEST(ES);
	if (virtual8086) {
		VIR8086_SEG_LIMIT_TEST(CS);
		VIR8086_SEG_LIMIT_TEST(SS);
		VIR8086_SEG_LIMIT_TEST(DS);
		VIR8086_SEG_LIMIT_TEST(ES);
		VIR8086_SEG_LIMIT_TEST(FS);
		VIR8086_SEG_LIMIT_TEST(GS);
	}

	if (virtual8086) {
		VIR8086_SEG_AR_TEST(CS);
		VIR8086_SEG_AR_TEST(SS);
		VIR8086_SEG_AR_TEST(DS);
		VIR8086_SEG_AR_TEST(ES);
		VIR8086_SEG_AR_TEST(FS);
		VIR8086_SEG_AR_TEST(GS);
	} else {

		u32 cs_ar = vmcs_read32(GUEST_CS_AR_BYTES);
		u32 ss_ar = vmcs_read32(GUEST_SS_AR_BYTES);
		u32 tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
		u32 ldtr_ar = vmcs_read32(GUEST_LDTR_AR_BYTES);
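
/*
 * Granularity check: with G clear the limit is in bytes, so bits 31:20
 * must be zero; with G set the limit is in 4K units, so bits 11:0 must
 * all be ones.
 */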
#define SEG_G_TEST(seg) { \
	u32 lim = vmcs_read32(GUEST_##seg##_LIMIT); \
	u32 ar = vmcs_read32(GUEST_##seg##_AR_BYTES); \
	int err = 0; \
	if (((lim & ~PAGE_MASK) != ~PAGE_MASK) && (ar & AR_G_MASK)) \
		err = 1; \
	if ((lim & ~((1u << 20) - 1)) && !(ar & AR_G_MASK)) \
		err = 1; \
	if (err) { \
		vcpu_printf(vcpu, "%s: "#seg" AR 0x%x, G err. lim" \
			    " is 0x%x\n", \
			    __FUNCTION__, \
			    ar, lim); \
		return 0; \
	} \
}
		if (!(cs_ar & AR_TYPE_ACCESSES_MASK)) {
			vcpu_printf(vcpu, "%s: cs AR 0x%x, accesses is clear\n",
				    __FUNCTION__,
				    cs_ar);
			return 0;
		}

		if (!(cs_ar & AR_TYPE_CODE_MASK)) {
			vcpu_printf(vcpu, "%s: cs AR 0x%x, code is clear\n",
				    __FUNCTION__,
				    cs_ar);
			return 0;
		}

		if (!(cs_ar & AR_S_MASK)) {
			vcpu_printf(vcpu, "%s: cs AR 0x%x, type is sys\n",
				    __FUNCTION__,
				    cs_ar);
			return 0;
		}

		if ((cs_ar & AR_TYPE_MASK) >= 8 && (cs_ar & AR_TYPE_MASK) < 12 &&
		    AR_DPL(cs_ar) !=
		    (vmcs_read16(GUEST_CS_SELECTOR) & SELECTOR_RPL_MASK)) {
			vcpu_printf(vcpu, "%s: cs AR 0x%x, "
				    "DPL(0x%x) not as RPL(0x%x)\n",
				    __FUNCTION__,
				    cs_ar, AR_DPL(cs_ar),
				    vmcs_read16(GUEST_CS_SELECTOR) &
				    SELECTOR_RPL_MASK);
			return 0;
		}

		if ((cs_ar & AR_TYPE_MASK) >= 13 && (cs_ar & AR_TYPE_MASK) < 16 &&
		    AR_DPL(cs_ar) >
		    (vmcs_read16(GUEST_CS_SELECTOR) & SELECTOR_RPL_MASK)) {
			vcpu_printf(vcpu, "%s: cs AR 0x%x, "
				    "DPL greater than RPL\n",
				    __FUNCTION__,
				    cs_ar);
			return 0;
		}

		if (!(cs_ar & AR_P_MASK)) {
			vcpu_printf(vcpu, "%s: CS AR 0x%x, not "
				    "present\n",
				    __FUNCTION__,
				    cs_ar);
			return 0;
		}

		if ((cs_ar & AR_RESERVD_MASK)) {
			vcpu_printf(vcpu, "%s: CS AR 0x%x, reserved"
				    " bits are set\n",
				    __FUNCTION__,
				    cs_ar);
			return 0;
		}

		if (long_mode && (cs_ar & AR_L_MASK) && (cs_ar & AR_DB_MASK)) {
			vcpu_printf(vcpu, "%s: CS AR 0x%x, DB and L are set"
				    " in long mode\n",
				    __FUNCTION__,
				    cs_ar);
			return 0;
		}
		SEG_G_TEST(CS);

		if (!(ss_ar & AR_UNUSABLE_MASK)) {
			if ((ss_ar & AR_TYPE_MASK) != 3 &&
			    (ss_ar & AR_TYPE_MASK) != 7) {
				vcpu_printf(vcpu, "%s: ss AR 0x%x, usable and type"
					    " is not 3 or 7\n",
					    __FUNCTION__,
					    ss_ar);
				return 0;
			}

			if (!(ss_ar & AR_S_MASK)) {
				vcpu_printf(vcpu, "%s: ss AR 0x%x, usable and"
					    " is sys\n",
					    __FUNCTION__,
					    ss_ar);
				return 0;
			}

			if (!(ss_ar & AR_P_MASK)) {
				vcpu_printf(vcpu, "%s: SS AR 0x%x, usable"
					    " and not present\n",
					    __FUNCTION__,
					    ss_ar);
				return 0;
			}

			if ((ss_ar & AR_RESERVD_MASK)) {
				vcpu_printf(vcpu, "%s: SS AR 0x%x, reserved"
					    " bits are set\n",
					    __FUNCTION__,
					    ss_ar);
				return 0;
			}

			SEG_G_TEST(SS);
		}

		if (AR_DPL(ss_ar) !=
		    (vmcs_read16(GUEST_SS_SELECTOR) & SELECTOR_RPL_MASK)) {
			vcpu_printf(vcpu, "%s: SS AR 0x%x, "
				    "DPL not as RPL\n",
				    __FUNCTION__,
				    ss_ar);
			return 0;
		}
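
/*
 * Common access-rights checks for the remaining segment registers: a
 * usable segment must have been accessed, be readable if it is a code
 * segment, be non-system, present, have DPL >= RPL for non-conforming
 * types, and pass the granularity test.
 */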
#define SEG_AR_TEST(seg) {\
	u32 ar = vmcs_read32(GUEST_##seg##_AR_BYTES);\
	if (!(ar & AR_UNUSABLE_MASK)) {\
		if (!(ar & AR_TYPE_ACCESSES_MASK)) {\
			vcpu_printf(vcpu, "%s: "#seg" AR 0x%x, "\
				    "usable and not accesses\n",\
				    __FUNCTION__,\
				    ar);\
			return 0;\
		}\
		if ((ar & AR_TYPE_CODE_MASK) &&\
		    !(ar & AR_TYPE_READABLE_MASK)) {\
			vcpu_printf(vcpu, "%s: "#seg" AR 0x%x, "\
				    "code and not readable\n",\
				    __FUNCTION__,\
				    ar);\
			return 0;\
		}\
		if (!(ar & AR_S_MASK)) {\
			vcpu_printf(vcpu, "%s: "#seg" AR 0x%x, usable and"\
				    " is sys\n",\
				    __FUNCTION__,\
				    ar);\
			return 0;\
		}\
		if ((ar & AR_TYPE_MASK) >= 0 && \
		    (ar & AR_TYPE_MASK) < 12 && \
		    AR_DPL(ar) < (vmcs_read16(GUEST_##seg##_SELECTOR) & \
				  SELECTOR_RPL_MASK)) {\
			vcpu_printf(vcpu, "%s: "#seg" AR 0x%x, "\
				    "DPL less than RPL\n",\
				    __FUNCTION__,\
				    ar);\
			return 0;\
		}\
		if (!(ar & AR_P_MASK)) {\
			vcpu_printf(vcpu, "%s: "#seg" AR 0x%x, usable and"\
				    " not present\n",\
				    __FUNCTION__,\
				    ar);\
			return 0;\
		}\
		if ((ar & AR_RESERVD_MASK)) {\
			vcpu_printf(vcpu, "%s: "#seg" AR"\
				    " 0x%x, reserved"\
				    " bits are set\n",\
				    __FUNCTION__,\
				    ar);\
			return 0;\
		}\
		SEG_G_TEST(seg)\
	}\
}

#undef DS
#undef ES
#undef FS
#undef GS
		SEG_AR_TEST(DS);
		SEG_AR_TEST(ES);
		SEG_AR_TEST(FS);
		SEG_AR_TEST(GS);

		// TR test
		if (long_mode) {
			if ((tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
				vcpu_printf(vcpu, "%s: TR AR 0x%x, long"
					    " mode and not 64bit busy"
					    " tss\n",
					    __FUNCTION__,
					    tr_ar);
				return 0;
			}
		} else {
			if ((tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_32_TSS &&
			    (tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_16_TSS) {
				vcpu_printf(vcpu, "%s: TR AR 0x%x, legacy"
					    " mode and not 16/32bit "
					    "busy tss\n",
					    __FUNCTION__,
					    tr_ar);
				return 0;
			}
		}

		if ((tr_ar & AR_S_MASK)) {
			vcpu_printf(vcpu, "%s: TR AR 0x%x, S is set\n",
				    __FUNCTION__,
				    tr_ar);
			return 0;
		}

		if (!(tr_ar & AR_P_MASK)) {
			vcpu_printf(vcpu, "%s: TR AR 0x%x, P is not set\n",
				    __FUNCTION__,
				    tr_ar);
			return 0;
		}

		if ((tr_ar & (AR_RESERVD_MASK | AR_UNUSABLE_MASK))) {
			vcpu_printf(vcpu, "%s: TR AR 0x%x, reserved bits are"
				    " set\n",
				    __FUNCTION__,
				    tr_ar);
			return 0;
		}

		SEG_G_TEST(TR);
		// LDTR test
		if (!(ldtr_ar & AR_UNUSABLE_MASK)) {

			if ((ldtr_ar & AR_TYPE_MASK) != AR_TYPE_LDT) {
				vcpu_printf(vcpu, "%s: LDTR AR 0x%x,"
					    " bad type\n",
					    __FUNCTION__,
					    ldtr_ar);
				return 0;
			}

			if ((ldtr_ar & AR_S_MASK)) {
				vcpu_printf(vcpu, "%s: LDTR AR 0x%x,"
					    " S is set\n",
					    __FUNCTION__,
					    ldtr_ar);
				return 0;
			}

			if (!(ldtr_ar & AR_P_MASK)) {
				vcpu_printf(vcpu, "%s: LDTR AR 0x%x,"
					    " P is not set\n",
					    __FUNCTION__,
					    ldtr_ar);
				return 0;
			}

			if ((ldtr_ar & AR_RESERVD_MASK)) {
				vcpu_printf(vcpu, "%s: LDTR AR 0x%x,"
					    " reserved bits are set\n",
					    __FUNCTION__,
					    ldtr_ar);
				return 0;
			}

			SEG_G_TEST(LDTR);
		}
	}
	// GDTR and IDTR

#define IDT_GDT_TEST(reg)\
	if (!is_canonical(vmcs_readl(GUEST_##reg##_BASE))) {\
		vcpu_printf(vcpu, "%s: "#reg" BASE 0x%lx, not canonical\n",\
			    __FUNCTION__,\
			    vmcs_readl(GUEST_##reg##_BASE));\
		return 0;\
	}\
	if (vmcs_read32(GUEST_##reg##_LIMIT) >> 16) {\
		vcpu_printf(vcpu, "%s: "#reg" LIMIT 0x%x, size err\n",\
			    __FUNCTION__,\
			    vmcs_read32(GUEST_##reg##_LIMIT));\
		return 0;\
	}

	IDT_GDT_TEST(GDTR);
	IDT_GDT_TEST(IDTR);
	// RIP

	if ((!long_mode || !(vmcs_read32(GUEST_CS_AR_BYTES) & AR_L_MASK)) &&
	    vmcs_readl(GUEST_RIP) & ~((1ULL << 32) - 1)) {
		vcpu_printf(vcpu, "%s: RIP 0x%lx, size err\n",
			    __FUNCTION__,
			    vmcs_readl(GUEST_RIP));
		return 0;
	}

	if (!is_canonical(vmcs_readl(GUEST_RIP))) {
		vcpu_printf(vcpu, "%s: RIP 0x%lx, not canonical\n",
			    __FUNCTION__,
			    vmcs_readl(GUEST_RIP));
		return 0;
	}
	// RFLAGS
#define RFLAGS_RESERVED_CLEAR_BITS\
	(~((1ULL << 22) - 1) | (1ULL << 15) | (1ULL << 5) | (1ULL << 3))
#define RFLAGS_RESERVED_SET_BITS (1 << 1)

	if ((rflags & RFLAGS_RESERVED_CLEAR_BITS) ||
	    !(rflags & RFLAGS_RESERVED_SET_BITS)) {
		vcpu_printf(vcpu, "%s: RFLAGS 0x%lx, reserved bits 0x%llx 0x%x\n",
			    __FUNCTION__,
			    rflags,
			    RFLAGS_RESERVED_CLEAR_BITS,
			    RFLAGS_RESERVED_SET_BITS);
		return 0;
	}

	if (long_mode && virtual8086) {
		vcpu_printf(vcpu, "%s: RFLAGS 0x%lx, vm and long mode\n",
			    __FUNCTION__,
			    rflags);
		return 0;
	}

	/* injecting an external interrupt requires RFLAGS.IF = 1 */
	if (!(rflags & RFLAGS_IF)) {
		u32 vm_entry_info = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);

		if ((vm_entry_info & INTR_INFO_VALID_MASK) &&
		    (vm_entry_info & INTR_INFO_INTR_TYPE_MASK) ==
		    INTR_TYPE_EXT_INTR) {
			vcpu_printf(vcpu, "%s: RFLAGS 0x%lx, external"
				    " interrupt and IF is clear\n",
				    __FUNCTION__,
				    rflags);
			return 0;
		}
	}

	// to be continued from Checks on Guest Non-Register State (22.3.1.5)
	return 1;
}
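
/*
 * The VMX FIXED0/FIXED1 MSR pairs constrain CR0 and CR4 while in VMX
 * operation: every bit set in FIXED0 must be set in the register, and
 * every bit clear in FIXED1 must be clear.
 */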
static int check_fixed_bits(struct kvm_vcpu *vcpu, const char *reg,
			    unsigned long cr,
			    u32 msr_fixed_0, u32 msr_fixed_1)
{
	u64 fixed_bits_0, fixed_bits_1;

	rdmsrl(msr_fixed_0, fixed_bits_0);
	rdmsrl(msr_fixed_1, fixed_bits_1);
	if ((cr & fixed_bits_0) != fixed_bits_0) {
		vcpu_printf(vcpu, "%s: %s (%lx) has one of %llx unset\n",
			    __FUNCTION__, reg, cr, fixed_bits_0);
		return 0;
	}
	if ((~cr & ~fixed_bits_1) != ~fixed_bits_1) {
		vcpu_printf(vcpu, "%s: %s (%lx) has one of %llx set\n",
			    __FUNCTION__, reg, cr, ~fixed_bits_1);
		return 0;
	}
	return 1;
}
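
/* CPUID leaf 0x80000008: EAX bits 7:0 report the physical address width. */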
static int phys_addr_width(void)
{
	unsigned eax, ebx, ecx, edx;

	cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
	return eax & 0xff;
}
static int check_canonical(struct kvm_vcpu *vcpu, const char *name,
			   unsigned long reg)
{
#ifdef CONFIG_X86_64
	unsigned long x;

	if (sizeof(reg) == 4)
		return 1;
	x = (long)reg >> 48;
	if (!(x == 0 || x == ~0UL)) {
		vcpu_printf(vcpu, "%s: %s (%lx) not canonical\n",
			    __FUNCTION__, name, reg);
		return 0;
	}
#endif
	return 1;
}
static int check_selector(struct kvm_vcpu *vcpu, const char *name,
			  int rpl_ti, int null,
			  u16 sel)
{
	if (rpl_ti && (sel & 7)) {
		vcpu_printf(vcpu, "%s: %s (%x) nonzero rpl or ti\n",
			    __FUNCTION__, name, sel);
		return 0;
	}
	if (null && !sel) {
		vcpu_printf(vcpu, "%s: %s (%x) zero\n",
			    __FUNCTION__, name, sel);
		return 0;
	}
	return 1;
}
#define MSR_IA32_VMX_CR0_FIXED0 0x486
#define MSR_IA32_VMX_CR0_FIXED1 0x487

#define MSR_IA32_VMX_CR4_FIXED0 0x488
#define MSR_IA32_VMX_CR4_FIXED1 0x489
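
/*
 * Checks on the host state area; the "22.2.x" comments below track the
 * corresponding sections of the Intel spec of the time.
 */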
int vm_entry_test_host(struct kvm_vcpu *vcpu)
{
	int r = 1;
	unsigned long cr0 = vmcs_readl(HOST_CR0);
	unsigned long cr4 = vmcs_readl(HOST_CR4);
	unsigned long cr3 = vmcs_readl(HOST_CR3);
	int host_64;

	host_64 = vmcs_read32(VM_EXIT_CONTROLS) & VM_EXIT_HOST_ADD_SPACE_SIZE;

	/* 22.2.2 */
	r &= check_fixed_bits(vcpu, "host cr0", cr0, MSR_IA32_VMX_CR0_FIXED0,
			      MSR_IA32_VMX_CR0_FIXED1);

	r &= check_fixed_bits(vcpu, "host cr4", cr4, MSR_IA32_VMX_CR4_FIXED0,
			      MSR_IA32_VMX_CR4_FIXED1);
	if ((u64)cr3 >> phys_addr_width()) {
		vcpu_printf(vcpu, "%s: cr3 (%lx) vs phys addr width\n",
			    __FUNCTION__, cr3);
		r = 0;
	}
	r &= check_canonical(vcpu, "host ia32_sysenter_eip",
			     vmcs_readl(HOST_IA32_SYSENTER_EIP));
	r &= check_canonical(vcpu, "host ia32_sysenter_esp",
			     vmcs_readl(HOST_IA32_SYSENTER_ESP));

	/* 22.2.3 */
	r &= check_selector(vcpu, "host cs", 1, 1,
			    vmcs_read16(HOST_CS_SELECTOR));
	r &= check_selector(vcpu, "host ss", 1, !host_64,
			    vmcs_read16(HOST_SS_SELECTOR));
	r &= check_selector(vcpu, "host ds", 1, 0,
			    vmcs_read16(HOST_DS_SELECTOR));
	r &= check_selector(vcpu, "host es", 1, 0,
			    vmcs_read16(HOST_ES_SELECTOR));
	r &= check_selector(vcpu, "host fs", 1, 0,
			    vmcs_read16(HOST_FS_SELECTOR));
	r &= check_selector(vcpu, "host gs", 1, 0,
			    vmcs_read16(HOST_GS_SELECTOR));
	r &= check_selector(vcpu, "host tr", 1, 1,
			    vmcs_read16(HOST_TR_SELECTOR));

#ifdef CONFIG_X86_64
	r &= check_canonical(vcpu, "host fs base",
			     vmcs_readl(HOST_FS_BASE));
	r &= check_canonical(vcpu, "host gs base",
			     vmcs_readl(HOST_GS_BASE));
	r &= check_canonical(vcpu, "host gdtr base",
			     vmcs_readl(HOST_GDTR_BASE));
	r &= check_canonical(vcpu, "host idtr base",
			     vmcs_readl(HOST_IDTR_BASE));
#endif

	/* 22.2.4 */
#ifdef CONFIG_X86_64
	if (!host_64) {
		vcpu_printf(vcpu, "%s: vm exit controls: !64 bit host\n",
			    __FUNCTION__);
		r = 0;
	}
	if (!(cr4 & CR4_PAE_MASK)) {
		vcpu_printf(vcpu, "%s: cr4 (%lx): !pae\n",
			    __FUNCTION__, cr4);
		r = 0;
	}
	r &= check_canonical(vcpu, "host rip", vmcs_readl(HOST_RIP));
#endif

	return r;
}
int vm_entry_test(struct kvm_vcpu *vcpu)
{
	int rg, rh;

	rg = vm_entry_test_guest(vcpu);
	rh = vm_entry_test_host(vcpu);
	return rg && rh;
}
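
/*
 * A plausible (hypothetical) call site, just before resuming the guest
 * in a debug build:
 *
 *	if (!vm_entry_test(vcpu))
 *		vcpu_dump(vcpu);
 */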
void vmcs_dump(struct kvm_vcpu *vcpu)
{
	vcpu_printf(vcpu, "************************ vmcs_dump ************************\n");
	vcpu_printf(vcpu, "VM_ENTRY_CONTROLS 0x%x\n", vmcs_read32(VM_ENTRY_CONTROLS));

	vcpu_printf(vcpu, "GUEST_CR0 0x%lx\n", vmcs_readl(GUEST_CR0));
	vcpu_printf(vcpu, "GUEST_CR3 0x%lx\n", vmcs_readl(GUEST_CR3));
	vcpu_printf(vcpu, "GUEST_CR4 0x%lx\n", vmcs_readl(GUEST_CR4));

	vcpu_printf(vcpu, "GUEST_SYSENTER_ESP 0x%lx\n", vmcs_readl(GUEST_SYSENTER_ESP));
	vcpu_printf(vcpu, "GUEST_SYSENTER_EIP 0x%lx\n", vmcs_readl(GUEST_SYSENTER_EIP));

	vcpu_printf(vcpu, "GUEST_IA32_DEBUGCTL 0x%llx\n", vmcs_read64(GUEST_IA32_DEBUGCTL));
	vcpu_printf(vcpu, "GUEST_DR7 0x%lx\n", vmcs_readl(GUEST_DR7));

	vcpu_printf(vcpu, "GUEST_RFLAGS 0x%lx\n", vmcs_readl(GUEST_RFLAGS));
	vcpu_printf(vcpu, "GUEST_RIP 0x%lx\n", vmcs_readl(GUEST_RIP));

	vcpu_printf(vcpu, "GUEST_CS_SELECTOR 0x%x\n", vmcs_read16(GUEST_CS_SELECTOR));
	vcpu_printf(vcpu, "GUEST_DS_SELECTOR 0x%x\n", vmcs_read16(GUEST_DS_SELECTOR));
	vcpu_printf(vcpu, "GUEST_ES_SELECTOR 0x%x\n", vmcs_read16(GUEST_ES_SELECTOR));
	vcpu_printf(vcpu, "GUEST_FS_SELECTOR 0x%x\n", vmcs_read16(GUEST_FS_SELECTOR));
	vcpu_printf(vcpu, "GUEST_GS_SELECTOR 0x%x\n", vmcs_read16(GUEST_GS_SELECTOR));
	vcpu_printf(vcpu, "GUEST_SS_SELECTOR 0x%x\n", vmcs_read16(GUEST_SS_SELECTOR));

	vcpu_printf(vcpu, "GUEST_TR_SELECTOR 0x%x\n", vmcs_read16(GUEST_TR_SELECTOR));
	vcpu_printf(vcpu, "GUEST_LDTR_SELECTOR 0x%x\n", vmcs_read16(GUEST_LDTR_SELECTOR));

	vcpu_printf(vcpu, "GUEST_CS_AR_BYTES 0x%x\n", vmcs_read32(GUEST_CS_AR_BYTES));
	vcpu_printf(vcpu, "GUEST_DS_AR_BYTES 0x%x\n", vmcs_read32(GUEST_DS_AR_BYTES));
	vcpu_printf(vcpu, "GUEST_ES_AR_BYTES 0x%x\n", vmcs_read32(GUEST_ES_AR_BYTES));
	vcpu_printf(vcpu, "GUEST_FS_AR_BYTES 0x%x\n", vmcs_read32(GUEST_FS_AR_BYTES));
	vcpu_printf(vcpu, "GUEST_GS_AR_BYTES 0x%x\n", vmcs_read32(GUEST_GS_AR_BYTES));
	vcpu_printf(vcpu, "GUEST_SS_AR_BYTES 0x%x\n", vmcs_read32(GUEST_SS_AR_BYTES));

	vcpu_printf(vcpu, "GUEST_LDTR_AR_BYTES 0x%x\n", vmcs_read32(GUEST_LDTR_AR_BYTES));
	vcpu_printf(vcpu, "GUEST_TR_AR_BYTES 0x%x\n", vmcs_read32(GUEST_TR_AR_BYTES));

	vcpu_printf(vcpu, "GUEST_CS_BASE 0x%lx\n", vmcs_readl(GUEST_CS_BASE));
	vcpu_printf(vcpu, "GUEST_DS_BASE 0x%lx\n", vmcs_readl(GUEST_DS_BASE));
	vcpu_printf(vcpu, "GUEST_ES_BASE 0x%lx\n", vmcs_readl(GUEST_ES_BASE));
	vcpu_printf(vcpu, "GUEST_FS_BASE 0x%lx\n", vmcs_readl(GUEST_FS_BASE));
	vcpu_printf(vcpu, "GUEST_GS_BASE 0x%lx\n", vmcs_readl(GUEST_GS_BASE));
	vcpu_printf(vcpu, "GUEST_SS_BASE 0x%lx\n", vmcs_readl(GUEST_SS_BASE));

	vcpu_printf(vcpu, "GUEST_LDTR_BASE 0x%lx\n", vmcs_readl(GUEST_LDTR_BASE));
	vcpu_printf(vcpu, "GUEST_TR_BASE 0x%lx\n", vmcs_readl(GUEST_TR_BASE));

	vcpu_printf(vcpu, "GUEST_CS_LIMIT 0x%x\n", vmcs_read32(GUEST_CS_LIMIT));
	vcpu_printf(vcpu, "GUEST_DS_LIMIT 0x%x\n", vmcs_read32(GUEST_DS_LIMIT));
	vcpu_printf(vcpu, "GUEST_ES_LIMIT 0x%x\n", vmcs_read32(GUEST_ES_LIMIT));
	vcpu_printf(vcpu, "GUEST_FS_LIMIT 0x%x\n", vmcs_read32(GUEST_FS_LIMIT));
	vcpu_printf(vcpu, "GUEST_GS_LIMIT 0x%x\n", vmcs_read32(GUEST_GS_LIMIT));
	vcpu_printf(vcpu, "GUEST_SS_LIMIT 0x%x\n", vmcs_read32(GUEST_SS_LIMIT));

	vcpu_printf(vcpu, "GUEST_LDTR_LIMIT 0x%x\n", vmcs_read32(GUEST_LDTR_LIMIT));
	vcpu_printf(vcpu, "GUEST_TR_LIMIT 0x%x\n", vmcs_read32(GUEST_TR_LIMIT));

	vcpu_printf(vcpu, "GUEST_GDTR_BASE 0x%lx\n", vmcs_readl(GUEST_GDTR_BASE));
	vcpu_printf(vcpu, "GUEST_IDTR_BASE 0x%lx\n", vmcs_readl(GUEST_IDTR_BASE));

	vcpu_printf(vcpu, "GUEST_GDTR_LIMIT 0x%x\n", vmcs_read32(GUEST_GDTR_LIMIT));
	vcpu_printf(vcpu, "GUEST_IDTR_LIMIT 0x%x\n", vmcs_read32(GUEST_IDTR_LIMIT));

	vcpu_printf(vcpu, "EXCEPTION_BITMAP 0x%x\n", vmcs_read32(EXCEPTION_BITMAP));
	vcpu_printf(vcpu, "***********************************************************\n");
}
void regs_dump(struct kvm_vcpu *vcpu)
{
#define REG_DUMP(reg) \
	vcpu_printf(vcpu, #reg" = 0x%lx(VCPU)\n", vcpu->regs[VCPU_REGS_##reg])
#define VMCS_REG_DUMP(reg) \
	vcpu_printf(vcpu, #reg" = 0x%lx(VMCS)\n", vmcs_readl(GUEST_##reg))

	vcpu_printf(vcpu, "************************ regs_dump ************************\n");
	REG_DUMP(RAX);
	REG_DUMP(RBX);
	REG_DUMP(RCX);
	REG_DUMP(RDX);
	REG_DUMP(RSP);
	REG_DUMP(RBP);
	REG_DUMP(RSI);
	REG_DUMP(RDI);
	REG_DUMP(R8);
	REG_DUMP(R9);
	REG_DUMP(R10);
	REG_DUMP(R11);
	REG_DUMP(R12);
	REG_DUMP(R13);
	REG_DUMP(R14);
	REG_DUMP(R15);

	VMCS_REG_DUMP(RSP);
	VMCS_REG_DUMP(RIP);
	VMCS_REG_DUMP(RFLAGS);

	vcpu_printf(vcpu, "***********************************************************\n");
}
void sregs_dump(struct kvm_vcpu *vcpu)
{
	vcpu_printf(vcpu, "************************ sregs_dump ************************\n");
	vcpu_printf(vcpu, "cr0 = 0x%lx\n", vcpu->cr0);
	vcpu_printf(vcpu, "cr2 = 0x%lx\n", vcpu->cr2);
	vcpu_printf(vcpu, "cr3 = 0x%lx\n", vcpu->cr3);
	vcpu_printf(vcpu, "cr4 = 0x%lx\n", vcpu->cr4);
	vcpu_printf(vcpu, "cr8 = 0x%lx\n", vcpu->cr8);
	vcpu_printf(vcpu, "shadow_efer = 0x%llx\n", vcpu->shadow_efer);
	vcpu_printf(vcpu, "***********************************************************\n");
}
void show_pending_interrupts(struct kvm_vcpu *vcpu)
{
	int i;

	vcpu_printf(vcpu, "************************ pending interrupts ****************\n");
	vcpu_printf(vcpu, "summary = 0x%lx\n", vcpu->irq_summary);
	for (i = 0; i < NR_IRQ_WORDS; i++)
		vcpu_printf(vcpu, "%lx ", vcpu->irq_pending[i]);
	vcpu_printf(vcpu, "\n");
	vcpu_printf(vcpu, "************************************************************\n");
}
void vcpu_dump(struct kvm_vcpu *vcpu)
{
	regs_dump(vcpu);
	sregs_dump(vcpu);
	vmcs_dump(vcpu);
	show_msrs(vcpu);
	show_pending_interrupts(vcpu);
	/* more ... */
}
#endif