/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/preempt.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)
#define KVM_GUEST_CR0_MASK \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
	 | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
	 | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 8
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define KVM_PIO_PAGE_OFFSET 1
/*
 * vcpu->requests bit members
 */
#define KVM_TLB_FLUSH 0
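/*
 * Illustrative sketch (not part of the original header): requests is a
 * bitmask polled on the guest-entry path, so a deferred action such as a
 * TLB flush is requested and consumed with the usual atomic bit helpers:
 *
 *	set_bit(KVM_TLB_FLUSH, &vcpu->requests);
 *	...
 *	if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
 *		flush the guest TLB before resuming;
 */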
/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long gva_t;
typedef u64           gpa_t;
typedef unsigned long gfn_t;

typedef unsigned long hva_t;
typedef u64           hpa_t;
typedef unsigned long hfn_t;
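/*
 * Illustrative sketch (not in the original header): the frame-number types
 * are the corresponding physical-address types scaled by the page size:
 *
 *	gfn_t gfn = gpa >> PAGE_SHIFT;		/\* guest physical -> guest frame *\/
 *	gpa_t gpa = (gpa_t)gfn << PAGE_SHIFT;	/\* guest frame -> guest physical *\/
 */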
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};
/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels : 4;
		unsigned level : 4;
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
		unsigned hugepage_access : 3;
	};
};
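/*
 * Illustrative sketch (hypothetical variables): because the bitfields share
 * storage with `word`, a role can be filled in field by field and then
 * compared as a single integer:
 *
 *	union kvm_mmu_page_role role;
 *
 *	role.word = 0;
 *	role.glevels = 4;	/\* 4-level guest paging *\/
 *	role.level = 1;		/\* leaf level of the shadow tree *\/
 *	if (page->role.word == role.word)
 *		...		/\* same kind of shadow page *\/
 */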
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int multimapped;         /* More than one parent_pte? */
	int root_count;          /* Currently serving as active root */
	union {
		u64 *parent_pte;               /* !multimapped */
		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
	};
};
extern struct kmem_cache *kvm_vcpu_cache;
/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;

	u64 *pae_root;
};
#define KVM_NR_MEM_OBJS 20

struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
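/*
 * Minimal sketch (assumed helper, not declared in this header) of how such
 * a cache is consumed on the fault path: objects are only popped, never
 * allocated, so the fault handler itself cannot fail on memory:
 *
 *	static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 *	{
 *		BUG_ON(!mc->nobjs);
 *		return mc->objects[--mc->nobjs];
 *	}
 */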
struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];
	int singlestep;
};
struct kvm_pio_request {
	unsigned long count;
	int cur_count;
	struct page *guest_pages[2];
	unsigned guest_page_offset;
	int in;
	int port;
	int size;
	int string;
	int down;
	int rep;
};
struct kvm_stat {
	/* ... */
	u32 irq_window_exits;
	/* ... */
	u32 request_irq_exits;
	/* ... */
};
struct kvm_io_device {
	void (*read)(struct kvm_io_device *this,
		     gpa_t addr,
		     int len,
		     void *val);
	void (*write)(struct kvm_io_device *this,
		      gpa_t addr,
		      int len,
		      const void *val);
	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
	void (*destructor)(struct kvm_io_device *this);

	void *private;
};
static inline void kvm_iodevice_read(struct kvm_io_device *dev,
				     gpa_t addr,
				     int len,
				     void *val)
{
	dev->read(dev, addr, len, val);
}

static inline void kvm_iodevice_write(struct kvm_io_device *dev,
				      gpa_t addr,
				      int len,
				      const void *val)
{
	dev->write(dev, addr, len, val);
}

static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
{
	return dev->in_range(dev, addr);
}

static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
	dev->destructor(dev);
}
/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice. At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
	int dev_count;
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
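/*
 * A minimal sketch of the linear search the comment above alludes to
 * (kvm_io_bus_find_dev is declared just below; this body is illustrative):
 *
 *	for (i = 0; i < bus->dev_count; i++)
 *		if (kvm_iodevice_inrange(bus->devs[i], addr))
 *			return bus->devs[i];
 *	return NULL;
 */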
void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);
struct kvm_vcpu {
	struct kvm *kvm;
	struct preempt_notifier preempt_notifier;
	int vcpu_id;
	struct mutex mutex;
	int cpu;
	u64 host_tsc;
	struct kvm_run *run;
	int interrupt_window_open;
	int guest_mode;
	unsigned long requests;
	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
	unsigned long rip;      /* needs vcpu_load_rsp_rip() */

	unsigned long cr0;
	unsigned long cr2;
	unsigned long cr3;
	gpa_t para_state_gpa;
	struct page *para_state_page;
	gpa_t hypercall_gpa;
	unsigned long cr4;
	unsigned long cr8;
	u64 pdptrs[4]; /* pae */
	u64 shadow_efer;
	u64 apic_base;
	struct kvm_lapic *apic;    /* kernel irqchip context */
#define VCPU_MP_STATE_RUNNABLE          0
#define VCPU_MP_STATE_UNINITIALIZED    1
#define VCPU_MP_STATE_INIT_RECEIVED    2
#define VCPU_MP_STATE_SIPI_RECEIVED    3
#define VCPU_MP_STATE_HALTED           4
	int mp_state;
	int sipi_vector;
	u64 ia32_misc_enable_msr;

	struct kvm_mmu mmu;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;

	struct kvm_guest_debug guest_debug;

	struct i387_fxsave_struct host_fx_image;
	struct i387_fxsave_struct guest_fx_image;
	int fpu_active;
	int guest_fpu_loaded;

	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_size;
	unsigned char mmio_data[8];
	gpa_t mmio_phys_addr;
	gva_t mmio_fault_cr2;
	struct kvm_pio_request pio;
	void *pio_data;
	wait_queue_head_t wq;

	int sigset_active;
	sigset_t sigset;

	struct kvm_stat stat;

	struct {
		int active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
};
struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};
struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	struct page **phys_mem;
	unsigned long *dirty_bitmap;
};
struct kvm {
	struct mutex lock; /* protects everything except vcpus */
	int naliases;
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
	int nmemslots;
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	int n_free_mmu_pages;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	unsigned long rmap_overflow;
	struct list_head vm_list;
	struct file *filp;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	int round_robin_prev_vcpu;
};
static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
{
	return kvm->vpic;
}

static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
{
	return kvm->vioapic;
}

static inline int irqchip_in_kernel(struct kvm *kvm)
{
	return pic_irqchip(kvm) != 0;
}
struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));
struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	void (*hardware_enable)(void *dummy);      /* __init */
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*vcpu_decache)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_regs)(struct kvm_vcpu *vcpu);
	void (*decache_regs)(struct kvm_vcpu *vcpu);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  unsigned long addr, u32 err_code);

	void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	int (*get_irq)(struct kvm_vcpu *vcpu);
	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
				       struct kvm_run *run);
};

extern struct kvm_x86_ops *kvm_x86_ops;
/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id, ## __VA_ARGS__);	\
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
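/*
 * Example use at a hypothetical call site (the rate limit keeps a
 * misbehaving guest from flooding the host log):
 *
 *	pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
 */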
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
		 struct module *module);
void kvm_exit_x86(void);
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
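/*
 * Illustrative sketch (hypothetical caller): translation failures are
 * signalled in-band by setting the most significant bit of the returned
 * host physical address, which is_error_hpa() tests:
 *
 *	hpa_t hpa = gpa_to_hpa(vcpu, gpa);
 *	if (is_error_hpa(hpa))
 *		return 0;	/\* no memory slot backs this guest address *\/
 */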
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern hpa_t bad_page_address;

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
enum emulation_result {
	EMULATE_DONE,    /* no further processing */
	EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
	EMULATE_FAIL,    /* can't emulate this instruction */
};
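/*
 * Illustrative dispatch on the result (hypothetical exit handler):
 *
 *	switch (emulate_instruction(vcpu, run, cr2, error_code)) {
 *	case EMULATE_DONE:
 *		return 1;	/\* resume the guest *\/
 *	case EMULATE_DO_MMIO:
 *		return 0;	/\* let userspace complete the mmio *\/
 *	case EMULATE_FAIL:
 *		kvm_report_emulation_failure(vcpu, "pagetable");
 *		return 1;
 *	}
 */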
int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code);
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);

int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
struct x86_emulate_ctxt;
int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
unsigned long get_cr8(struct kvm_vcpu *vcpu);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu);
unsigned long segment_base(u16 selector);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
static inline void kvm_guest_enter(void)
{
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	current->flags &= ~PF_VCPU;
}
static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				     u32 error_code)
{
	return vcpu->mmu.page_fault(vcpu, gva, error_code);
}
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
		__kvm_mmu_free_some_pages(vcpu);
}
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->shadow_efer & EFER_LME;
#else
	return 0;
#endif
}
static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & X86_CR4_PAE;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return vcpu->cr4 & X86_CR4_PSE;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & X86_CR0_PG;
}
static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}
static inline u16 read_fs(void)
{
	u16 seg;
	asm ("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_gs(void)
{
	u16 seg;
	asm ("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_ldt(void)
{
	u16 ldt;
	asm ("sldt %0" : "=g"(ldt));
	return ldt;
}
static inline void load_fs(u16 sel)
{
	asm ("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
	asm ("mov %0, %%gs" : : "rm"(sel));
}

static inline void load_ldt(u16 sel)
{
	asm ("lldt %0" : : "rm"(sel));
}
static inline void get_idt(struct descriptor_table *table)
{
	asm ("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
	asm ("sgdt %0" : "=m"(*table));
}
static inline unsigned long read_tr_base(void)
{
	u16 tr;
	asm ("str %0" : "=g"(tr));
	return segment_base(tr);
}
#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif
static inline void fx_save(struct i387_fxsave_struct *image)
{
	asm ("fxsave (%0)" : : "r"(image));
}

static inline void fx_restore(struct i387_fxsave_struct *image)
{
	asm ("fxrstor (%0)" : : "r"(image));
}
static inline void fpu_init(void)
{
	asm ("finit");
}
static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}
#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"
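/*
 * The VMX opcodes above are hand-encoded byte sequences, with the operand
 * fixed to rax (or rdx/rsp for the register forms), so the code still
 * assembles on toolchains whose assembler predates the VMX mnemonics.
 * Illustrative use (the real call sites also pass matching "m" operands
 * and clobbers):
 *
 *	asm volatile(ASM_VMX_VMXON_RAX
 *		     : : "a"(&phys_addr), "m"(phys_addr)
 *		     : "memory", "cc");
 */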
#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
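/*
 * Worked out, the real-mode TSS size above is: a 0x68 (104) byte base TSS,
 * a 32-byte interrupt redirection bitmap (one bit per software interrupt),
 * an 8192-byte I/O permission bitmap (one bit per port), plus the single
 * trailing 0xff byte the architecture requires after the I/O bitmap:
 * 104 + 32 + 8192 + 1 = 8329 bytes.
 */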