/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>
#include <linux/pvclock_gtod.h>
#include <linux/clocksource.h>
#include <linux/irqbypass.h>
#include <linux/hyperv.h>

#include <asm/apic.h>
#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
#include <asm/kvm_page_track.h>
#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
#define KVM_MAX_VCPU_ID 1023
#define KVM_USER_MEM_SLOTS 509
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
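/* 509 user slots plus the 3 private ones give 512 slots in all, a power of two. */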
#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
#define KVM_HALT_POLL_NS_DEFAULT 400000

#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
/* x86-specific vcpu->requests bit members */
#define KVM_REQ_MIGRATE_TIMER       8
#define KVM_REQ_REPORT_TPR_ACCESS   9
#define KVM_REQ_TRIPLE_FAULT        10
#define KVM_REQ_MMU_SYNC            11
#define KVM_REQ_CLOCK_UPDATE        12
#define KVM_REQ_EVENT               14
#define KVM_REQ_APF_HALT            15
#define KVM_REQ_STEAL_UPDATE        16
#define KVM_REQ_NMI                 17
#define KVM_REQ_PMU                 18
#define KVM_REQ_PMI                 19
#define KVM_REQ_SMI                 20
#define KVM_REQ_MASTERCLOCK_UPDATE  21
#define KVM_REQ_MCLOCK_INPROGRESS   22
#define KVM_REQ_SCAN_IOAPIC         23
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 24
#define KVM_REQ_APIC_PAGE_RELOAD    25
#define KVM_REQ_HV_CRASH            26
#define KVM_REQ_IOAPIC_EOI_EXIT     27
#define KVM_REQ_HV_RESET            28
#define KVM_REQ_HV_EXIT             29
#define KVM_REQ_HV_STIMER           30
#define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))

#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
#define CR3_PCID_INVD            BIT_64(63)
#define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
                          | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP \
                          | X86_CR4_PKE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES       3
#define KVM_HPAGE_GFN_SHIFT(x)  (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)      (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)       (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)       (~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)  (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
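/*
 * Worked example: at level 2 (2MB pages, PAGE_SHIFT == 12),
 * KVM_HPAGE_GFN_SHIFT(2) == 9, so KVM_HPAGE_SIZE(2) == 1UL << 21 == 2MB
 * and KVM_PAGES_PER_HPAGE(2) == 512.
 */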
static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
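/*
 * Illustrative use (hypothetical @slot variable, for exposition only):
 * the index of @gfn's containing large page within a memslot is
 *
 *	gfn_to_index(gfn, slot->base_gfn, level);
 */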
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64
enum kvm_reg {
        VCPU_REGS_RAX = 0,
        VCPU_REGS_RCX = 1,
        VCPU_REGS_RDX = 2,
        VCPU_REGS_RBX = 3,
        VCPU_REGS_RSP = 4,
        VCPU_REGS_RBP = 5,
        VCPU_REGS_RSI = 6,
        VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8 = 8,
        VCPU_REGS_R9 = 9,
        VCPU_REGS_R10 = 10,
        VCPU_REGS_R11 = 11,
        VCPU_REGS_R12 = 12,
        VCPU_REGS_R13 = 13,
        VCPU_REGS_R14 = 14,
        VCPU_REGS_R15 = 15,
#endif
        VCPU_REGS_RIP,
        NR_VCPU_REGS
};
enum kvm_reg_ex {
        VCPU_EXREG_PDPTR = NR_VCPU_REGS,
        VCPU_EXREG_CR3,
        VCPU_EXREG_RFLAGS,
        VCPU_EXREG_SEGMENTS,
};
enum {
        VCPU_SREG_ES,
        VCPU_SREG_CS,
        VCPU_SREG_SS,
        VCPU_SREG_DS,
        VCPU_SREG_FS,
        VCPU_SREG_GS,
        VCPU_SREG_TR,
        VCPU_SREG_LDTR,
};
#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS 4
#define DR6_BD       (1 << 13)
#define DR6_BS       (1 << 14)
#define DR6_RTM      (1 << 16)
#define DR6_FIXED_1  0xfffe0ff0
#define DR6_INIT     0xffff0ff0
#define DR6_VOLATILE 0x0001e00f

#define DR7_BP_EN_MASK 0x000000ff
#define DR7_GE       (1 << 9)
#define DR7_GD       (1 << 13)
#define DR7_FIXED_1  0x00000400
#define DR7_VOLATILE 0xffff2bff
#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
                                 PFERR_USER_MASK |		\
                                 PFERR_WRITE_MASK |		\
                                 PFERR_PRESENT_MASK)
/*
 * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
 * Access Tracking SPTEs. We use bit 62 instead of bit 63 to avoid conflicting
 * with the SVE bit in EPT PTEs.
 */
#define SPTE_SPECIAL_MASK (1ULL << 62)
/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC 0
/*
 * The following bit is set with PV-EOI, unset on EOI.
 * We detect PV-EOI changes by the guest by comparing
 * this bit with PV-EOI in guest memory.
 * See the implementation in apic_update_pv_eoi.
 */
#define KVM_APIC_PV_EOI_PENDING 1

struct kvm_kernel_irq_routing_entry;
/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};
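/*
 * Consumption sketch (illustration only; the real helpers live in
 * arch/x86/kvm/mmu.c): the cache is topped up before the mmu lock is
 * taken, so popping an object afterwards can never fail:
 *
 *	static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 *	{
 *		BUG_ON(!mc->nobjs);
 *		return mc->objects[--mc->nobjs];
 *	}
 */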
/*
 * The pages used as guest page tables on the soft mmu are tracked by
 * kvm_memory_slot.arch.gfn_track, which is 16 bits, so the role bits used
 * by an indirect shadow page cannot be more than 15 bits.
 *
 * Currently we use 14 bits: @level, @cr4_pae, @quadrant, @access,
 * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
 */
union kvm_mmu_page_role {
        unsigned word;
        struct {
                unsigned level:4;
                unsigned cr4_pae:1;
                unsigned quadrant:2;
                unsigned direct:1;
                unsigned access:3;
                unsigned invalid:1;
                unsigned nxe:1;
                unsigned cr0_wp:1;
                unsigned smep_andnot_wp:1;
                unsigned smap_andnot_wp:1;
                unsigned :8;

                /*
                 * This is left at the top of the word so that
                 * kvm_memslots_for_spte_role can extract it with a
                 * simple shift.  While there is room, give it a whole
                 * byte so it is also faster to load it from memory.
                 */
                unsigned smm:8;
        };
};
struct kvm_rmap_head {
        unsigned long val;
};
struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        gfn_t gfn;
        union kvm_mmu_page_role role;

        u64 *spt;
        /* hold the gfn of each spte inside spt */
        gfn_t *gfns;
        bool unsync;
        int root_count;          /* Currently serving as active root */
        unsigned int unsync_children;
        struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */

        /* The page is obsolete if mmu_valid_gen != kvm->arch.mmu_valid_gen. */
        unsigned long mmu_valid_gen;

        DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
        /*
         * Used out of the mmu-lock to avoid reading spte values while an
         * update is in progress; see the comments in __get_spte_lockless().
         */
        int clear_spte_count;
#endif

        /* Number of writes since the last time traversal visited this page. */
        atomic_t write_flooding_count;
};
struct kvm_pio_request {
        unsigned long count;
        int in;
        int port;
        int size;
};
struct rsvd_bits_validate {
        u64 rsvd_bits_mask[2][4];
        u64 bad_mt_xwr;
};
/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
        unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
        u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
                          bool prefault);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  struct x86_exception *fault);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
                            struct x86_exception *exception);
        gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                               struct x86_exception *exception);
        int (*sync_page)(struct kvm_vcpu *vcpu,
                         struct kvm_mmu_page *sp);
        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
        void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                           u64 *spte, const void *pte);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;
        union kvm_mmu_page_role base_role;
        bool direct_map;

        /*
         * Bitmap; bit set = permission fault
         * Byte index: page fault error code [4:1]
         * Bit index: pte permissions in ACC_* format
         */
        u8 permissions[16];
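        /*
         * Worked example: a user-mode write fault sets PFEC.W (bit 1) and
         * PFEC.U (bit 2), so error code bits [4:1] select byte index 3 of
         * permissions[]; the pte's ACC_* bits then select the bit that
         * says whether the access faults.
         */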
        /*
         * The pkru_mask indicates if protection key checks are needed.  It
         * consists of 16 domains indexed by page fault error code bits [4:1],
         * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
         * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
         */
        u32 pkru_mask;

        u64 *pae_root;
        u64 *lm_root;

        /*
         * Bitmasks used to check for zero bits in shadow page table
         * entries; these cover not only hardware-reserved bits but also
         * bits the SPTEs never use.
         */
        struct rsvd_bits_validate shadow_zero_check;

        struct rsvd_bits_validate guest_rsvd_check;

        /* Can have large pages at levels 2..last_nonleaf_level-1. */
        u8 last_nonleaf_level;

        bool nx;

        u64 pdptrs[4]; /* pae */
};
enum pmc_type {
        KVM_PMC_GP = 0,
        KVM_PMC_FIXED,
};
struct kvm_pmc {
        enum pmc_type type;
        u8 idx;
        u64 counter;
        u64 eventsel;
        struct perf_event *perf_event;
        struct kvm_vcpu *vcpu;
};
struct kvm_pmu {
        unsigned nr_arch_gp_counters;
        unsigned nr_arch_fixed_counters;
        unsigned available_event_types;
        u64 fixed_ctr_ctrl;
        u64 global_ctrl;
        u64 global_status;
        u64 global_ovf_ctrl;
        u64 counter_bitmask[2];
        u64 global_ctrl_mask;
        u64 reserved_bits;
        u8 version;
        struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
        struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
        struct irq_work irq_work;
        u64 reprogram_pmi;
};
struct kvm_pmu_ops;

enum {
        KVM_DEBUGREG_BP_ENABLED = 1,
        KVM_DEBUGREG_WONT_EXIT = 2,
        KVM_DEBUGREG_RELOAD = 4,
};
struct kvm_mtrr_range {
        u64 base;
        u64 mask;
        struct list_head node;
};

struct kvm_mtrr {
        struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
        mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
        u64 deftype;

        struct list_head head;
};
/* Hyper-V SynIC timer */
struct kvm_vcpu_hv_stimer {
        struct hrtimer timer;
        int index;
        u64 config;
        u64 count;
        u64 exp_time;
        struct hv_message msg;
        bool msg_pending;
};
/* Hyper-V synthetic interrupt controller (SynIC) */
struct kvm_vcpu_hv_synic {
        u64 version;
        u64 control;
        u64 msg_page;
        u64 evt_page;
        atomic64_t sint[HV_SYNIC_SINT_COUNT];
        atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
        DECLARE_BITMAP(auto_eoi_bitmap, 256);
        DECLARE_BITMAP(vec_bitmap, 256);
        bool active;
};
/* Hyper-V per vcpu emulation context */
struct kvm_vcpu_hv {
        u64 hv_vapic;
        s64 runtime_offset;
        struct kvm_vcpu_hv_synic synic;
        struct kvm_hyperv_exit exit;
        struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
        DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
};
struct kvm_vcpu_arch {
        /*
         * rip and regs accesses must go through
         * kvm_{register,rip}_{read,write} functions.
         */
        unsigned long regs[NR_VCPU_REGS];
        u32 regs_avail;
        u32 regs_dirty;

        unsigned long cr0;
        unsigned long cr0_guest_owned_bits;
        unsigned long cr2;
        unsigned long cr3;
        unsigned long cr4;
        unsigned long cr4_guest_owned_bits;
        unsigned long cr8;
        u32 hflags;
        u64 efer;
        u64 apic_base;
        struct kvm_lapic *apic;    /* kernel irqchip context */
        bool apicv_active;
        DECLARE_BITMAP(ioapic_handled_vectors, 256);
        unsigned long apic_attention;
        int32_t apic_arb_prio;
        int mp_state;
        u64 ia32_misc_enable_msr;
        u64 smbase;
        bool tpr_access_reporting;
        u64 ia32_xss;
        /*
         * Paging state of the vcpu
         *
         * If the vcpu runs in guest mode with two level paging this still
         * saves the paging mode of the L1 guest.  This context is always
         * used to handle faults.
         */
        struct kvm_mmu mmu;

        /*
         * Paging state of an L2 guest (used for nested npt)
         *
         * This context saves all information necessary to walk the page
         * tables of an L2 guest.  This context is only initialized for
         * page table walking and not for faulting, since we never handle
         * L2 page faults on the host.
         */
        struct kvm_mmu nested_mmu;

        /*
         * Pointer to the mmu context currently used for
         * gva_to_gpa translations.
         */
        struct kvm_mmu *walk_mmu;
        struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;

        struct fpu guest_fpu;
        u64 xcr0;
        u64 guest_supported_xcr0;
        u32 guest_xstate_size;

        struct kvm_pio_request pio;
        void *pio_data;

        u8 event_exit_inst_len;

        struct kvm_queued_exception {
                bool pending;
                bool has_error_code;
                bool reinject;
                u8 nr;
                u32 error_code;
        } exception;

        struct kvm_queued_interrupt {
                bool pending;
                bool soft;
                u8 nr;
        } interrupt;
        int halt_request; /* real mode on Intel only */

        int cpuid_nent;
        struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

        int maxphyaddr;

        /* emulate context */

        struct x86_emulate_ctxt emulate_ctxt;
        bool emulate_regs_need_sync_to_vcpu;
        bool emulate_regs_need_sync_from_vcpu;
        int (*complete_userspace_io)(struct kvm_vcpu *vcpu);

        gpa_t time;
        struct pvclock_vcpu_time_info hv_clock;
        unsigned int hw_tsc_khz;
        struct gfn_to_hva_cache pv_time;
        bool pv_time_enabled;
        /* set guest stopped flag in pvclock flags field */
        bool pvclock_set_guest_stopped_request;

        struct {
                u64 msr_val;
                u64 last_steal;
                struct gfn_to_hva_cache stime;
                struct kvm_steal_time steal;
        } st;

        u64 tsc_offset;
        u64 last_guest_tsc;
        u64 last_host_tsc;
        u64 tsc_offset_adjustment;
        u64 this_tsc_nsec;
        u64 this_tsc_write;
        u64 this_tsc_generation;
        bool tsc_catchup;
        bool tsc_always_catchup;
        s8 virtual_tsc_shift;
        u32 virtual_tsc_mult;
        u32 virtual_tsc_khz;
        s64 ia32_tsc_adjust_msr;
        u64 tsc_scaling_ratio;
        atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
        unsigned nmi_pending; /* NMI queued after currently running handler */
        bool nmi_injected;    /* Trying to inject an NMI this entry */
        bool smi_pending;     /* SMI queued after currently running handler */

        struct kvm_mtrr mtrr_state;
        u64 pat;

        unsigned switch_db_regs;
        unsigned long db[KVM_NR_DB_REGS];
        unsigned long dr6;
        unsigned long dr7;
        unsigned long eff_db[KVM_NR_DB_REGS];
        unsigned long guest_debug_dr7;

        u64 mcg_cap;
        u64 mcg_status;
        u64 mcg_ctl;
        u64 mcg_ext_ctl;
        u64 *mce_banks;

        /* Cache MMIO info */
        u64 mmio_gva;
        unsigned access;
        gfn_t mmio_gfn;
        u64 mmio_gen;

        struct kvm_pmu pmu;

        /* used for guest single stepping over the given code position */
        unsigned long singlestep_rip;

        struct kvm_vcpu_hv hyperv;

        cpumask_var_t wbinvd_dirty_mask;

        unsigned long last_retry_eip;
        unsigned long last_retry_addr;

        struct {
                bool halted;
                gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
                struct gfn_to_hva_cache data;
                u64 msr_val;
                u32 id;
                bool send_user_only;
        } apf;

        /* OSVW MSRs (AMD only) */
        struct {
                u64 length;
                u64 status;
        } osvw;

        struct {
                u64 msr_val;
                struct gfn_to_hva_cache data;
        } pv_eoi;
        /*
         * Indicates whether the access faulted on its page table in the
         * guest; set when fixing a page fault and used to detect
         * unhandleable instructions.
         */
        bool write_fault_to_shadow_pgtable;

        /* set at EPT violation at this point */
        unsigned long exit_qualification;

        /* pv related host specific info */
        struct {
                bool pv_unhalted;
        } pv;

        int pending_ioapic_eoi;
        int pending_external_vector;

        /* GPA available (AMD only) */
        bool gpa_available;
};
struct kvm_lpage_info {
        int disallow_lpage;
};

struct kvm_arch_memory_slot {
        struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
        unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};
/*
 * We use the number of bits allocated in the LDR for the logical
 * processor ID as the mode.  It happens that these are all powers of
 * two.  This makes it very easy to detect cases where the APICs are
 * configured for multiple modes; in that case, we cannot use the map and
 * hence cannot use kvm_irq_delivery_to_apic_fast either.
 */
#define KVM_APIC_MODE_XAPIC_CLUSTER          4
#define KVM_APIC_MODE_XAPIC_FLAT             8
#define KVM_APIC_MODE_X2APIC                16
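/*
 * Illustrative consequence (an inference, not stated in this header):
 * because each mode value is the power-of-two width of the logical ID
 * field, OR-ing the modes of all APICs yields a power of two exactly
 * when every APIC uses the same mode; any other value signals a mixed
 * configuration that the map cannot represent.
 */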
struct kvm_apic_map {
        struct rcu_head rcu;
        u8 mode;
        u32 max_apic_id;
        union {
                struct kvm_lapic *xapic_flat_map[8];
                struct kvm_lapic *xapic_cluster_map[16][4];
        };
        struct kvm_lapic *phys_map[];
};
/* Hyper-V emulation context */
struct kvm_hv {
        struct mutex hv_lock;
        u64 hv_guest_os_id;
        u64 hv_hypercall;
        u64 hv_tsc_page;

        /* Hyper-v based guest crash (NT kernel bugcheck) parameters */
        u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
        u64 hv_crash_ctl;

        HV_REFERENCE_TSC_PAGE tsc_ref;
};
enum kvm_irqchip_mode {
        KVM_IRQCHIP_NONE,
        KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
        KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
};
struct kvm_arch {
        unsigned int n_used_mmu_pages;
        unsigned int n_requested_mmu_pages;
        unsigned int n_max_mmu_pages;
        unsigned int indirect_shadow_pages;
        unsigned long mmu_valid_gen;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
         * Hash table of struct kvm_mmu_page.
         */
        struct list_head active_mmu_pages;
        struct list_head zapped_obsolete_pages;
        struct kvm_page_track_notifier_node mmu_sp_tracker;
        struct kvm_page_track_notifier_head track_notifier_head;

        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;
        bool iommu_noncoherent;
#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
        atomic_t noncoherent_dma_count;
#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
        atomic_t assigned_device_count;
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        struct kvm_pit *vpit;
        atomic_t vapics_in_nmi_mode;
        struct mutex apic_map_lock;
        struct kvm_apic_map *apic_map;

        unsigned int tss_addr;
        bool apic_access_page_done;

        gpa_t wall_clock;

        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;

        unsigned long irq_sources_bitmap;
        s64 kvmclock_offset;
        raw_spinlock_t tsc_write_lock;
        u64 last_tsc_nsec;
        u64 last_tsc_write;
        u32 last_tsc_khz;
        u64 cur_tsc_nsec;
        u64 cur_tsc_write;
        u64 cur_tsc_offset;
        u64 cur_tsc_generation;
        int nr_vcpus_matched_tsc;

        spinlock_t pvclock_gtod_sync_lock;
        bool use_master_clock;
        u64 master_kernel_ns;
        u64 master_cycle_now;
        struct delayed_work kvmclock_update_work;
        struct delayed_work kvmclock_sync_work;

        struct kvm_xen_hvm_config xen_hvm_config;

        /* reads protected by irq_srcu, writes by irq_lock */
        struct hlist_head mask_notifier_list;

        struct kvm_hv hyperv;

        #ifdef CONFIG_KVM_MMU_AUDIT
        int audit_point;
        #endif

        bool boot_vcpu_runs_old_kvmclock;
        u32 bsp_vcpu_id;

        u64 disabled_quirks;

        enum kvm_irqchip_mode irqchip_mode;
        u8 nr_reserved_ioapic_pins;

        bool disabled_lapic_found;

        /* Struct members for AVIC */
        u32 avic_vm_id;
        u32 ldr_mode;
        struct page *avic_logical_id_table_page;
        struct page *avic_physical_id_table_page;
        struct hlist_node hnode;

        bool x2apic_format;
        bool x2apic_broadcast_quirk_disabled;
};
struct kvm_vm_stat {
        ulong mmu_shadow_zapped;
        ulong mmu_pte_write;
        ulong mmu_pte_updated;
        ulong mmu_pde_zapped;
        ulong mmu_flooded;
        ulong mmu_recycled;
        ulong mmu_cache_miss;
        ulong mmu_unsync;
        ulong remote_tlb_flush;
        ulong lpages;
        ulong max_mmu_page_hash_collisions;
};
struct kvm_vcpu_stat {
        u64 pf_fixed;
        u64 pf_guest;
        u64 tlb_flush;
        u64 invlpg;

        u64 exits;
        u64 io_exits;
        u64 mmio_exits;
        u64 signal_exits;
        u64 irq_window_exits;
        u64 nmi_window_exits;
        u64 halt_exits;
        u64 halt_successful_poll;
        u64 halt_attempted_poll;
        u64 halt_poll_invalid;
        u64 halt_wakeup;
        u64 request_irq_exits;
        u64 irq_exits;
        u64 host_state_reload;
        u64 efer_reload;
        u64 fpu_reload;
        u64 insn_emulation;
        u64 insn_emulation_fail;
        u64 hypercalls;
        u64 irq_injections;
        u64 nmi_injections;
        u64 req_event;
};
struct x86_instruction_info;

struct msr_data {
        bool host_initiated;
        u32 index;
        u64 data;
};

struct kvm_lapic_irq {
        u32 vector;
        u16 delivery_mode;
        u16 dest_mode;
        bool level;
        u16 trig_mode;
        u32 shorthand;
        u32 dest_id;
        bool msi_redir_hint;
};
struct kvm_x86_ops {
        int (*cpu_has_kvm_support)(void);          /* __init */
        int (*disabled_by_bios)(void);             /* __init */
        int (*hardware_enable)(void);
        void (*hardware_disable)(void);
        void (*check_processor_compatibility)(void *rtn);
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */
        bool (*cpu_has_accelerated_tpr)(void);
        bool (*cpu_has_high_real_mode_segbase)(void);
        void (*cpuid_update)(struct kvm_vcpu *vcpu);

        int (*vm_init)(struct kvm *kvm);
        void (*vm_destroy)(struct kvm *kvm);

        /* Create, but do not attach this VCPU */
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

        void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);

        void (*update_bp_intercept)(struct kvm_vcpu *vcpu);
        int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
        int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        int (*get_cpl)(struct kvm_vcpu *vcpu);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
        void (*decache_cr3)(struct kvm_vcpu *vcpu);
        void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        u64 (*get_dr6)(struct kvm_vcpu *vcpu);
        void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
        void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
        u32 (*get_pkru)(struct kvm_vcpu *vcpu);

        void (*tlb_flush)(struct kvm_vcpu *vcpu);

        void (*run)(struct kvm_vcpu *vcpu);
        int (*handle_exit)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
        u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
        void (*set_irq)(struct kvm_vcpu *vcpu);
        void (*set_nmi)(struct kvm_vcpu *vcpu);
        void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code,
                                bool reinject);
        void (*cancel_injection)(struct kvm_vcpu *vcpu);
        int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
        int (*nmi_allowed)(struct kvm_vcpu *vcpu);
        bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
        void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
        void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
        void (*enable_irq_window)(struct kvm_vcpu *vcpu);
        void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
        bool (*get_enable_apicv)(void);
        void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
        void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
        void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
        void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
        void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
        void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
        void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
        int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
        int (*get_tdp_level)(void);
        u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
        int (*get_lpage_level)(void);
        bool (*rdtscp_supported)(void);
        bool (*invpcid_supported)(void);

        void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

        void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

        bool (*has_wbinvd_exit)(void);

        void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

        void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

        int (*check_intercept)(struct kvm_vcpu *vcpu,
                               struct x86_instruction_info *info,
                               enum x86_intercept_stage stage);
        void (*handle_external_intr)(struct kvm_vcpu *vcpu);
        bool (*mpx_supported)(void);
        bool (*xsaves_supported)(void);

        int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);

        void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
        /*
         * Arch-specific dirty logging hooks.  These hooks are only supposed
         * to be valid if the specific arch has a hardware-accelerated dirty
         * logging mechanism.  Currently only for PML on VMX.
         *
         *  - slot_enable_log_dirty:
         *      called when enabling log dirty mode for the slot.
         *  - slot_disable_log_dirty:
         *      called when disabling log dirty mode for the slot.
         *      also called when slot is created with log dirty disabled.
         *  - flush_log_dirty:
         *      called before reporting dirty_bitmap to userspace.
         *  - enable_log_dirty_pt_masked:
         *      called when reenabling log dirty for the GFNs in the mask
         *      after corresponding bits are cleared in slot->dirty_bitmap.
         */
        void (*slot_enable_log_dirty)(struct kvm *kvm,
                                      struct kvm_memory_slot *slot);
        void (*slot_disable_log_dirty)(struct kvm *kvm,
                                       struct kvm_memory_slot *slot);
        void (*flush_log_dirty)(struct kvm *kvm);
        void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
                                           struct kvm_memory_slot *slot,
                                           gfn_t offset, unsigned long mask);
        /* pmu operations of sub-arch */
        const struct kvm_pmu_ops *pmu_ops;
        /*
         * Architecture specific hooks for vCPU blocking due to the
         * HLT instruction.
         * Returns for .pre_block():
         *    - 0 means continue to block the vCPU.
         *    - 1 means we cannot block the vCPU, since some event
         *        happened during this period, such as the 'ON' bit in
         *        the posted-interrupts descriptor being set.
         */
        int (*pre_block)(struct kvm_vcpu *vcpu);
        void (*post_block)(struct kvm_vcpu *vcpu);

        void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
        void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);

        int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
                              uint32_t guest_irq, bool set);
        void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);

        int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
        void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

        void (*setup_mce)(struct kvm_vcpu *vcpu);
};
struct kvm_arch_async_pf {
        u32 token;
        gfn_t gfn;
        unsigned long cr3;
        bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
                u64 acc_track_mask);

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot);
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                   const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot);
void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot);
void kvm_mmu_slot_set_dirty(struct kvm *kvm,
                            struct kvm_memory_slot *memslot);
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                   struct kvm_memory_slot *slot,
                                   gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
bool pdptrs_changed(struct kvm_vcpu *vcpu);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                        const void *val, int bytes);

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);
extern bool tdp_enabled;

u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);

/* control of guest tsc rate supported? */
extern bool kvm_has_tsc_control;
/* maximum supported tsc_khz for guests */
extern u32 kvm_max_guest_tsc_khz;
/* number of bits of the fractional part of the TSC scaling ratio */
extern u8 kvm_tsc_scaling_ratio_frac_bits;
/* maximum allowed value of TSC scaling ratio */
extern u64 kvm_max_tsc_scaling_ratio;
/* 1ull << kvm_tsc_scaling_ratio_frac_bits */
extern u64 kvm_default_tsc_scaling_ratio;

extern u64 kvm_mce_cap_supported;
enum emulation_result {
        EMULATE_DONE,         /* no further processing */
        EMULATE_USER_EXIT,    /* kvm_run ready for userspace exit */
        EMULATE_FAIL,         /* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE          (1 << 0)
#define EMULTYPE_TRAP_UD            (1 << 1)
#define EMULTYPE_SKIP               (1 << 2)
#define EMULTYPE_RETRY              (1 << 3)
#define EMULTYPE_NO_REEXECUTE       (1 << 4)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
                            int emulation_type, void *insn, int insn_len);

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
                                      int emulation_type)
{
        return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}
void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
                    int reason, bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                            gfn_t gfn, void *data, int offset, int len,
                            u32 access);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
static inline int __kvm_irq_line_state(unsigned long *irq_state,
                                       int irq_source_id, int level)
{
        /* Logical OR for level trig interrupt */
        if (level)
                __set_bit(irq_source_id, irq_state);
        else
                __clear_bit(irq_source_id, irq_state);

        return !!(*irq_state);
}
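/*
 * Illustrative caller (hypothetical pin_state word, for exposition):
 * an irqchip keeps one state word per pin, so a level-triggered line
 * stays asserted while any source still drives it high:
 *
 *	irq_level = __kvm_irq_line_state(&pin_state, irq_source_id, level);
 */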
int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                           struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
                              struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
                               struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
                               struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
                                struct x86_exception *exception);

void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
                       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);
static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                                  struct x86_exception *exception)
{
        return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}
static inline u16 kvm_read_ldt(void)
{
        u16 ldt;
        asm("sldt %0" : "=g"(ldt));
        return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
        asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
#endif
static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
        kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

static inline u64 get_canonical(u64 la)
{
        return ((int64_t)la << 16) >> 16;
}

static inline bool is_noncanonical_address(u64 la)
{
#ifdef CONFIG_X86_64
        return get_canonical(la) != la;
#else
        return false;
#endif
}
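/*
 * Worked example: with 48-bit virtual addresses, get_canonical()
 * sign-extends bit 47 into bits 63:48, so 0x0000800000000000 becomes
 * 0xffff800000000000 (non-canonical: the two differ), while
 * 0x00007fffffffffff maps to itself and is canonical.
 */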
#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE \
        (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
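/*
 * That is 0x68 + 32 + 8192 + 1 = 8329 bytes: the base TSS, the VM86
 * interrupt redirection bitmap, the I/O permission bitmap, and the
 * terminating byte of all ones that the bitmap must end with.
 */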
enum {
        TASK_SWITCH_CALL = 0,
        TASK_SWITCH_IRET = 1,
        TASK_SWITCH_JMP = 2,
        TASK_SWITCH_GATE = 3,
};
#define HF_GIF_MASK             (1 << 0)
#define HF_HIF_MASK             (1 << 1)
#define HF_VINTR_MASK           (1 << 2)
#define HF_NMI_MASK             (1 << 3)
#define HF_IRET_MASK            (1 << 4)
#define HF_GUEST_MASK           (1 << 5) /* VCPU is in guest-mode */
#define HF_SMM_MASK             (1 << 6)
#define HF_SMM_INSIDE_NMI_MASK  (1 << 7)

#define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
#define KVM_ADDRESS_SPACE_NUM 2

#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_spurious_fault(void);

#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn)	\
        "666: " insn "\n\t" \
        "668: \n\t"                           \
        ".pushsection .fixup, \"ax\" \n" \
        "667: \n\t" \
        cleanup_insn "\n\t"                   \
        "cmpb $0, kvm_rebooting \n\t"         \
        "jne 668b \n\t"                       \
        __ASM_SIZE(push) " $666b \n\t"        \
        "call kvm_spurious_fault \n\t"        \
        ".popsection \n\t" \
        _ASM_EXTABLE(666b, 667b)

#define __kvm_handle_fault_on_reboot(insn)		\
        ____kvm_handle_fault_on_reboot(insn, "")
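/*
 * Typical use, sketched after the way the VMX code wraps its inline
 * assembly (the __ex() wrapper name is that file's convention, not part
 * of this header):
 *
 *	#define __ex(x) __kvm_handle_fault_on_reboot(x)
 *
 *	asm volatile(__ex("vmclear %0") : : "m"(vmcs_pa) : "cc", "memory");
 */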
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
                                           unsigned long address);

void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

int kvm_is_in_guest(void);

int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
                             struct kvm_vcpu **dest_vcpu);

void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
                     struct kvm_lapic_irq *irq);
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
        if (kvm_x86_ops->vcpu_blocking)
                kvm_x86_ops->vcpu_blocking(vcpu);
}

static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
        if (kvm_x86_ops->vcpu_unblocking)
                kvm_x86_ops->vcpu_unblocking(vcpu);
}

static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
        return __default_cpu_present_to_apicid(mps_cpu);
#else
        WARN_ON_ONCE(1);
        return BAD_APICID;
#endif
}

#endif /* _ASM_X86_KVM_HOST_H */