/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_X86_OPS_H
#define __KVM_X86_VMX_X86_OPS_H

#include <linux/kvm_host.h>
__init int vmx_hardware_setup(void);

extern struct kvm_x86_ops vt_x86_ops __initdata;
extern struct kvm_x86_init_ops vt_init_ops __initdata;
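
/*
 * vt_x86_ops and vt_init_ops are defined in main.c, which wires the
 * vmx_*() implementations declared below into the kvm_x86_ops callback
 * table.  A minimal illustrative sketch only (a small subset of the real
 * table, with field names assumed to mirror the callbacks):
 *
 *	struct kvm_x86_ops vt_x86_ops __initdata = {
 *		.vcpu_create	= vmx_vcpu_create,
 *		.vcpu_free	= vmx_vcpu_free,
 *		.vcpu_load	= vmx_vcpu_load,
 *		.vcpu_put	= vmx_vcpu_put,
 *		.vcpu_run	= vmx_vcpu_run,
 *		.handle_exit	= vmx_handle_exit,
 *		...
 *	};
 */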
void vmx_hardware_unsetup(void);
int vmx_check_processor_compat(void);
int vmx_enable_virtualization_cpu(void);
void vmx_disable_virtualization_cpu(void);
void vmx_emergency_disable_virtualization_cpu(void);
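
/* VM and vCPU creation/teardown, context switching and the vcpu_run entry points. */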
int vmx_vm_init(struct kvm *kvm);
void vmx_vm_destroy(struct kvm *kvm);
int vmx_vcpu_precreate(struct kvm *kvm);
int vmx_vcpu_create(struct kvm_vcpu *vcpu);
int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu);
fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
void vmx_vcpu_free(struct kvm_vcpu *vcpu);
void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_put(struct kvm_vcpu *vcpu);
int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath);
void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu);
int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu);
void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu);
int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
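
/* SMM emulation hooks (guarded by CONFIG_KVM_SMM in the full tree). */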
int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection);
int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram);
int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
void vmx_enable_smi_window(struct kvm_vcpu *vcpu);
int vmx_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
				  void *insn, int insn_len);
int vmx_check_intercept(struct kvm_vcpu *vcpu,
			struct x86_instruction_info *info,
			enum x86_intercept_stage stage,
			struct x86_exception *exception);
bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu);
void vmx_migrate_timers(struct kvm_vcpu *vcpu);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void vmx_hwapic_isr_update(int max_isr);
int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
			   int trig_mode, int vector);
void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
void vmx_msr_filter_changed(struct kvm_vcpu *vcpu);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
int vmx_get_feature_msr(u32 msr, u64 *data);
int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
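
/* Guest segment, control-register, descriptor-table and debug state accessors. */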
u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val);
void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu);
void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool vmx_get_if_flag(struct kvm_vcpu *vcpu);
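
/* TLB flush hooks: full, current-context, single-GVA and guest TLB flushes. */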
void vmx_flush_tlb_all(struct kvm_vcpu *vcpu);
void vmx_flush_tlb_current(struct kvm_vcpu *vcpu);
void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr);
void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu);
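
/* Event injection and interrupt/NMI window management. */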
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall);
void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected);
void vmx_inject_nmi(struct kvm_vcpu *vcpu);
void vmx_inject_exception(struct kvm_vcpu *vcpu);
void vmx_cancel_injection(struct kvm_vcpu *vcpu);
int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection);
int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_enable_nmi_window(struct kvm_vcpu *vcpu);
void vmx_enable_irq_window(struct kvm_vcpu *vcpu);
void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr);
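
/* APIC virtualization control, legacy TSS/identity-map setup, memory type and exit info. */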
void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu);
void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr);
u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
		       u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code);
u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
void vmx_write_tsc_offset(struct kvm_vcpu *vcpu);
void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
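
/* APIC timer emulation via the VMX preemption timer, plus MCE setup. */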
int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
		     bool *expired);
void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu);

void vmx_setup_mce(struct kvm_vcpu *vcpu);

#endif /* __KVM_X86_VMX_X86_OPS_H */