/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_LAPIC_H
#define __KVM_X86_LAPIC_H

#include <kvm/iodev.h>

#include <linux/kvm_host.h>

#define KVM_APIC_INIT		0
#define KVM_APIC_SIPI		1
#define KVM_APIC_LVT_NUM	6

#define KVM_APIC_SHORT_MASK	0xc0000
#define KVM_APIC_DEST_MASK	0x800

#define APIC_BUS_CYCLE_NS	1
#define APIC_BUS_FREQUENCY	(1000000000ULL / APIC_BUS_CYCLE_NS)

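/* Software state of the emulated LAPIC timer. */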
struct kvm_timer {
	s64 period;			/* unit: ns */
	ktime_t target_expiration;
	u64 expired_tscdeadline;
	atomic_t pending;		/* accumulated triggered timers */
};

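/* Per-vCPU state of the in-kernel local APIC. */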
struct kvm_lapic {
	unsigned long base_address;
	struct kvm_io_device dev;
	struct kvm_timer lapic_timer;
	struct kvm_vcpu *vcpu;
	bool sw_enabled;
	bool irr_pending;
	bool lvt0_in_nmi_mode;
	/* Number of bits set in ISR. */
	s16 isr_count;
	/* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
	int highest_isr_cache;
	/*
	 * APIC register page.  The layout matches the register layout seen by
	 * the guest 1:1, because it is accessed by the vmx microcode.
	 * Note: Only one register, the TPR, is used by the microcode.
	 */
	void *regs;
	struct gfn_to_hva_cache vapic_cache;
	unsigned long pending_events;
	unsigned int sipi_vector;
};

int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu);

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
void kvm_apic_accept_events(struct kvm_vcpu *vcpu);
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu);
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
void kvm_apic_set_version(struct kvm_vcpu *vcpu);
int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val);
int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
		       void *data);
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			 int short_hand, unsigned int dest, int dest_mode);

bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr);
bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr);
void kvm_apic_update_ppr(struct kvm_vcpu *vcpu);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map);
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);

u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);

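/*
 * Fast paths used when hardware APIC virtualization has already completed the
 * register write or EOI and the VM exit is only a notification.
 */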
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);

int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);

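/*
 * True if the guest has enabled its Hyper-V APIC assist page via the
 * synthetic assist-page MSR.
 */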
static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE;
}

int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
void kvm_lapic_init(void);
void kvm_lapic_exit(void);

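/*
 * The IRR, ISR and TMR are 256-bit bitmaps held in eight 32-bit APIC
 * registers spaced 0x10 bytes apart: REG_POS() yields the byte offset of the
 * register that holds a vector, VEC_POS() the bit position within it.
 */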
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)

static inline void kvm_lapic_set_vector(int vec, void *bitmap)
{
	set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline void kvm_lapic_set_irr(int vec, struct kvm_lapic *apic)
{
	kvm_lapic_set_vector(vec, apic->regs + APIC_IRR);
	/*
	 * irr_pending must be true if any interrupt is pending; set it after
	 * APIC_IRR to avoid race with apic_clear_irr
	 */
	apic->irr_pending = true;
}

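/* Raw accessors for the 32-bit registers backing the virtual APIC page. */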
static inline u32 kvm_lapic_get_reg(struct kvm_lapic *apic, int reg_off)
{
	return *((u32 *) (apic->regs + reg_off));
}

static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
	*((u32 *) (apic->regs + reg_off)) = val;
}

extern struct static_key kvm_no_apic_vcpu;

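/*
 * kvm_no_apic_vcpu is enabled only while at least one vCPU lacks an in-kernel
 * LAPIC; in the common case the branch below is patched out and
 * lapic_in_kernel() is unconditionally true.
 */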
static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu)
{
	if (static_key_false(&kvm_no_apic_vcpu))
		return vcpu->arch.apic;
	return true;
}

extern struct static_key_deferred apic_hw_disabled;

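/*
 * apic_hw_disabled is raised only while some vCPU has cleared the APIC global
 * enable bit in MSR_IA32_APICBASE; otherwise the MSR is not consulted and the
 * APIC is reported as hardware-enabled.
 */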
static inline int kvm_apic_hw_enabled(struct kvm_lapic *apic)
{
	if (static_key_false(&apic_hw_disabled.key))
		return apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
	return MSR_IA32_APICBASE_ENABLE;
}

extern struct static_key_deferred apic_sw_disabled;

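/*
 * apic_sw_disabled likewise tracks vCPUs that have cleared the software
 * enable bit in the spurious-interrupt vector register (APIC_SPIV).
 */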
static inline bool kvm_apic_sw_enabled(struct kvm_lapic *apic)
{
	if (static_key_false(&apic_sw_disabled.key))
		return apic->sw_enabled;
	return true;
}

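/*
 * An APIC is "present" once it is in-kernel and hardware-enabled; it is only
 * "enabled" if it is also software-enabled via APIC_SPIV.
 */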
static inline bool kvm_apic_present(struct kvm_vcpu *vcpu)
{
	return lapic_in_kernel(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic);
}

static inline int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_apic_present(vcpu) && kvm_apic_sw_enabled(vcpu->arch.apic);
}

static inline int apic_x2apic_mode(struct kvm_lapic *apic)
{
	return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
}

static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.apic && vcpu->arch.apicv_active;
}

static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
{
	return lapic_in_kernel(vcpu) && vcpu->arch.apic->pending_events;
}

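/*
 * Lowest-priority delivery is requested either by the delivery mode or by the
 * MSI redirection hint.
 */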
static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq)
{
	return (irq->delivery_mode == APIC_DM_LOWEST ||
		irq->msi_redir_hint);
}

static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu)
{
	return lapic_in_kernel(vcpu) &&
	       test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);

void wait_lapic_expire(struct kvm_vcpu *vcpu);

bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
				  struct kvm_vcpu **dest_vcpu);
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
			const unsigned long *bitmap, u32 bitmap_size);
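
/*
 * Switch the LAPIC timer between the hrtimer-based software timer and the
 * hardware-assisted timer (e.g. the VMX preemption timer).
 */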
void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu);
void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu);
void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu);
bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu);

#endif /* __KVM_X86_LAPIC_H */