1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 */
9 #ifndef __ARCH_X86_KVM_XEN_H__
10 #define __ARCH_X86_KVM_XEN_H__
12 #include <asm/xen/hypervisor.h>
15 #include <linux/jump_label_ratelimit.h>
17 extern struct static_key_false_deferred kvm_xen_enabled
;
19 int __kvm_xen_has_interrupt(struct kvm_vcpu
*vcpu
);
20 void kvm_xen_inject_pending_events(struct kvm_vcpu
*vcpu
);
21 void kvm_xen_inject_vcpu_vector(struct kvm_vcpu
*vcpu
);
22 int kvm_xen_vcpu_set_attr(struct kvm_vcpu
*vcpu
, struct kvm_xen_vcpu_attr
*data
);
23 int kvm_xen_vcpu_get_attr(struct kvm_vcpu
*vcpu
, struct kvm_xen_vcpu_attr
*data
);
24 int kvm_xen_hvm_set_attr(struct kvm
*kvm
, struct kvm_xen_hvm_attr
*data
);
25 int kvm_xen_hvm_get_attr(struct kvm
*kvm
, struct kvm_xen_hvm_attr
*data
);
26 int kvm_xen_hvm_evtchn_send(struct kvm
*kvm
, struct kvm_irq_routing_xen_evtchn
*evt
);
27 int kvm_xen_write_hypercall_page(struct kvm_vcpu
*vcpu
, u64 data
);
28 int kvm_xen_hvm_config(struct kvm
*kvm
, struct kvm_xen_hvm_config
*xhc
);
29 void kvm_xen_init_vm(struct kvm
*kvm
);
30 void kvm_xen_destroy_vm(struct kvm
*kvm
);
31 void kvm_xen_init_vcpu(struct kvm_vcpu
*vcpu
);
32 void kvm_xen_destroy_vcpu(struct kvm_vcpu
*vcpu
);
33 int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn
*xe
,
35 int kvm_xen_setup_evtchn(struct kvm
*kvm
,
36 struct kvm_kernel_irq_routing_entry
*e
,
37 const struct kvm_irq_routing_entry
*ue
);
38 void kvm_xen_update_tsc_info(struct kvm_vcpu
*vcpu
);
40 static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu
*vcpu
)
43 * The local APIC is being enabled. If the per-vCPU upcall vector is
44 * set and the vCPU's evtchn_upcall_pending flag is set, inject the
47 if (static_branch_unlikely(&kvm_xen_enabled
.key
) &&
48 vcpu
->arch
.xen
.vcpu_info_cache
.active
&&
49 vcpu
->arch
.xen
.upcall_vector
&& __kvm_xen_has_interrupt(vcpu
))
50 kvm_xen_inject_vcpu_vector(vcpu
);
53 static inline bool kvm_xen_msr_enabled(struct kvm
*kvm
)
55 return static_branch_unlikely(&kvm_xen_enabled
.key
) &&
56 kvm
->arch
.xen_hvm_config
.msr
;
59 static inline bool kvm_xen_hypercall_enabled(struct kvm
*kvm
)
61 return static_branch_unlikely(&kvm_xen_enabled
.key
) &&
62 (kvm
->arch
.xen_hvm_config
.flags
&
63 KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL
);
66 static inline int kvm_xen_has_interrupt(struct kvm_vcpu
*vcpu
)
68 if (static_branch_unlikely(&kvm_xen_enabled
.key
) &&
69 vcpu
->arch
.xen
.vcpu_info_cache
.active
&&
70 vcpu
->kvm
->arch
.xen
.upcall_vector
)
71 return __kvm_xen_has_interrupt(vcpu
);
76 static inline bool kvm_xen_has_pending_events(struct kvm_vcpu
*vcpu
)
78 return static_branch_unlikely(&kvm_xen_enabled
.key
) &&
79 vcpu
->arch
.xen
.evtchn_pending_sel
;
82 static inline bool kvm_xen_timer_enabled(struct kvm_vcpu
*vcpu
)
84 return !!vcpu
->arch
.xen
.timer_virq
;
87 static inline int kvm_xen_has_pending_timer(struct kvm_vcpu
*vcpu
)
89 if (kvm_xen_hypercall_enabled(vcpu
->kvm
) && kvm_xen_timer_enabled(vcpu
))
90 return atomic_read(&vcpu
->arch
.xen
.timer_pending
);
void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu);
97 static inline int kvm_xen_write_hypercall_page(struct kvm_vcpu
*vcpu
, u64 data
)
102 static inline void kvm_xen_init_vm(struct kvm
*kvm
)
106 static inline void kvm_xen_destroy_vm(struct kvm
*kvm
)
110 static inline void kvm_xen_init_vcpu(struct kvm_vcpu
*vcpu
)
114 static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu
*vcpu
)
118 static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu
*vcpu
)
122 static inline bool kvm_xen_msr_enabled(struct kvm
*kvm
)
127 static inline bool kvm_xen_hypercall_enabled(struct kvm
*kvm
)
132 static inline int kvm_xen_has_interrupt(struct kvm_vcpu
*vcpu
)
137 static inline void kvm_xen_inject_pending_events(struct kvm_vcpu
*vcpu
)
141 static inline bool kvm_xen_has_pending_events(struct kvm_vcpu
*vcpu
)
146 static inline int kvm_xen_has_pending_timer(struct kvm_vcpu
*vcpu
)
151 static inline void kvm_xen_inject_timer_irqs(struct kvm_vcpu
*vcpu
)
155 static inline bool kvm_xen_timer_enabled(struct kvm_vcpu
*vcpu
)
160 static inline void kvm_xen_update_tsc_info(struct kvm_vcpu
*vcpu
)
/* Top-level Xen hypercall dispatcher, common to both configurations. */
int kvm_xen_hypercall(struct kvm_vcpu *vcpu);
167 #include <asm/pvclock-abi.h>
168 #include <asm/xen/interface.h>
169 #include <xen/interface/vcpu.h>
/* Account runstate time and publish it to the guest's runstate area. */
void kvm_xen_update_runstate(struct kvm_vcpu *vcpu, int state);
173 static inline void kvm_xen_runstate_set_running(struct kvm_vcpu
*vcpu
)
175 kvm_xen_update_runstate(vcpu
, RUNSTATE_running
);
178 static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu
*vcpu
)
181 * If the vCPU wasn't preempted but took a normal exit for
182 * some reason (hypercalls, I/O, etc.), that is accounted as
183 * still RUNSTATE_running, as the VMM is still operating on
184 * behalf of the vCPU. Only if the VMM does actually block
185 * does it need to enter RUNSTATE_blocked.
187 if (WARN_ON_ONCE(!vcpu
->preempted
))
190 kvm_xen_update_runstate(vcpu
, RUNSTATE_runnable
);
193 /* 32-bit compatibility definitions, also used natively in 32-bit build */
/*
 * 32-bit Xen ABI arch_vcpu_info. Members were lost from the source and are
 * restored per the Xen public headers — verify against
 * xen/include/public/arch-x86/xen-x86_32.h.
 */
struct compat_arch_vcpu_info {
	unsigned int cr2;
	unsigned int pad[5];
};
199 struct compat_vcpu_info
{
200 uint8_t evtchn_upcall_pending
;
201 uint8_t evtchn_upcall_mask
;
203 uint32_t evtchn_pending_sel
;
204 struct compat_arch_vcpu_info arch
;
205 struct pvclock_vcpu_time_info time
;
206 }; /* 64 bytes (x86) */
208 struct compat_arch_shared_info
{
209 unsigned int max_pfn
;
210 unsigned int pfn_to_mfn_frame_list_list
;
211 unsigned int nmi_reason
;
212 unsigned int p2m_cr3
;
213 unsigned int p2m_vaddr
;
214 unsigned int p2m_generation
;
218 struct compat_shared_info
{
219 struct compat_vcpu_info vcpu_info
[MAX_VIRT_CPUS
];
220 uint32_t evtchn_pending
[32];
221 uint32_t evtchn_mask
[32];
222 struct pvclock_wall_clock wc
;
223 struct compat_arch_shared_info arch
;
/* Number of 2-level event channels for a 32-bit guest: one bit per channel. */
#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 *				\
				      sizeof_field(struct compat_shared_info, \
						   evtchn_pending))
229 struct compat_vcpu_runstate_info
{
231 uint64_t state_entry_time
;
233 } __attribute__((packed
));
235 struct compat_sched_poll
{
236 /* This is actually a guest virtual address which points to ports. */
238 unsigned int nr_ports
;
242 #endif /* __ARCH_X86_KVM_XEN_H__ */