/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_SVM_OPS_H
#define __KVM_X86_SVM_OPS_H

#include <linux/compiler_types.h>

#include "x86.h"

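/*
 * The svm_asm*() helpers below emit a single SVM instruction via asm goto,
 * with an exception table entry that redirects any fault on the instruction
 * to the local 'fault' label, which reports it via kvm_spurious_fault().
 */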
#define svm_asm(insn, clobber...)				\
do {								\
	asm goto("1: " __stringify(insn) "\n\t"			\
		 _ASM_EXTABLE(1b, %l[fault])			\
		 ::: clobber : fault);				\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm1(insn, op1, clobber...)				\
do {								\
	asm goto("1: " __stringify(insn) " %0\n\t"		\
		 _ASM_EXTABLE(1b, %l[fault])			\
		 :: op1 : clobber : fault);			\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm2(insn, op1, op2, clobber...)			\
do {								\
	asm goto("1: " __stringify(insn) " %1, %0\n\t"		\
		 _ASM_EXTABLE(1b, %l[fault])			\
		 :: op1, op2 : clobber : fault);		\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

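/* CLGI and STGI clear and set the Global Interrupt Flag (GIF), respectively. */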
static inline void clgi(void)
{
	svm_asm(clgi);
}

static inline void stgi(void)
{
	svm_asm(stgi);
}

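/*
 * INVLPGA invalidates the TLB mapping for the virtual page in rAX, tagged
 * with the ASID in ECX.
 */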
static inline void invlpga(unsigned long addr, u32 asid)
{
	svm_asm2(invlpga, "c"(asid), "a"(addr));
}

/*
 * Despite being a physical address, the portion of rAX that is consumed by
 * VMSAVE, VMLOAD, etc... is still controlled by the effective address size,
 * hence 'unsigned long' instead of 'hpa_t'.
 */
static __always_inline void vmsave(unsigned long pa)
{
	svm_asm1(vmsave, "a" (pa), "memory");
}

#endif /* __KVM_X86_SVM_OPS_H */