/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
7 #ifndef __ARM64_KVM_HYP_H__
8 #define __ARM64_KVM_HYP_H__
10 #include <linux/compiler.h>
11 #include <linux/kvm_host.h>
12 #include <asm/alternative.h>
13 #include <asm/kvm_mmu.h>
14 #include <asm/sysreg.h>
/* Place a function in the HYP text section and keep ftrace out of it. */
#define __hyp_text __section(.hyp.text) notrace
/*
 * Read a sysreg whose encoding differs between VHE and non-VHE: the
 * non-VHE encoding is patched to the VHE one at runtime when
 * ARM64_HAS_VIRT_HOST_EXTN is detected.
 */
#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),		\
					 __mrs_s("%0", r##vh),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})
/*
 * Write a sysreg whose encoding differs between VHE and non-VHE; the
 * alternative is patched the same way as read_sysreg_elx(). "rZ" lets
 * the compiler use XZR when the value is zero.
 */
#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
					 __msr_s(r##vh, "%x0"),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)
/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
 */
/* ELx accessor shorthands: pick the _ELx (nVHE) or _ELx2 (VHE) encoding. */
#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
/*
 * hyp_alternate_select - Generates patchable code sequences that are
 * used to switch between two implementations of a function, depending
 * on the availability of a feature.
 *
 * @fname: a symbol name that will be defined as a function returning a
 * function pointer whose type will match @orig and @alt
 * @orig: A pointer to the default function, as returned by @fname when
 * @cond doesn't hold
 * @alt: A pointer to the alternate function, as returned by @fname
 * when @cond holds
 * @cond: a CPU feature (as described in asm/cpufeature.h)
 */
#define hyp_alternate_select(fname, orig, alt, cond)			\
typeof(orig) * __hyp_text fname(void)					\
{									\
	typeof(alt) *val = orig;					\
	/* Patched to "mov %0, %1" when @cond is detected on the CPU. */\
	asm volatile(ALTERNATIVE("nop		\n",			\
				 "mov	%0, %1	\n",			\
				 cond)					\
		     : "+r" (val) : "r" (alt));				\
	return val;							\
}
/* GICv2/GICv3 hyp-mode accessors; the *_perform_cpuif_access helpers
 * emulate trapped guest accesses to the GIC CPU interface. */
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
/* Arch timer trap configuration for guest entry/exit. */
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
/* System-register context switch helpers (nVHE and VHE variants). */
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
/* AArch32 guest register state. */
void __sysreg32_save_state(struct kvm_vcpu *vcpu);
void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
/* Debug register context switch. */
void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

/* FP/SIMD register save/restore (implemented in assembly). */
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
102 void activate_traps_vhe_load(struct kvm_vcpu
*vcpu
);
103 void deactivate_traps_vhe_put(void);
105 u64
__guest_enter(struct kvm_vcpu
*vcpu
, struct kvm_cpu_context
*host_ctxt
);
106 void __noreturn
__hyp_do_panic(unsigned long, ...);
/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
112 static __always_inline
void __hyp_text
__load_guest_stage2(struct kvm
*kvm
)
114 write_sysreg(kvm
->arch
.vtcr
, vtcr_el2
);
115 write_sysreg(kvm_get_vttbr(kvm
), vttbr_el2
);
118 * ARM erratum 1165522 requires the actual execution of the above
119 * before we can switch to the EL1/EL0 translation regime used by
122 asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522
));
125 #endif /* __ARM64_KVM_HYP_H__ */