/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mte.h>
#include <asm/kvm_ptrauth.h>

	.text

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu);
 */
SYM_FUNC_START(__guest_enter)
	// x0: vcpu
	// x1-x17: clobbered by macros
	// x29: guest context
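	//
	// Returns once the guest exits again: x0 then carries an
	// ARM_EXCEPTION_* code back to the caller (set below for the
	// pending-interrupt case, or by the __guest_exit path).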

	adr_this_cpu x1, kvm_hyp_ctxt, x2

	// Store the hyp regs
	save_callee_saved_regs x1

	// Save hyp's sp_el0
	save_sp_el0	x1, x2
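
	// Only the callee-saved registers and sp_el0 need saving here:
	// __guest_enter is reached by an ordinary C call, so x0-x17 are
	// caller-saved and already dead as far as the AAPCS64 is concerned.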

	// Now that the hyp state is stored, a pending RAS SError must
	// affect the host or hyp. If any asynchronous exception is pending we
	// defer the guest entry. The DSB isn't necessary before v8.2 as any
	// SError would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif
	mrs	x1, isr_el1
	cbz	x1, 1f
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
	set_loaded_vcpu x0, x1, x2
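
	// Publish the vcpu pointer in the per-CPU hyp context so that, on a
	// panic, __guest_exit_panic can tell whether a guest was loaded.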

	add	x29, x0, #VCPU_CONTEXT

	// mte_switch_to_guest(g_ctxt, h_ctxt, tmp1)
	mte_switch_to_guest x29, x1, x2
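
	// When FEAT_MTE is present and enabled for the guest (HCR_EL2.ATA),
	// the macro above swaps the RGSR_EL1/GCR_EL1 tag-generation state;
	// otherwise it is a nop.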

	// Macro ptrauth_switch_to_guest format:
	// 	ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
	// The macro that restores the guest keys is not implemented in C code
	// as it may cause Pointer Authentication key signing mismatch errors
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_guest x29, x0, x1, x2

	// Restore the guest's sp_el0
	restore_sp_el0 x29, x0

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x18-x29, lr
	restore_callee_saved_regs x29
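
	// x29 doubles as the base pointer for these loads, so the macro must
	// only overwrite it once every other access through it has been done.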

	// Do not touch any register after this!
	eret
	sb
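
	// The speculation barrier above stops straight-line speculation
	// past the eret into the code below.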

SYM_INNER_LABEL(__guest_exit_restore_elr_and_panic, SYM_L_GLOBAL)
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	adr_this_cpu x0, kvm_hyp_ctxt, x1
	ldr	x0, [x0, #CPU_ELR_EL2]
	msr	elr_el2, x0
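
	// ELR_EL2 was clobbered when the nested exception was taken, so it
	// is reloaded from the copy stashed in the hyp context before falling
	// through to __guest_exit_panic below.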

SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	// If the hyp context is loaded, go straight to hyp_panic
	get_loaded_vcpu x0, x1
	cbnz	x0, 1f
	b	hyp_panic

1:
	// The hyp context is saved so make sure it is restored to allow
	// hyp_panic to run at hyp and, subsequently, panic to run in the host.
	// This makes use of __guest_exit to avoid duplication but sets the
	// return address to tail call into hyp_panic. As a side effect, the
	// current state is saved to the guest context but it will only be
	// accurate if the guest had been completely restored.
	adr_this_cpu x0, kvm_hyp_ctxt, x1
	adr_l	x1, hyp_panic
	str	x1, [x0, #CPU_XREG_OFFSET(30)]

	get_vcpu_ptr	x1, x0
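
	// Offset 30 is the saved-lr slot of the hyp context, so the
	// restore_callee_saved_regs in __guest_exit will load hyp_panic into
	// lr and the final ret becomes a tail call into it.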

SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	add	x1, x1, #VCPU_CONTEXT

	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1
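
	// The guest exit vectors pushed the live x0/x1 to the stack to gain
	// two scratch registers, which is why they are popped here rather
	// than read from the register file.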

	// Store the guest regs x0-x1 and x4-x17
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]

	// Store the guest regs x18-x29, lr
	save_callee_saved_regs x1

	// Store the guest's sp_el0
	save_sp_el0	x1, x2

	adr_this_cpu x2, kvm_hyp_ctxt, x3

	// Macro ptrauth_switch_to_hyp format:
	// 	ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
	// The macro that saves/restores the keys is not implemented in C code
	// as it may cause Pointer Authentication key signing mismatch errors
	// when this feature is enabled for kernel code.
	ptrauth_switch_to_hyp x1, x2, x3, x4, x5

	// mte_switch_to_hyp(g_ctxt, h_ctxt, reg1)
	mte_switch_to_hyp x1, x2, x3

	// Restore hyp's sp_el0
	restore_sp_el0 x2, x3

	// Now restore the hyp regs
	restore_callee_saved_regs x2

	set_loaded_vcpu xzr, x2, x3
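
	// Clearing the loaded-vcpu pointer marks the hyp context as the live
	// one again for any later __guest_exit_panic.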

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without an unmask-SError and isb. The ESB-instruction consumed any
	// pending guest error when we took the exception from the guest.
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
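	// Without RAS the error cannot be deferred into DISR_EL1, so
	// synchronize and inspect ISR_EL1 to see whether an SError is
	// pending.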
	dsb	sy		// Synchronize against in-flight ld/st
	isb			// Prevent an early read of side-effect free ISR
	mrs	x2, isr_el1
	tbnz	x2, #ISR_EL1_A_SHIFT, 2f
	ret
	nop
2:
alternative_endif
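	// Everything below runs with aborts still masked. The incoming SError
	// will clobber ELR_EL2/ESR_EL2/SPSR_EL2, so the current values and
	// the exit code are stashed in x2-x5 first.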
	// We know we have a pending asynchronous abort, now is the
	// time to flush it out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one! For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0

	msr	daifclr, #4	// Unmask aborts

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
abort_guest_exit_start:

	isb

abort_guest_exit_end:

	msr	daifset, #4	// Mask aborts
	ret

	_kvm_extable	abort_guest_exit_start, 9997f
	_kvm_extable	abort_guest_exit_end, 9997f
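	// If the SError is taken inside the window, the hyp vectors consult
	// the entries above and resume execution at 9997 instead of
	// returning.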
9997:
	msr	daifset, #4	// Mask aborts
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

	// Restore the exception context (clobbered by the SError) so that
	// we can report some information. Merge the exception code with the
	// SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
SYM_FUNC_END(__guest_enter)