/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */
#include <linux/arm-smccc.h>
#include <linux/linkage.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
	.pushsection	.hyp.text, "ax"

.macro load_vcpu	reg
	mrc	p15, 4, \reg, c13, c0, 2	@ HTPIDR
.endm
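/*
 * Explanatory note (an assumption worth stating): HTPIDR is the HYP-mode
 * software thread ID register, which the world-switch code loads with the
 * current vcpu pointer before entering a guest, so "load_vcpu rN" can
 * rematerialize that pointer on any trap without touching memory.
 */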
/********************************************************************
 * Hypervisor exception vector and handlers
 *
 * The KVM/ARM Hypervisor ABI is defined as follows:
 *
 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
 * instruction is issued since all traps are disabled when running the host
 * kernel as per the Hyp-mode initialization at boot time.
 *
 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
 * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
 * instructions are called from within Hyp-mode.
 *
 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
 *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
 *    exception vector code will check that the HVC comes from VMID==0.
 *    - r0 contains a pointer to a HYP function
 *    - r1, r2, and r3 contain arguments to the above function.
 *    - The HYP function will be called with its arguments in r0, r1 and r2.
 *      On HYP function return, we return directly to SVC.
 *
 * Note that the above is used to execute code in Hyp-mode from a host-kernel
 * point of view, and is a different concept from performing a world-switch and
 * executing guest code in SVC mode (with a VMID != 0).
 */
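/*
 * For illustration only, a minimal sketch of a host-side call following
 * the ABI above (some_hyp_function is hypothetical; in-tree callers go
 * through __kvm_call_hyp rather than open-coding the trap):
 *
 *	ldr	r0, =some_hyp_function	@ r0 = pointer to the HYP function
 *	mov	r1, #0			@ becomes r0 inside the function
 *	hvc	#0			@ traps to hyp_hvc below
 */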
	.align 5
__kvm_hyp_vector:
	.global __kvm_hyp_vector

	@ Hyp-mode exception vector
	W(b)	hyp_reset
	W(b)	hyp_undef
	W(b)	hyp_svc
	W(b)	hyp_pabt
	W(b)	hyp_dabt
	W(b)	hyp_hvc
	W(b)	hyp_irq
	W(b)	hyp_fiq
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	.align 5
__kvm_hyp_vector_ic_inv:
	.global __kvm_hyp_vector_ic_inv

	/*
	 * We encode the exception entry in the bottom 3 bits of
	 * SP, and we have to guarantee that it is 8-byte aligned.
	 */
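	/*
	 * Explanatory note: each vector slot below is a single 4-byte
	 * instruction that falls through into the next, so an exception
	 * taken at slot N executes the remaining (7 - N) adds. An IRQ,
	 * for instance, enters at the seventh slot, runs one add, and
	 * leaves 1 in the low bits of SP for decode_vectors below.
	 */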
	W(add)	sp, sp, #1	/* Reset          7 */
	W(add)	sp, sp, #1	/* Undef          6 */
	W(add)	sp, sp, #1	/* Syscall        5 */
	W(add)	sp, sp, #1	/* Prefetch abort 4 */
	W(add)	sp, sp, #1	/* Data abort     3 */
	W(add)	sp, sp, #1	/* HVC            2 */
	W(add)	sp, sp, #1	/* IRQ            1 */
	W(nop)			/* FIQ            0 */

	mcr	p15, 0, r0, c7, c5, 0	/* ICIALLU */
	isb

	b	decode_vectors
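	/*
	 * Note: this variant is for cores (e.g. Cortex-A15) whose branch
	 * predictor can only be invalidated as a side effect of
	 * invalidating the entire icache, hence ICIALLU on entry.
	 */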
	.align 5
__kvm_hyp_vector_bp_inv:
	.global __kvm_hyp_vector_bp_inv

	/*
	 * We encode the exception entry in the bottom 3 bits of
	 * SP, and we have to guarantee that it is 8-byte aligned.
	 */
	W(add)	sp, sp, #1	/* Reset          7 */
	W(add)	sp, sp, #1	/* Undef          6 */
	W(add)	sp, sp, #1	/* Syscall        5 */
	W(add)	sp, sp, #1	/* Prefetch abort 4 */
	W(add)	sp, sp, #1	/* Data abort     3 */
	W(add)	sp, sp, #1	/* HVC            2 */
	W(add)	sp, sp, #1	/* IRQ            1 */
	W(nop)			/* FIQ            0 */

	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
	isb
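	/*
	 * Note: this variant is for cores (e.g. Cortex-A12/A17) where
	 * BPIALL invalidates the branch predictor array directly,
	 * avoiding the cost of a full icache invalidation.
	 */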
decode_vectors:

#ifdef CONFIG_THUMB2_KERNEL
	/*
	 * Yet another silly hack: Use VPIDR as a temp register.
	 * Thumb2 is really a pain, as SP cannot be used with most
	 * of the bitwise instructions. The vect_br macro ensures
	 * things get cleaned up.
	 */
	mcr	p15, 4, r0, c0, c0, 0	/* VPIDR */
	mov	r0, sp
	and	r0, r0, #7
	sub	sp, sp, r0
	push	{r1, r2}
	mov	r1, r0
	mrc	p15, 4, r0, c0, c0, 0	/* VPIDR */
	mrc	p15, 0, r2, c0, c0, 0	/* MIDR  */
	mcr	p15, 4, r2, c0, c0, 0	/* VPIDR */
#endif
.macro vect_br val, targ
ARM(	eor	sp, sp, #\val	)
ARM(	tst	sp, #7		)
ARM(	eorne	sp, sp, #\val	)

THUMB(	cmp	r1, #\val	)
THUMB(	popeq	{r1, r2}	)

	beq	\targ
.endm

	vect_br	0, hyp_fiq
	vect_br	1, hyp_irq
	vect_br	2, hyp_hvc
	vect_br	3, hyp_dabt
	vect_br	4, hyp_pabt
	vect_br	5, hyp_svc
	vect_br	6, hyp_undef
	vect_br	7, hyp_reset
#endif
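/*
 * Explanatory note: in the ARM variant of vect_br, "eor sp, sp, #\val"
 * zeroes the low bits of SP exactly when they equal \val; "tst sp, #7"
 * then sets the Z flag on a match (leaving SP realigned), while "eorne"
 * undoes the eor on a mismatch so the next vect_br sees SP unmodified.
 * The Thumb2 variant compares the value stashed in r1 instead, popping
 * the scratch registers once a match is found.
 */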
.macro invalid_vector label, cause
	.align
\label:	mov	r0, #\cause
	b	__hyp_panic
.endm

	invalid_vector	hyp_reset	ARM_EXCEPTION_RESET
	invalid_vector	hyp_undef	ARM_EXCEPTION_UNDEFINED
	invalid_vector	hyp_svc		ARM_EXCEPTION_SOFTWARE
	invalid_vector	hyp_pabt	ARM_EXCEPTION_PREF_ABORT
	invalid_vector	hyp_fiq		ARM_EXCEPTION_FIQ
ENTRY(__hyp_do_panic)
	mrs	lr, cpsr
	bic	lr, lr, #MODE_MASK
	orr	lr, lr, #SVC_MODE
THUMB(	orr	lr, lr, #PSR_T_BIT	)
	msr	spsr_cxsf, lr
	ldr	lr, =panic
	msr	ELR_hyp, lr
	ldr	lr, =__kvm_call_hyp
	clrex
	eret
ENDPROC(__hyp_do_panic)
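/*
 * Explanatory note (an inference about intent): the sequence above fakes
 * an exception return into SVC mode, "returning" to panic() with lr set
 * to __kvm_call_hyp so that the resulting backtrace points at the spot
 * where HYP was entered.
 */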
hyp_hvc:
	/*
	 * Getting here is either because of a trap from a guest,
	 * or from executing HVC from the host kernel, which means
	 * "do something in Hyp mode".
	 */
	push	{r0, r1, r2}

	@ Check syndrome register
	mrc	p15, 4, r1, c5, c2, 0	@ HSR
	lsr	r0, r1, #HSR_EC_SHIFT
	cmp	r0, #HSR_EC_HVC
	bne	guest_trap		@ Not HVC instr.
	/*
	 * Let's check if the HVC came from VMID 0 and allow simple
	 * switch to Hyp mode
	 */
	mrrc	p15, 6, r0, r2, c2	@ VTTBR
	lsr	r2, r2, #16
	and	r2, r2, #0xff
	cmp	r2, #0
	bne	guest_hvc_trap		@ Guest called HVC
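/*
 * Explanatory note: VTTBR is a 64-bit register, read here into the
 * r0/r2 pair; the VMID lives in bits [55:48], i.e. bits [23:16] of the
 * upper word, which the shift-and-mask above extracts. VMID 0 is
 * reserved for the host, so a non-zero VMID means a guest issued the
 * HVC.
 */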
	/*
	 * Getting here means host called HVC, we shift parameters and branch
	 * to Hyp function.
	 */
	pop	{r0, r1, r2}

	/*
	 * Check if we have a kernel function, which is guaranteed to be
	 * bigger than the maximum hyp stub hypercall
	 */
	cmp	r0, #HVC_STUB_HCALL_NR
	bhs	1f
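/*
 * Explanatory note: stub hypercall numbers are small integers, while a
 * pointer to a HYP function is a kernel virtual address far above
 * HVC_STUB_HCALL_NR, so one unsigned compare distinguishes the cases.
 */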
	/*
	 * Not a kernel function, treat it as a stub hypercall.
	 * Compute the physical address for __kvm_handle_stub_hvc
	 * (as the code lives in the idmapped page) and branch there.
	 * We hijack ip (r12) as a tmp register.
	 */
	ldr	r1, =kimage_voffset
	ldr	r1, [r1]			@ r1 = virt - phys offset
	ldr	ip, =__kvm_handle_stub_hvc
	sub	ip, ip, r1			@ ip = phys address of handler
	bx	ip
1:
	/*
	 * Pushing r2 here is just a way of keeping the stack aligned to
	 * 8 bytes on any path that can trigger a HYP exception. Here,
	 * we may well be about to jump into the guest, and the guest
	 * exit would otherwise be badly decoded by our fancy
	 * "decode-exception-without-a-branch" code...
	 */
	push	{r2, lr}

	mov	lr, r0			@ r0 held the HYP function pointer
	mov	r0, r1			@ shift the arguments down: the
	mov	r1, r2			@ caller's r1..r3 become the
	mov	r2, r3			@ function's r0..r2
THUMB(	orr	lr, #1)
	blx	lr			@ Call the HYP function

	pop	{r2, lr}
	eret
guest_hvc_trap:
	movw	r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
	movt	r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
	ldr	r0, [sp]		@ Guest's r0
	teq	r0, r2
	bne	guest_trap
	add	sp, sp, #12
	@ Returns:
	@ r0 = 0
	@ r1 = HSR value (perfectly predictable)
	@ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
	mov	r0, #0
	eret
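/*
 * For illustration only, a sketch of the guest side of this fast path
 * (not code assembled here): a guest probing the Spectre-v2 mitigation
 * issues
 *
 *	movw	r0, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
 *	movt	r0, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
 *	hvc	#0
 *
 * and is resumed with r0 = 0 without paying for a full guest exit, the
 * branch predictor having already been invalidated on vector entry.
 */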
guest_trap:
	load_vcpu r0			@ Load VCPU pointer to r0

#ifdef CONFIG_VFPv3
	@ Check for a VFP access
	lsr	r1, r1, #HSR_EC_SHIFT
	cmp	r1, #HSR_EC_CP_0_13
	beq	__vfp_guest_restore
#endif
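/*
 * Explanatory note: HSR_EC_CP_0_13 means the guest touched a trapped
 * coprocessor in the 0-13 range, i.e. VFP/NEON. Instead of a full world
 * switch, __vfp_guest_restore lazily hands the FP state to the guest
 * and resumes it directly.
 */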
	mov	r1, #ARM_EXCEPTION_HVC
	b	__guest_exit

hyp_irq:
	push	{r0, r1, r2}
	mov	r1, #ARM_EXCEPTION_IRQ
	load_vcpu r0			@ Load VCPU pointer to r0
	b	__guest_exit
hyp_dabt:
	push	{r0, r1}
	mrs	r0, ELR_hyp
	ldr	r1, =abort_guest_exit_start
THUMB(	add	r1, r1, #1)
	cmp	r0, r1
	ldrne	r1, =abort_guest_exit_end
THUMB(	addne	r1, r1, #1)
	cmpne	r0, r1
	pop	{r0, r1}
	bne	__hyp_panic

	orr	r0, r0, #(1 << ARM_EXIT_WITH_ABORT_BIT)
	eret
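/*
 * Explanatory note: a data abort taken in HYP mode is normally fatal,
 * unless ELR_hyp falls in the abort_guest_exit_start..end window of the
 * guest-exit path, where an abort raised by the guest may still be
 * pending. In that case the exit code in r0 is tagged with
 * ARM_EXIT_WITH_ABORT_BIT and we return so the C code can handle it;
 * anything else goes to __hyp_panic.
 */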