/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>		/* BP_HARDEN_EL2_SLOTS */
	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
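	/*
	 * Body below is a sketch reconstructed from the comment above:
	 * preserve lr, move the callee into lr, shift x1-x3 down into
	 * x0-x2, make the call, then restore lr.
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm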
el1_sync:				// Guest trapped into EL2

	mrs	x0, esr_el2		// decode the exception class
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap		// not an HVC: treat as a regular trap
	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16	// reload the host's x0/x1

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f			// not a stub: perform a full EL2 call
	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset
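	/*
	 * Sketch of the jump described in the comment above: strip
	 * kimage_voffset from the kernel VA to get the idmap (physical)
	 * address, then branch to it, leaving x0-x4 untouched.
	 */
	sub	x5, x5, x6		// x5 = __pa(x5)
	br	x5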
1:
	/*
	 * Perform the EL2 call
	 */
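	/*
	 * A sketch of the call itself: x0 holds the kernel VA of the
	 * target function, so convert it to a HYP VA, dispatch through
	 * do_el2_call (defined above), then return to the host.
	 */
	kern_hyp_va	x0
	do_el2_call

	eret
	sb				// speculation barrier after eret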
el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue			// WORKAROUND_1: nothing left to do
	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbnz	w1, el1_trap		// neither workaround: normal trap handling
#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end			// patched to a NOP when dynamic WA2 handling is in use
alternative_cb_end
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]
	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
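	/*
	 * How the three instructions above work: clz yields 32 only when
	 * the guest's x1 was zero, the shift by 5 turns that into 1 (and
	 * anything else into 0), and the final eor inverts it, so w1 ends
	 * up as !!x1 without ever touching the NZCV flags.
	 */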
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]
	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end		// no firmware mitigation on this CPU

	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0			// w1 still holds the sanitized argument
	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr
#endif
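	/*
	 * Common epilogue for both workarounds, a sketch based on the
	 * wa_epilogue references above: return SMCCC success (0) to the
	 * guest, drop the x0/x1 frame pushed by the vector preamble and
	 * go straight back.
	 */
wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret
	sb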
el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit
el2_sync:
	/* Check for illegal exception return, otherwise panic */
	mrs	x0, spsr_el2

	/* if this was something else, then panic! */
	tst	x0, #PSR_IL_BIT
	b.eq	__hyp_panic

	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit
el2_error:
	ldp	x0, x1, [sp], #16

	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne		// Z set iff ELR matches one of the two
	b.ne	__hyp_panic		// (2): came from somewhere else, bug
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret
	sb
ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
	sb
ENDPROC(__hyp_do_panic)
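/*
 * __hyp_panic is the default target of the invalid vectors below and of
 * the illegal-exception-return check above; a minimal sketch: pick up
 * the host context and hand over to the C-level hyp_panic().
 */
ENTRY(__hyp_panic)
	get_host_ctxt x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)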
.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid
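	/*
	 * Flush the "ldr =" literals used above into a constant pool, and
	 * align what follows to 2K so that __kvm_hyp_vector satisfies the
	 * VBAR_EL2 alignment requirement (assumed placement, mirroring the
	 * .align used for the hardened vector slots below).
	 */
	.ltorg

	.align 11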
.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm
.macro valid_vect target
	.align 7
661:
	esb				// preamble: synchronise pending SErrors
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm
.macro invalid_vect target
	.align 7
661:
	b	\target
	nop				// keep the preamble length constant
662:
	ldp	x0, x1, [sp], #16
	b	\target

check_preamble_length 661b, 662b
.endm
ENTRY(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
#ifdef CONFIG_KVM_INDIRECT_VECTORS
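/*
 * Each hardened vector slot entry is emitted by the macro below; the
 * "1:" label is the anchor for the "1b" reference in the patched branch
 * further down, and the nop padding (a sketch: 1 esb + 26 nops + 5
 * patchable instructions fill the 32-instruction, 128-byte slot) leaves
 * room for the sequence described in the comment that follows.
 */
.macro hyp_ventry
	.align 7
1:	esb
	.rept 26
	nop
	.endr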
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where:
 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	stp	x0, x1, [sp, #-16]!
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
	nop				// pad the callback region to the five
	nop				// instructions that the patched
	nop				// sequence above needs
alternative_cb_end
.endm
.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm
	.align	11
ENTRY(__bp_harden_hyp_vecs_start)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
ENTRY(__bp_harden_hyp_vecs_end)

	.popsection
ENTRY(__smccc_workaround_1_smc_start)
	esb				// assumed: same SError synchronisation as the vector preamble
	sub	sp, sp, #(8 * 4)	// scratch space for x0-x3
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)
#endif