/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
7 #include <linux/arm-smccc.h>
8 #include <linux/linkage.h>
10 #include <asm/alternative.h>
11 #include <asm/assembler.h>
12 #include <asm/cpufeature.h>
13 #include <asm/kvm_arm.h>
14 #include <asm/kvm_asm.h>
16 #include <asm/spectre.h>
/*
 * Push the remaining AAPCS64 caller-saved GPRs (x2-x17) onto the hyp
 * stack, one pair at a time with pre-indexed stores, so sp stays
 * 16-byte aligned throughout. x0/x1 are not pushed here because the
 * vector-entry preamble already saved them (see the stp in the
 * valid_vect/invalid_vect macros below).
 * NOTE(review): the closing .endm is not visible in this chunk —
 * lines appear missing; confirm against the full file.
 */
18 .macro save_caller_saved_regs_vect
19 /* x0 and x1 were saved in the vector entry */
20 stp x2, x3, [sp, #-16]!
21 stp x4, x5, [sp, #-16]!
22 stp x6, x7, [sp, #-16]!
23 stp x8, x9, [sp, #-16]!
24 stp x10, x11, [sp, #-16]!
25 stp x12, x13, [sp, #-16]!
26 stp x14, x15, [sp, #-16]!
27 stp x16, x17, [sp, #-16]!
/*
 * Pop the caller-saved GPRs in the exact reverse order of
 * save_caller_saved_regs_vect (post-indexed loads, sp stays 16-byte
 * aligned). Only the x16..x10 pops are visible here; the remaining
 * pairs (x8..x2), the restore of x0/x1, and the .endm appear to be
 * truncated from this view — confirm against the full file.
 */
30 .macro restore_caller_saved_regs_vect
31 ldp x16, x17, [sp], #16
32 ldp x14, x15, [sp], #16
33 ldp x12, x13, [sp], #16
34 ldp x10, x11, [sp], #16
/*
 * Synchronous exception taken from a lower EL (the guest) into EL2.
 * Classifies the exception and fast-paths the Spectre SMCCC
 * workaround HVCs without a full world switch.
 * NOTE(review): the instruction loading the syndrome into x0 (a read
 * of ESR_EL2) and the conditional branches consuming the flags below
 * are not visible in this chunk — confirm against the full file.
 */
44 el1_sync: // Guest trapped into EL2
47 lsr x0, x0, #ESR_ELx_EC_SHIFT // x0 = exception class (EC) field
48 cmp x0, #ESR_ELx_EC_HVC64
/*
 * ccmp with nzcv=#4 (Z set) on the 'ne' path: after this, Z is set
 * iff the EC is HVC64 or HVC32, i.e. the guest issued an HVC.
 */
49 ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
53 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
54 * The workaround has already been applied on the host,
55 * so let's quickly get back to the guest. We don't bother
56 * restoring x1, as it can be clobbered anyway.
58 ldr x1, [sp] // Guest's x0
/* w1 becomes 0 iff the guest's function ID was WORKAROUND_1 */
59 eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
62 /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
/*
 * XOR-ing with (WA1 ^ WA2) undoes the WA1 XOR above, so w1 is now 0
 * iff the guest's function ID was WORKAROUND_2.
 */
63 eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
64 ARM_SMCCC_ARCH_WORKAROUND_2)
/*
 * Exit-reason loads: x0 carries the ARM_EXCEPTION_* code back to the
 * exit path for the trap, IRQ and SError cases respectively.
 * NOTE(review): the handler labels and the surrounding instructions
 * (including the returns that follow each mov) are not visible in
 * this chunk — confirm against the full file.
 */
75 mov x0, #ARM_EXCEPTION_TRAP
80 mov x0, #ARM_EXCEPTION_IRQ
85 mov x0, #ARM_EXCEPTION_EL1_SERROR
/*
 * EL2-local synchronous exception path: spill the caller-saved GPRs
 * plus the frame pointer/link register pair, call the C handler
 * kvm_unexpected_el2_exception, then restore everything. The
 * stp/ldp of x29/x30 keeps sp 16-byte aligned across the bl.
 * NOTE(review): the label for this handler, the illegal-return check
 * itself, and the instructions between the two visible comment lines
 * are missing from this view — confirm against the full file.
 */
89 /* Check for illegal exception return */
93 save_caller_saved_regs_vect
94 stp x29, x30, [sp, #-16]!
95 bl kvm_unexpected_el2_exception
96 ldp x29, x30, [sp], #16
97 restore_caller_saved_regs_vect
102 /* Let's attempt a recovery from the illegal exception return */
/* x0 = exit code reported for an illegal exception return */
104 mov x0, #ARM_EXCEPTION_IL
/*
 * EL2-local error (SError) path: same shape as the sync case above —
 * spill caller-saved GPRs and x29/x30, call the C handler, restore.
 * NOTE(review): the handler label preceding this sequence and the
 * instructions at the truncated line numbers are not visible here —
 * confirm against the full file.
 */
109 save_caller_saved_regs_vect
110 stp x29, x30, [sp, #-16]!
112 bl kvm_unexpected_el2_exception
114 ldp x29, x30, [sp], #16
115 restore_caller_saved_regs_vect
/*
 * Emit a named stub for a vector that should never fire; \target
 * defaults to __guest_exit_panic.
 * NOTE(review): the stub body (presumably a branch to \target),
 * SYM_CODE_END and .endm are not visible in this chunk — confirm
 * against the full file.
 */
120 .macro invalid_vector label, target = __guest_exit_panic
122 SYM_CODE_START(\label)
/*
 * Instantiate one panic stub per impossible vector (EL2t vectors,
 * EL2h IRQ/FIQ, and 64-bit EL1 FIQ). Each expands via the
 * invalid_vector macro above with the default __guest_exit_panic
 * target.
 */
127 /* None of these should ever happen */
128 invalid_vector el2t_sync_invalid
129 invalid_vector el2t_irq_invalid
130 invalid_vector el2t_fiq_invalid
131 invalid_vector el2t_error_invalid
132 invalid_vector el2h_irq_invalid
133 invalid_vector el2h_fiq_invalid
134 invalid_vector el1_fiq_invalid
/*
 * Assembly-time assertion: the code between \start and \end must be
 * exactly KVM_VECTOR_PREAMBLE bytes, because the patched branch
 * emitted by kvm_patch_vector_branch() jumps over that many bytes.
 * NOTE(review): the .endif and .endm are not visible in this chunk —
 * confirm against the full file.
 */
140 .macro check_preamble_length start, end
141 /* kvm_patch_vector_branch() generates code that jumps over the preamble. */
142 .if ((\end-\start) != KVM_VECTOR_PREAMBLE)
143 .error "KVM vector preamble length mismatch"
/*
 * Vector-entry preamble for a handled vector: push x0/x1 (the pair
 * every handler expects on the stack), then assert the preamble size.
 * NOTE(review): the 661/662 local labels bracketing the preamble, the
 * branch to \target, and the .endm are not visible in this chunk —
 * confirm against the full file.
 */
147 .macro valid_vect target
151 stp x0, x1, [sp, #-16]!
155 check_preamble_length 661b, 662b
/*
 * Vector-entry preamble for an unhandled vector: same x0/x1 push and
 * size assertion as valid_vect, so every entry has an identical
 * preamble layout.
 * NOTE(review): the 661/662 local labels, the branch to \target, and
 * the .endm are not visible in this chunk — confirm against the full
 * file.
 */
158 .macro invalid_vect target
162 stp x0, x1, [sp, #-16]!
166 check_preamble_length 661b, 662b
/*
 * The EL2 exception vector table: four groups of four entries
 * (Synchronous / IRQ / FIQ / Error) for, in order: current EL with
 * SP_EL0 (EL2t), current EL with SP_ELx (EL2h), lower EL using
 * AArch64, lower EL using AArch32. All EL2t entries and every FIQ
 * entry route to panic stubs via invalid_vect; the rest dispatch to
 * their named handlers via valid_vect.
 * NOTE(review): the 2KB alignment directive the architecture requires
 * for a vector table is not visible in this chunk — confirm against
 * the full file.
 */
169 SYM_CODE_START(__kvm_hyp_vector)
170 invalid_vect el2t_sync_invalid // Synchronous EL2t
171 invalid_vect el2t_irq_invalid // IRQ EL2t
172 invalid_vect el2t_fiq_invalid // FIQ EL2t
173 invalid_vect el2t_error_invalid // Error EL2t
175 valid_vect el2_sync // Synchronous EL2h
176 invalid_vect el2h_irq_invalid // IRQ EL2h
177 invalid_vect el2h_fiq_invalid // FIQ EL2h
178 valid_vect el2_error // Error EL2h
180 valid_vect el1_sync // Synchronous 64-bit EL1
181 valid_vect el1_irq // IRQ 64-bit EL1
182 invalid_vect el1_fiq_invalid // FIQ 64-bit EL1
183 valid_vect el1_error // Error 64-bit EL1
185 valid_vect el1_sync // Synchronous 32-bit EL1
186 valid_vect el1_irq // IRQ 32-bit EL1
187 invalid_vect el1_fiq_invalid // FIQ 32-bit EL1
188 valid_vect el1_error // Error 32-bit EL1
189 SYM_CODE_END(__kvm_hyp_vector)
/*
 * Spectre-v2 mitigation: invoke the firmware's
 * ARM_SMCCC_ARCH_WORKAROUND_1 call. x2/x3 and x0/x1 are parked in a
 * stack slot (offsets 0 and 16) so the SMC can clobber them, and
 * x2/x3 are reloaded afterwards.
 * NOTE(review): the sp adjustment that creates the slot, the smc
 * instruction itself, the reload of x0/x1, and the .endm are not
 * visible in this chunk — confirm against the full file.
 */
191 .macro spectrev2_smccc_wa1_smc
193 stp x2, x3, [sp, #(8 * 0)]
194 stp x0, x1, [sp, #(8 * 2)]
195 mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
197 ldp x2, x3, [sp, #(8 * 0)]
/*
 * Emit one hardened vector entry. When \spectrev2 is set, the
 * firmware WA1 SMC is issued first; then x0/x1 are pushed (matching
 * the preamble layout of valid_vect/invalid_vect). The region under
 * alternative_cb is runtime-patched by kvm_patch_vector_branch().
 * NOTE(review): the alignment directive, the 0:/1: local labels that
 * the final branch arithmetic refers to, the alternative_cb_end, and
 * the .endm are not visible in this chunk — confirm against the full
 * file.
 */
201 .macro hyp_ventry indirect, spectrev2
205 spectrev2_smccc_wa1_smc
207 stp x0, x1, [sp, #-16]!
210 alternative_cb kvm_patch_vector_branch
212 * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with:
214 * movz x0, #(addr & 0xffff)
215 * movk x0, #((addr >> 16) & 0xffff), lsl #16
216 * movk x0, #((addr >> 32) & 0xffff), lsl #32
220 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
221 * See kvm_patch_vector_branch for details.
/*
 * Default (unpatched) path: branch into the real vector table at the
 * matching offset, skipping its KVM_VECTOR_PREAMBLE-byte preamble
 * since this entry already performed the equivalent work.
 */
229 b __kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
/*
 * Emit one full set of hardened vector entries via hyp_ventry, then
 * use .org as a safety assertion that the set fits in (and pads to)
 * SZ_2K from the 0: label.
 * NOTE(review): the 0: label, the .rept/.endr that presumably repeats
 * hyp_ventry for all 16 vectors, and the .endm are not visible in
 * this chunk — confirm against the full file.
 */
232 .macro generate_vectors indirect, spectrev2
235 hyp_ventry \indirect, \spectrev2
237 .org 0b + SZ_2K // Safety measure
/*
 * Branch-predictor-hardened vector tables: three back-to-back sets
 * generated for the (indirect, spectrev2) combinations the host may
 * select — direct+WA1 SMC, indirect only, and indirect+WA1 SMC. The
 * final .org asserts the total size matches __BP_HARDEN_HYP_VECS_SZ.
 * NOTE(review): an alignment directive before the first set is not
 * visible in this chunk — confirm against the full file.
 */
241 SYM_CODE_START(__bp_harden_hyp_vecs)
242 generate_vectors indirect = 0, spectrev2 = 1 // HYP_VECTOR_SPECTRE_DIRECT
243 generate_vectors indirect = 1, spectrev2 = 0 // HYP_VECTOR_INDIRECT
244 generate_vectors indirect = 1, spectrev2 = 1 // HYP_VECTOR_SPECTRE_INDIRECT
245 1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
247 SYM_CODE_END(__bp_harden_hyp_vecs)