// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/debug-monitors.h>
#include <asm/traps.h>

#include <kvm/arm_hypercalls.h>

#define CREATE_TRACE_POINTS
#include "trace.h"
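
/*
 * Exit handlers take the vcpu and its kvm_run, and follow the convention
 * documented at handle_exit() below: a positive return resumes the guest,
 * zero hands control to userspace (with run->exit_reason set), and a
 * negative value reports an error.
 */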
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
{
        /*
         * An SError that is not RAS-classified, or that the RAS code
         * deems fatal, is forwarded to the guest as a virtual SError.
         */
        if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
                kvm_inject_vabt(vcpu);
}
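
/*
 * HVC exits are routed to the PSCI/SMCCC hypercall dispatcher,
 * kvm_hvc_call_handler(); a negative return from it means no handler
 * claimed the call, in which case the guest sees -1 in x0.
 */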
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret;

        trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
                            kvm_vcpu_hvc_get_imm(vcpu));
        vcpu->stat.hvc_exit_stat++;

        ret = kvm_hvc_call_handler(vcpu);
        if (ret < 0) {
                vcpu_set_reg(vcpu, 0, ~0UL);
                return 1;
        }

        return ret;
}
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        /*
         * "If an SMC instruction executed at Non-secure EL1 is
         * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
         * Trap exception, not a Secure Monitor Call exception [...]"
         *
         * We need to advance the PC after the trap, as it would
         * otherwise return to the same address...
         */
        vcpu_set_reg(vcpu, 0, ~0UL);
        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
        return 1;
}
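
/*
 * Note that both the HVC and SMC paths report an unhandled call by
 * writing ~0UL (-1, which SMCCC/PSCI define as "not supported") back
 * into the guest's first argument register.
 */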
/*
 * Guest accesses to FP/ASIMD registers are routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}
/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *                  instruction executed by a guest
 *
 * @vcpu:       the vcpu pointer
 *
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_block(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
                trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
                vcpu->stat.wfe_exit_stat++;
                kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
        } else {
                trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
                vcpu->stat.wfi_exit_stat++;
                kvm_vcpu_block(vcpu);
                kvm_clear_request(KVM_REQ_UNHALT, vcpu);
        }

        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

        return 1;
}
/**
 * kvm_handle_guest_debug - handle a debug exception instruction
 *
 * @vcpu:       the vcpu pointer
 * @run:        access to the kvm_run structure for results
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * @return: 0 (while setting run->exit_reason), -1 for error
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        u32 hsr = kvm_vcpu_get_hsr(vcpu);
        int ret = 0;

        run->exit_reason = KVM_EXIT_DEBUG;
        run->debug.arch.hsr = hsr;

        switch (ESR_ELx_EC(hsr)) {
        case ESR_ELx_EC_WATCHPT_LOW:
                run->debug.arch.far = vcpu->arch.fault.far_el2;
                /* fall through */
        case ESR_ELx_EC_SOFTSTP_LOW:
        case ESR_ELx_EC_BREAKPT_LOW:
        case ESR_ELx_EC_BKPT32:
        case ESR_ELx_EC_BRK64:
                break;
        default:
                kvm_err("%s: un-handled case hsr: %#08x\n",
                        __func__, (unsigned int) hsr);
                ret = -1;
                break;
        }

        return ret;
}
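
/*
 * A minimal sketch of how a (hypothetical) userspace VMM might consume
 * the exit prepared above, assuming the usual KVM_RUN ioctl loop:
 *
 *      if (run->exit_reason == KVM_EXIT_DEBUG)
 *              printf("guest debug exit: hsr=%#x far=%#llx\n",
 *                     run->debug.arch.hsr,
 *                     (unsigned long long)run->debug.arch.far);
 */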
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        u32 hsr = kvm_vcpu_get_hsr(vcpu);

        kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
                      hsr, esr_get_class_string(hsr));

        kvm_inject_undefined(vcpu);
        return 1;
}
static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        /* Until SVE is supported for guests: */
        kvm_inject_undefined(vcpu);
        return 1;
}
#define __ptrauth_save_key(regs, key)                                          \
({                                                                             \
        regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);      \
        regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);      \
})
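
/*
 * For reference, __ptrauth_save_key(ctxt->sys_regs, APIA) expands to two
 * assignments, reading SYS_APIAKEYLO_EL1 and SYS_APIAKEYHI_EL1 into the
 * APIAKEYLO_EL1 and APIAKEYHI_EL1 slots of the sys_regs array.
 */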
/*
 * Handle the guest trying to use a ptrauth instruction, or trying to access a
 * ptrauth register.
 */
void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *ctxt;

        if (vcpu_has_ptrauth(vcpu)) {
                vcpu_ptrauth_enable(vcpu);
                ctxt = vcpu->arch.host_cpu_context;
                __ptrauth_save_key(ctxt->sys_regs, APIA);
                __ptrauth_save_key(ctxt->sys_regs, APIB);
                __ptrauth_save_key(ctxt->sys_regs, APDA);
                __ptrauth_save_key(ctxt->sys_regs, APDB);
                __ptrauth_save_key(ctxt->sys_regs, APGA);
        } else {
                kvm_inject_undefined(vcpu);
        }
}
/*
 * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
 * a NOP).
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_arm_vcpu_ptrauth_trap(vcpu);
        return 1;
}
static exit_handle_fn arm_exit_handlers[] = {
        [0 ... ESR_ELx_EC_MAX]  = kvm_handle_unknown_ec,
        [ESR_ELx_EC_WFx]        = kvm_handle_wfx,
        [ESR_ELx_EC_CP15_32]    = kvm_handle_cp15_32,
        [ESR_ELx_EC_CP15_64]    = kvm_handle_cp15_64,
        [ESR_ELx_EC_CP14_MR]    = kvm_handle_cp14_32,
        [ESR_ELx_EC_CP14_LS]    = kvm_handle_cp14_load_store,
        [ESR_ELx_EC_CP14_64]    = kvm_handle_cp14_64,
        [ESR_ELx_EC_HVC32]      = handle_hvc,
        [ESR_ELx_EC_SMC32]      = handle_smc,
        [ESR_ELx_EC_HVC64]      = handle_hvc,
        [ESR_ELx_EC_SMC64]      = handle_smc,
        [ESR_ELx_EC_SYS64]      = kvm_handle_sys_reg,
        [ESR_ELx_EC_SVE]        = handle_sve,
        [ESR_ELx_EC_IABT_LOW]   = kvm_handle_guest_abort,
        [ESR_ELx_EC_DABT_LOW]   = kvm_handle_guest_abort,
        [ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
        [ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
        [ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
        [ESR_ELx_EC_BKPT32]     = kvm_handle_guest_debug,
        [ESR_ELx_EC_BRK64]      = kvm_handle_guest_debug,
        [ESR_ELx_EC_FP_ASIMD]   = handle_no_fpsimd,
        [ESR_ELx_EC_PAC]        = kvm_handle_ptrauth,
};
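
/*
 * Because the designated initializer above first seeds every entry from
 * 0 to ESR_ELx_EC_MAX with kvm_handle_unknown_ec, any exception class
 * without an explicit handler safely falls back to it, and the lookup
 * below needs no bounds or NULL check.
 */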
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
        u32 hsr = kvm_vcpu_get_hsr(vcpu);
        u8 hsr_ec = ESR_ELx_EC(hsr);

        return arm_exit_handlers[hsr_ec];
}
/*
 * We may be single-stepping an emulated instruction. If the emulation
 * has been completed in the kernel, we can return to userspace with a
 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
 * emulation first.
 */
static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int handled;

        /*
         * See ARM ARM B1.14.1: "Hyp traps on instructions
         * that fail their condition code check"
         */
        if (!kvm_condition_valid(vcpu)) {
                kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                handled = 1;
        } else {
                exit_handle_fn exit_handler;

                exit_handler = kvm_get_exit_handler(vcpu);
                handled = exit_handler(vcpu, run);
        }

        return handled;
}
/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                int exception_index)
{
        if (ARM_SERROR_PENDING(exception_index)) {
                u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));

                /*
                 * HVC/SMC already have an adjusted PC, which we need
                 * to correct in order to return to it after having
                 * injected the SError.
                 */
                if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
                    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
                        u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
                        *vcpu_pc(vcpu) -= adj;
                }

                return 1;
        }

        exception_index = ARM_EXCEPTION_CODE(exception_index);

        switch (exception_index) {
        case ARM_EXCEPTION_IRQ:
                return 1;
        case ARM_EXCEPTION_EL1_SERROR:
                return 1;
        case ARM_EXCEPTION_TRAP:
                return handle_trap_exceptions(vcpu, run);
        case ARM_EXCEPTION_HYP_GONE:
                /*
                 * EL2 has been reset to the hyp-stub. This happens when a guest
                 * is pre-empted by kvm_reboot()'s shutdown call.
                 */
                run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                return 0;
        case ARM_EXCEPTION_IL:
                /*
                 * We attempted an illegal exception return. Guest state must
                 * have been corrupted somehow. Give up.
                 */
                run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                return -EINVAL;
        default:
                kvm_pr_unimpl("Unsupported exception type: %d",
                              exception_index);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return 0;
        }
}
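
/*
 * A minimal sketch (hypothetical, not part of this file) of how a caller
 * in the arch run loop consumes the convention above:
 *
 *      ret = handle_exit(vcpu, run, exception_index);
 *      if (ret > 0)
 *              ;       // re-enter the guest
 *      else if (ret == 0)
 *              ;       // return to userspace, which reads run->exit_reason
 *      else
 *              ;       // propagate the error
 */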
/* For exit types that need handling before we can be preempted */
void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
                       int exception_index)
{
        if (ARM_SERROR_PENDING(exception_index)) {
                if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
                        u64 disr = kvm_vcpu_get_disr(vcpu);

                        kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
                } else {
                        kvm_inject_vabt(vcpu);
                }

                return;
        }

        exception_index = ARM_EXCEPTION_CODE(exception_index);

        if (exception_index == ARM_EXCEPTION_EL1_SERROR)
                kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
}