// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
        /*
         * When the system doesn't support FP/SIMD, we cannot rely on
         * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
         * abort on the very first access to FP and thus we should never
         * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
         * trap the accesses.
         */
        if (!system_supports_fpsimd() ||
            vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
                vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
                                      KVM_ARM64_FP_HOST);

        return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}

/* Save the 32-bit only FPSIMD system register state */
static void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
        if (!vcpu_el1_is_32bit(vcpu))
                return;

        vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
}

static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
        /*
         * We are about to set CPTR_EL2.TFP to trap all floating point
         * register accesses to EL2, however, the ARM ARM clearly states that
         * traps are only taken to EL2 if the operation would not otherwise
         * trap to EL1. Therefore, always make sure that for 32-bit guests,
         * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
         * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access
         * to it will cause an exception.
         */
        if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
                write_sysreg(1 << 30, fpexc32_el2);
                isb();
        }
}

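/* Configure the traps that are common to the VHE and non-VHE entry paths */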
static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
{
        /* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
        write_sysreg(1 << 15, hstr_el2);

        /*
         * Make sure we trap PMU access from EL0 to EL2. Also sanitize
         * PMSELR_EL0 to make sure it never contains the cycle
         * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
         * EL1 instead of being trapped to EL2.
         */
        write_sysreg(0, pmselr_el0);
        write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
        write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}

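/* Undo the trap configuration that is common to VHE and non-VHE */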
static void __hyp_text __deactivate_traps_common(void)
{
        write_sysreg(0, hstr_el2);
        write_sysreg(0, pmuserenr_el0);
}

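/* VHE: set up CPACR_EL1 traps (trace, FP/SIMD, SVE) and install the hyp vectors */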
static void activate_traps_vhe(struct kvm_vcpu *vcpu)
{
        u64 val;

        val = read_sysreg(cpacr_el1);
        val |= CPACR_EL1_TTA;
        val &= ~CPACR_EL1_ZEN;
        if (update_fp_enabled(vcpu)) {
                if (vcpu_has_sve(vcpu))
                        val |= CPACR_EL1_ZEN;
        } else {
                val &= ~CPACR_EL1_FPEN;
                __activate_traps_fpsimd32(vcpu);
        }

        write_sysreg(val, cpacr_el1);

        write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
NOKPROBE_SYMBOL(activate_traps_vhe);

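/* non-VHE: set up CPTR_EL2 traps and the speculative AT workaround if needed */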
static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
{
        u64 val;

        __activate_traps_common(vcpu);

        val = CPTR_EL2_DEFAULT;
        val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
        if (!update_fp_enabled(vcpu)) {
                val |= CPTR_EL2_TFP;
                __activate_traps_fpsimd32(vcpu);
        }

        write_sysreg(val, cptr_el2);

        if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

                isb();
                /*
                 * At this stage, and thanks to the above isb(), S2 is
                 * configured and enabled. We can now restore the guest's S1
                 * configuration: SCTLR, and only then TCR.
                 */
                write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
                isb();
                write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
        }
}

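/* Program HCR_EL2 for the guest, then branch to the VHE/non-VHE specific setup */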
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
        u64 hcr = vcpu->arch.hcr_el2;

        if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
                hcr |= HCR_TVM;

        write_sysreg(hcr, hcr_el2);

        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
                write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

        if (has_vhe())
                activate_traps_vhe(vcpu);
        else
                __activate_traps_nvhe(vcpu);
}

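/* VHE: restore the host's HCR_EL2/CPACR_EL1 configuration and exception vectors */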
static void deactivate_traps_vhe(void)
{
        extern char vectors[];  /* kernel exception vectors */
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);

        /*
         * ARM errata 1165522 and 1530923 require the actual execution of the
         * above before we can switch to the EL2/EL0 translation regime used by
         * the host.
         */
        asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));

        write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
        write_sysreg(vectors, vbar_el1);
}
NOKPROBE_SYMBOL(deactivate_traps_vhe);

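/* non-VHE: restore the host's MDCR_EL2, HCR_EL2 and CPTR_EL2 configuration */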
static void __hyp_text __deactivate_traps_nvhe(void)
{
        u64 mdcr_el2 = read_sysreg(mdcr_el2);

        if (cpus_have_const_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
                u64 val;

                /*
                 * Set the TCR and SCTLR registers in the exact opposite
                 * sequence as __activate_traps_nvhe (first prevent walks,
                 * then force the MMU on). A generous sprinkling of isb()
                 * ensures that things happen in this exact order.
                 */
                val = read_sysreg_el1(SYS_TCR);
                write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
                isb();
                val = read_sysreg_el1(SYS_SCTLR);
                write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
                isb();
        }

        __deactivate_traps_common();

        mdcr_el2 &= MDCR_EL2_HPMN_MASK;
        mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

        write_sysreg(mdcr_el2, mdcr_el2);
        write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
        write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

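/* Common exit path: preserve any pending virtual SError, then undo the traps */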
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
        /*
         * If we pended a virtual abort, preserve it until it gets
         * cleared. See D1.14.3 (Virtual Interrupts) for details, but
         * the crucial bit is "On taking a vSError interrupt,
         * HCR_EL2.VSE is cleared to 0."
         */
        if (vcpu->arch.hcr_el2 & HCR_VSE) {
                vcpu->arch.hcr_el2 &= ~HCR_VSE;
                vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
        }

        if (has_vhe())
                deactivate_traps_vhe();
        else
                __deactivate_traps_nvhe();
}

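/* On VHE, the common traps are toggled at vcpu load/put rather than at entry/exit */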
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
        __activate_traps_common(vcpu);
}

void deactivate_traps_vhe_put(void)
{
        u64 mdcr_el2 = read_sysreg(mdcr_el2);

        mdcr_el2 &= MDCR_EL2_HPMN_MASK |
                    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
                    MDCR_EL2_TPMS;

        write_sysreg(mdcr_el2, mdcr_el2);

        __deactivate_traps_common();
}

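/* Point stage 2 translation at the guest's page tables (VTTBR_EL2/VTCR_EL2) */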
static void __hyp_text __activate_vm(struct kvm *kvm)
{
        __load_guest_stage2(kvm);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
        write_sysreg(0, vttbr_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
                __vgic_v3_save_state(vcpu);
                __vgic_v3_deactivate_traps(vcpu);
        }
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
                __vgic_v3_activate_traps(vcpu);
                __vgic_v3_restore_state(vcpu);
        }
}

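/* Resolve the faulting IPA from the guest VA with an AT S1E1R walk */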
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
        u64 par, tmp;

        /*
         * Resolve the IPA the hard way using the guest VA.
         *
         * Stage-1 translation already validated the memory access
         * rights. As such, we can use the EL1 translation regime, and
         * don't have to distinguish between EL0 and EL1 access.
         *
         * We do need to save/restore PAR_EL1 though, as we haven't
         * saved the guest context yet, and we may return early...
         */
        par = read_sysreg(par_el1);
        asm volatile("at s1e1r, %0" : : "r" (far));
        isb();

        tmp = read_sysreg(par_el1);
        write_sysreg(par, par_el1);

        if (unlikely(tmp & SYS_PAR_EL1_F))
                return false; /* Translation failed, back to guest */

        /* Convert PAR to HPFAR format */
        *hpfar = PAR_TO_HPFAR(tmp);
        return true;
}

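/* Record FAR_EL2/HPFAR_EL2 for guest aborts before they can be clobbered */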
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
        u8 ec;
        u64 esr;
        u64 hpfar, far;

        esr = vcpu->arch.fault.esr_el2;
        ec = ESR_ELx_EC(esr);

        if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
                return true;

        far = read_sysreg_el2(SYS_FAR);

        /*
         * The HPFAR can be invalid if the stage 2 fault did not
         * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
         * bit is clear) and one of the two following cases are true:
         *   1. The fault was due to a permission fault
         *   2. The processor carries errata 834220
         *
         * Therefore, for all non S1PTW faults where we either have a
         * permission fault or the errata workaround is enabled, we
         * resolve the IPA using the AT instruction.
         */
        if (!(esr & ESR_ELx_S1PTW) &&
            (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
             (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
                if (!__translate_far_to_hpfar(far, &hpfar))
                        return false;
        } else {
                hpfar = read_sysreg(hpfar_el2);
        }

        vcpu->arch.fault.far_el2 = far;
        vcpu->arch.fault.hpfar_el2 = hpfar;
        return true;
}

/* Check for an FPSIMD/SVE trap and handle as appropriate */
static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
        bool vhe, sve_guest, sve_host;
        u8 hsr_ec;

        if (!system_supports_fpsimd())
                return false;

        if (system_supports_sve()) {
                sve_guest = vcpu_has_sve(vcpu);
                sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
                vhe = true;
        } else {
                sve_guest = false;
                sve_host = false;
                vhe = has_vhe();
        }

        hsr_ec = kvm_vcpu_trap_get_class(vcpu);
        if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
            hsr_ec != ESR_ELx_EC_SVE)
                return false;

        /* Don't handle SVE traps for non-SVE vcpus here: */
        if (!sve_guest)
                if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
                        return false;

        /* Valid trap. Switch the context: */

        if (vhe) {
                u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;

                if (sve_guest)
                        reg |= CPACR_EL1_ZEN;

                write_sysreg(reg, cpacr_el1);
        } else {
                write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
                             cptr_el2);
        }

        isb();

        if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
                /*
                 * In the SVE case, VHE is assumed: it is enforced by
                 * Kconfig and kvm_arch_init().
                 */
                if (sve_host) {
                        struct thread_struct *thread = container_of(
                                vcpu->arch.host_fpsimd_state,
                                struct thread_struct, uw.fpsimd_state);

                        sve_save_state(sve_pffr(thread),
                                       &vcpu->arch.host_fpsimd_state->fpsr);
                } else {
                        __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
                }

                vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
        }

        if (sve_guest) {
                sve_load_state(vcpu_sve_pffr(vcpu),
                               &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
                               sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
                write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
        } else {
                __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
        }

        /* Skip restoring fpexc32 for AArch64 guests */
        if (!(read_sysreg(hcr_el2) & HCR_RW))
                write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
                             fpexc32_el2);

        vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

        return true;
}

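/*
 * Cavium ThunderX2 erratum 219 (TVM): emulate trapped writes to the EL1
 * virtual-memory control registers at EL2 instead of bouncing them to the host.
 */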
static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
        u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
        int rt = kvm_vcpu_sys_get_rt(vcpu);
        u64 val = vcpu_get_reg(vcpu, rt);

        /*
         * The normal sysreg handling code expects to see the traps,
         * let's not do anything here.
         */
        if (vcpu->arch.hcr_el2 & HCR_TVM)
                return false;

        switch (sysreg) {
        case SYS_SCTLR_EL1:
                write_sysreg_el1(val, SYS_SCTLR);
                break;
        case SYS_TTBR0_EL1:
                write_sysreg_el1(val, SYS_TTBR0);
                break;
        case SYS_TTBR1_EL1:
                write_sysreg_el1(val, SYS_TTBR1);
                break;
        case SYS_TCR_EL1:
                write_sysreg_el1(val, SYS_TCR);
                break;
        case SYS_ESR_EL1:
                write_sysreg_el1(val, SYS_ESR);
                break;
        case SYS_FAR_EL1:
                write_sysreg_el1(val, SYS_FAR);
                break;
        case SYS_AFSR0_EL1:
                write_sysreg_el1(val, SYS_AFSR0);
                break;
        case SYS_AFSR1_EL1:
                write_sysreg_el1(val, SYS_AFSR1);
                break;
        case SYS_MAIR_EL1:
                write_sysreg_el1(val, SYS_MAIR);
                break;
        case SYS_AMAIR_EL1:
                write_sysreg_el1(val, SYS_AMAIR);
                break;
        case SYS_CONTEXTIDR_EL1:
                write_sysreg_el1(val, SYS_CONTEXTIDR);
                break;
        default:
                return false;
        }

        __kvm_skip_instr(vcpu);
        return true;
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
        if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
                vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

        /*
         * We're using the raw exception code in order to only process
         * the trap if no SError is pending. We will come back to the
         * same PC once the SError has been injected, and replay the
         * trapping instruction.
         */
        if (*exit_code != ARM_EXCEPTION_TRAP)
                goto exit;

        if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
            kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
            handle_tx2_tvm(vcpu))
                return true;

        /*
         * We trap the first access to the FP/SIMD to save the host context
         * and restore the guest context lazily.
         * If FP/SIMD is not implemented, handle the trap and inject an
         * undefined instruction exception to the guest.
         * Similarly for trapped SVE accesses.
         */
        if (__hyp_handle_fpsimd(vcpu))
                return true;

        if (!__populate_fault_info(vcpu))
                return true;

        if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
                bool valid;

                valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
                        kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
                        kvm_vcpu_dabt_isvalid(vcpu) &&
                        !kvm_vcpu_dabt_isextabt(vcpu) &&
                        !kvm_vcpu_dabt_iss1tw(vcpu);

                if (valid) {
                        int ret = __vgic_v2_perform_cpuif_access(vcpu);

                        if (ret == 1)
                                return true;

                        /* Promote an illegal access to an SError. */
                        if (ret == -1)
                                *exit_code = ARM_EXCEPTION_EL1_SERROR;

                        goto exit;
                }
        }

        if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
            (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
             kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
                int ret = __vgic_v3_perform_cpuif_access(vcpu);

                if (ret == 1)
                        return true;
        }

exit:
        /* Return to the host kernel and handle the exit */
        return false;
}

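/* Does this vcpu want the SSBD (Spectre-v4) mitigation turned off? */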
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
        if (!cpus_have_const_cap(ARM64_SSBD))
                return false;

        return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}

static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
        /*
         * The host runs with the workaround always present. If the
         * guest wants it disabled, so be it...
         */
        if (__needs_ssbd_off(vcpu) &&
            __hyp_this_cpu_read(arm64_ssbd_callback_required))
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
        /*
         * If the guest has disabled the workaround, bring it back on.
         */
        if (__needs_ssbd_off(vcpu) &&
            __hyp_this_cpu_read(arm64_ssbd_callback_required))
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}

/*
 * Disable host events, enable guest events
 */
static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
        struct kvm_host_data *host;
        struct kvm_pmu_events *pmu;

        host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
        pmu = &host->pmu_events;

        if (pmu->events_host)
                write_sysreg(pmu->events_host, pmcntenclr_el0);

        if (pmu->events_guest)
                write_sysreg(pmu->events_guest, pmcntenset_el0);

        return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest events, enable host events
 */
static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
        struct kvm_host_data *host;
        struct kvm_pmu_events *pmu;

        host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
        pmu = &host->pmu_events;

        if (pmu->events_guest)
                write_sysreg(pmu->events_guest, pmcntenclr_el0);

        if (pmu->events_host)
                write_sysreg(pmu->events_host, pmcntenset_el0);
}

/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *host_ctxt;
        struct kvm_cpu_context *guest_ctxt;
        u64 exit_code;

        host_ctxt = vcpu->arch.host_cpu_context;
        host_ctxt->__hyp_running_vcpu = vcpu;
        guest_ctxt = &vcpu->arch.ctxt;

        sysreg_save_host_state_vhe(host_ctxt);

        /*
         * ARM erratum 1165522 requires us to configure both stage 1 and
         * stage 2 translation for the guest context before we clear
         * HCR_EL2.TGE.
         *
         * We have already configured the guest's stage 1 translation in
         * kvm_vcpu_load_sysregs above. We must now call __activate_vm
         * before __activate_traps, because __activate_vm configures
         * stage 2 translation, and __activate_traps clears HCR_EL2.TGE
         * (among other things).
         */
        __activate_vm(vcpu->kvm);
        __activate_traps(vcpu);

        sysreg_restore_guest_state_vhe(guest_ctxt);
        __debug_switch_to_guest(vcpu);

        __set_guest_arch_workaround_state(vcpu);

        do {
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu, host_ctxt);

                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));

        __set_host_arch_workaround_state(vcpu);

        sysreg_save_guest_state_vhe(guest_ctxt);

        __deactivate_traps(vcpu);

        sysreg_restore_host_state_vhe(host_ctxt);

        if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
                __fpsimd_save_fpexc32(vcpu);

        __debug_switch_to_host(vcpu);

        return exit_code;
}
NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);

/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *host_ctxt;
        struct kvm_cpu_context *guest_ctxt;
        bool pmu_switch_needed;
        u64 exit_code;

        /*
         * Having IRQs masked via PMR when entering the guest means the GIC
         * will not signal the CPU of interrupts of lower priority, and the
         * only way to get out will be via guest exceptions.
         * Naturally, we want to avoid this.
         */
        if (system_uses_irq_prio_masking()) {
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
                dsb(sy);
        }

        vcpu = kern_hyp_va(vcpu);

        host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
        host_ctxt->__hyp_running_vcpu = vcpu;
        guest_ctxt = &vcpu->arch.ctxt;

        pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

        __sysreg_save_state_nvhe(host_ctxt);

        /*
         * We must restore the 32-bit state before the sysregs, thanks
         * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
         *
         * Also, and in order to be able to deal with erratum #1319537 (A57)
         * and #1319367 (A72), we must ensure that all VM-related sysregs are
         * restored before we enable S2 translation.
         */
        __sysreg32_restore_state(vcpu);
        __sysreg_restore_state_nvhe(guest_ctxt);

        __activate_vm(kern_hyp_va(vcpu->kvm));
        __activate_traps(vcpu);

        __hyp_vgic_restore_state(vcpu);
        __timer_enable_traps(vcpu);

        __debug_switch_to_guest(vcpu);

        __set_guest_arch_workaround_state(vcpu);

        do {
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu, host_ctxt);

                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));

        __set_host_arch_workaround_state(vcpu);

        __sysreg_save_state_nvhe(guest_ctxt);
        __sysreg32_save_state(vcpu);
        __timer_disable_traps(vcpu);
        __hyp_vgic_save_state(vcpu);

        __deactivate_traps(vcpu);
        __deactivate_vm(vcpu);

        __sysreg_restore_state_nvhe(host_ctxt);

        if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
                __fpsimd_save_fpexc32(vcpu);

        /*
         * This must come after restoring the host sysregs, since a non-VHE
         * system may enable SPE here and make use of the TTBRs.
         */
        __debug_switch_to_host(vcpu);

        if (pmu_switch_needed)
                __pmu_switch_to_host(host_ctxt);

        /* Returning to host will clear PSR.I, remask PMR if needed */
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQOFF);

        return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
                                             struct kvm_cpu_context *__host_ctxt)
{
        struct kvm_vcpu *vcpu;
        unsigned long str_va;

        vcpu = __host_ctxt->__hyp_running_vcpu;

        if (read_sysreg(vttbr_el2)) {
                __timer_disable_traps(vcpu);
                __deactivate_traps(vcpu);
                __deactivate_vm(vcpu);
                __sysreg_restore_state_nvhe(__host_ctxt);
        }

        /*
         * Force the panic string to be loaded from the literal pool,
         * making sure it is a kernel address and not a PC-relative
         * location.
         */
        asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

        __hyp_do_panic(str_va,
                       spsr, elr,
                       read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR),
                       read_sysreg(hpfar_el2), par, vcpu);
}

static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
                                 struct kvm_cpu_context *host_ctxt)
{
        struct kvm_vcpu *vcpu;

        vcpu = host_ctxt->__hyp_running_vcpu;

        __deactivate_traps(vcpu);
        sysreg_restore_host_state_vhe(host_ctxt);

        panic(__hyp_panic_string,
              spsr, elr,
              read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
              read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic_vhe);

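/* Entered on an unexpected exception taken at hyp; does not return */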
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
        u64 spsr = read_sysreg_el2(SYS_SPSR);
        u64 elr = read_sysreg_el2(SYS_ELR);
        u64 par = read_sysreg(par_el1);

        if (!has_vhe())
                __hyp_call_panic_nvhe(spsr, elr, par, host_ctxt);
        else
                __hyp_call_panic_vhe(spsr, elr, par, host_ctxt);

        unreachable();
}