1 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
7 #include <linux/linkage.h>
9 #include <asm/assembler.h>
10 #include <asm/kvm_arm.h>
11 #include <asm/kvm_mmu.h>
12 #include <asm/pgtable-hwdef.h>
13 #include <asm/sysreg.h>
17 .pushsection .hyp.idmap.text, "ax"
/*
 * EL2 exception vector table used while KVM is being initialised.
 * Only a synchronous exception from 64-bit EL1 (the host kernel's HVC)
 * is serviced, by __do_hyp_init; every other vector traps to __invalid.
 * NOTE(review): the table's entry label and .align directive fall in an
 * elided portion of this file — confirm against the full source.
 */
22 ventry __invalid // Synchronous EL2t
23 ventry __invalid // IRQ EL2t
24 ventry __invalid // FIQ EL2t
25 ventry __invalid // Error EL2t
27 ventry __invalid // Synchronous EL2h
28 ventry __invalid // IRQ EL2h
29 ventry __invalid // FIQ EL2h
30 ventry __invalid // Error EL2h
32 ventry __do_hyp_init // Synchronous 64-bit EL1
33 ventry __invalid // IRQ 64-bit EL1
34 ventry __invalid // FIQ 64-bit EL1
35 ventry __invalid // Error 64-bit EL1
37 ventry __invalid // Synchronous 32-bit EL1
38 ventry __invalid // IRQ 32-bit EL1
39 ventry __invalid // FIQ 32-bit EL1
40 ventry __invalid // Error 32-bit EL1
/*
 * __do_hyp_init: configure EL2 (translation controls, SCTLR_EL2, stack
 * and vectors) so the kernel can run KVM code at EL2.
 * NOTE(review): the routine's entry label and several instructions
 * (register loads, msr writes, the final eret) are elided in this view;
 * register roles below are inferred from the surviving comments and
 * should be confirmed against the full source.
 */
/* x0 carries the HVC function ID; values below HVC_STUB_HCALL_NR are stub calls */
52 /* Check for a stub HVC call */
53 cmp x0, #HVC_STUB_HCALL_NR
54 b.lo __kvm_handle_stub_hvc
/* On CPUs with Common-not-Private support, set the CnP bit in the TTBR value in x4 */
57 alternative_if ARM64_HAS_CNP
58 orr x4, x4, #TTBR_CNP_BIT
59 alternative_else_nop_endif
69 * The ID map may be configured to use an extended virtual address
70 * range. This is only the case if system RAM is out of range for the
71 * currently configured page size and VA_BITS, in which case we will
72 * also need the extended virtual range for the HYP ID map, or we won't
73 * be able to enable the EL2 MMU.
75 * However, at EL2, there is only one TTBR register, and we can't switch
76 * between translation tables *and* update TCR_EL2.T0SZ at the same
77 * time. Bottom line: we need to use the extended range with *both* our
80 * So use the same T0SZ value we use for the ID map.
/* Insert the ID-map T0SZ (presumably in x5) into the T0SZ field of the TCR_EL2 value in x4 */
83 bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
86 * Set the PS bits in TCR_EL2.
/* Encode the supported physical address size into TCR_EL2.PS; x5/x6 are scratch */
88 tcr_compute_pa_size x4, #TCR_EL2_PS_SHIFT, x5, x6
96 /* Invalidate the stale TLBs from Bootloader */
/*
 * Build the SCTLR_EL2 value in x4: all RES1 bits plus the default
 * flags, minus the alignment-check (A) bit; big-endian kernels also
 * set the EE bit via the CPU_BE() wrapper.
 */
101 * Preserve all the RES1 bits while setting the default flags,
102 * as well as the EE bit on BE. Drop the A flag since the compiler
103 * is allowed to generate unaligned accesses.
105 ldr x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
106 CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
110 /* Set the stack and new vectors */
115 /* Set tpidr_el2 for use by HYP */
120 ENDPROC(__kvm_hyp_init)
122 ENTRY(__kvm_handle_stub_hvc)
/*
 * Handle the hyp-stub hypercalls (reached when x0 < HVC_STUB_HCALL_NR):
 *   HVC_SOFT_RESTART  - restart at EL2 at a caller-supplied entry point
 *   HVC_RESET_VECTORS - disable the EL2 MMU and reinstall the stub vectors
 * Any other value falls through to the error path, which returns
 * HVC_STUB_ERR in x0.
 * NOTE(review): several instructions (the conditional branches,
 * msr/eret sequences and the SCTLR_EL2 read into x5) are elided in
 * this view of the file — confirm against the full source.
 */
123 cmp x0, #HVC_SOFT_RESTART
126 /* This is where we're about to jump, staying at EL2 */
/* PSTATE for the restarted code: A/I/F/D all masked, EL2h mode */
128 mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
131 /* Shuffle the arguments, and don't come back */
137 1: cmp x0, #HVC_RESET_VECTORS
141 * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
142 * case we are coming via HVC_SOFT_RESTART.
/* Turn the EL2 MMU off: clear the M bit and the other SCTLR_ELx_FLAGS in x5 */
145 ldr x6, =SCTLR_ELx_FLAGS
146 bic x5, x5, x6 // Clear SCTLR_ELx_M and the other flag bits
147 pre_disable_mmu_workaround
151 /* Install stub vectors */
152 adr_l x5, __hyp_stub_vectors
157 1: /* Bad stub call */
/* Report failure to the caller: x0 = HVC_STUB_ERR */
158 ldr x0, =HVC_STUB_ERR
161 ENDPROC(__kvm_handle_stub_hvc)