/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
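
/*
 * Each AArch64 general-purpose register is 8 bytes wide, so xN lives
 * 8*N bytes into the user_pt_regs array of the context. For example,
 * CPU_XREG_OFFSET(19) expands to CPU_GP_REGS + CPU_USER_PT_REGS + 8*19,
 * the byte offset of x19 within struct kvm_cpu_context (the base
 * offsets are generated by asm-offsets.c).
 */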

	.text
	.pushsection	.hyp.text, "ax"

.macro save_callee_saved_regs ctxt
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm
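
/*
 * Only x19-x29 and lr go through these macros: under AAPCS64 those are
 * the callee-saved registers, the ones the host compiler expects
 * __guest_enter to preserve. The caller-saved x0-x18 are shuffled by
 * hand below.
 */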

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
 */
ENTRY(__guest_enter)
	// x0: vcpu
	// x1: host/guest context
	// x2-x18: clobbered by macros

	// Store the host regs
	save_callee_saved_regs x1

	// Preserve vcpu & host_ctxt for use at exit time
	stp	x0, x1, [sp, #-16]!

	add	x1, x0, #VCPU_CONTEXT
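	// x1 now points at the guest's kvm_cpu_context embedded in the
	// vcpu; everything below is loaded from it.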

	// Prepare x0-x1 for later restore by pushing them onto the stack
	ldp	x2, x3, [x1, #CPU_XREG_OFFSET(0)]
	stp	x2, x3, [sp, #-16]!
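	// The stack now holds, from the top down: the guest's x0/x1
	// (popped again just before the eret below), then the vcpu and
	// host_ctxt pointers, which stay there for __guest_exit.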

	// x2-x18
	ldp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]
	ldr	x18,      [x1, #CPU_XREG_OFFSET(18)]

	// x19-x29, lr
	restore_callee_saved_regs x1

	// Last bits of the 64bit state
	ldp	x0, x1, [sp], #16

	// Do not touch any register after this!
	eret
ENDPROC(__guest_enter)
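
/*
 * There is no matching "call" into __guest_exit: it is branched to from
 * the hyp exception vectors (hyp-entry.S) once the guest traps, after
 * the vectors have stashed the guest's x0-x3 on the stack and loaded
 * x0/x1 with the vcpu pointer and the exit code.
 */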
ENTRY(__guest_exit)
	// x0: vcpu
	// x1: return code
	// x2-x3: free
	// x4-x29,lr: vcpu regs
	// vcpu x0-x3 on the stack

	add	x2, x0, #VCPU_CONTEXT
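	// x2 points at the guest context; stash the guest's remaining
	// general-purpose registers into it.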

	stp	x4, x5,   [x2, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x2, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x2, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x2, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x2, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x2, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x2, #CPU_XREG_OFFSET(16)]
	str	x18,      [x2, #CPU_XREG_OFFSET(18)]

	ldp	x6, x7, [sp], #16	// x2, x3
	ldp	x4, x5, [sp], #16	// x0, x1

	stp	x4, x5, [x2, #CPU_XREG_OFFSET(0)]
	stp	x6, x7, [x2, #CPU_XREG_OFFSET(2)]

	save_callee_saved_regs x2

	// Restore vcpu & host_ctxt from the stack
	// (preserving return code in x1)
	ldp	x0, x2, [sp], #16
	// Now restore the host regs
	restore_callee_saved_regs x2

	mov	x0, x1
	ret
ENDPROC(__guest_exit)
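
/*
 * Lazy FP/SIMD switch: guest FP/SIMD accesses are left trapped via
 * CPTR_EL2.TFP (or CPACR_EL1.FPEN with VHE), so the first such access
 * lands here. We flip the trap bit, swap the host's FP/SIMD state for
 * the guest's, and eret back to retry the instruction that trapped.
 */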
ENTRY(__fpsimd_guest_restore)
	stp	x4, lr, [sp, #-16]!
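
	// Re-enable FP/SIMD for the guest: clear the CPTR_EL2.TFP trap
	// bit on non-VHE, or set the CPACR_EL1.FPEN enable bits when
	// running with VHE (ARM64_HAS_VIRT_HOST_EXTN).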
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x2, cptr_el2
	bic	x2, x2, #CPTR_EL2_TFP
	msr	cptr_el2, x2
alternative_else
	mrs	x2, cpacr_el1
	orr	x2, x2, #CPACR_EL1_FPEN
	msr	cpacr_el1, x2
alternative_endif
	isb

	mrs	x3, tpidr_el2

	ldr	x0, [x3, #VCPU_HOST_CONTEXT]
	kern_hyp_va x0
	add	x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	bl	__fpsimd_save_state

	add	x2, x3, #VCPU_CONTEXT
	add	x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	bl	__fpsimd_restore_state
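
	// FPEXC32_EL2 is the EL2 view of the AArch32 FPEXC register; it
	// only needs reloading for a 32bit guest (HCR_EL2.RW clear).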
	// Skip restoring fpexc32 for AArch64 guests
	mrs	x1, hcr_el2
	tbnz	x1, #HCR_RW_SHIFT, 1f
	ldr	x4, [x3, #VCPU_FPEXC32_EL2]
	msr	fpexc32_el2, x4
1:
	ldp	x4, lr, [sp], #16
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16
	eret
ENDPROC(__fpsimd_guest_restore)

/*
 * When using the extended idmap, we don't have a trampoline page we can use
 * while we switch page tables during __kvm_hyp_reset. Accessing the idmap
 * directly would be ideal, but if we're using the extended idmap then the
 * idmap is located above HYP_PAGE_OFFSET, and the address will be masked by
 * kvm_call_hyp using kern_hyp_va.
 *
 * x0: HYP boot pgd
 * x1: HYP phys_idmap_start
 */
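/*
 * The bfi below overwrites bits [PAGE_SHIFT-1:0] of x4 with the same
 * bits of x3, i.e. it combines the page address of phys_idmap_start
 * with __kvm_hyp_reset's offset within its page, yielding the idmap
 * alias of __kvm_hyp_reset to branch to.
 */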
ENTRY(__extended_idmap_trampoline)
	mov	x4, x1
	adr_l	x3, __kvm_hyp_reset

	/* insert __kvm_hyp_reset()'s offset into phys_idmap_start */
	bfi	x4, x3, #0, #PAGE_SHIFT
	br	x4
ENDPROC(__extended_idmap_trampoline)