/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#if defined(CONFIG_PPC_BOOK3S_64)

#define ULONG_SIZE		8
#define FUNC(name)		GLUE(.,name)

#elif defined(CONFIG_PPC_BOOK3S_32)

#define ULONG_SIZE		4
#define FUNC(name)		name

#endif /* CONFIG_PPC_BOOK3S_XX */
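
/*
 * PPC_LL, PPC_STL and PPC_STLU (asm-compat.h) expand to ld/std/stdu on
 * 64-bit and lwz/stw/stwu on 32-bit, so the code below works for either
 * register width; FUNC() adds the dot prefix that 64-bit function
 * descriptors require.
 */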
#define VCPU_GPR(n)		(VCPU_GPRS + (n * ULONG_SIZE))
#define VCPU_LOAD_NVGPRS(vcpu) \
	PPC_LL	r14, VCPU_GPR(r14)(vcpu); \
	PPC_LL	r15, VCPU_GPR(r15)(vcpu); \
	PPC_LL	r16, VCPU_GPR(r16)(vcpu); \
	PPC_LL	r17, VCPU_GPR(r17)(vcpu); \
	PPC_LL	r18, VCPU_GPR(r18)(vcpu); \
	PPC_LL	r19, VCPU_GPR(r19)(vcpu); \
	PPC_LL	r20, VCPU_GPR(r20)(vcpu); \
	PPC_LL	r21, VCPU_GPR(r21)(vcpu); \
	PPC_LL	r22, VCPU_GPR(r22)(vcpu); \
	PPC_LL	r23, VCPU_GPR(r23)(vcpu); \
	PPC_LL	r24, VCPU_GPR(r24)(vcpu); \
	PPC_LL	r25, VCPU_GPR(r25)(vcpu); \
	PPC_LL	r26, VCPU_GPR(r26)(vcpu); \
	PPC_LL	r27, VCPU_GPR(r27)(vcpu); \
	PPC_LL	r28, VCPU_GPR(r28)(vcpu); \
	PPC_LL	r29, VCPU_GPR(r29)(vcpu); \
	PPC_LL	r30, VCPU_GPR(r30)(vcpu); \
	PPC_LL	r31, VCPU_GPR(r31)(vcpu)
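
/*
 * Only the non-volatile GPRs are handled at this level; the guest's
 * volatile registers are swapped by the low-level (shadow vcpu)
 * entry/exit code.
 */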
/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)    *
 *                                                                           *
 ****************************************************************************/
/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
kvm_start_entry:
	/* Write correct stack frame */
	mflr	r0
	PPC_STL	r0,PPC_LR_STKOFF(r1)
	/* Save host state to the stack */
	PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)

	/* Save r3 (kvm_run) and r4 (vcpu) */
	SAVE_2GPRS(3, r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save LR */
	PPC_STL	r0, _LINK(r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)
kvm_start_lightweight:
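
	/*
	 * Re-entry point for the resume loops below; host non-volatile
	 * state is already on the stack when we arrive here.
	 *
	 * On 64-bit, the low hflags bit (BOOK3S_HFLAG_DCBZ32) is stashed
	 * in the PACA so the exit path knows whether HID5 was changed for
	 * dcbz 32-byte emulation and must be restored for the host.
	 */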
#ifdef CONFIG_PPC_BOOK3S_64
	PPC_LL	r3, VCPU_HFLAGS(r4)
	rldicl	r3, r3, 0, 63		/* r3 &= 1 */
	stb	r3, HSTATE_RESTORE_HID5(r13)
#endif /* CONFIG_PPC_BOOK3S_64 */
	PPC_LL	r4, VCPU_SHADOW_MSR(r4)	/* get shadow_msr */
	/* Jump to segment patching handler and into our guest */
	bl	FUNC(kvmppc_entry_trampoline)
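
	/*
	 * No ordinary return from this bl: the guest runs until it traps,
	 * and the exit path re-enters module code at kvmppc_handler_highmem
	 * below.
	 */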
	/*
	 * This is the handler in module memory. It gets jumped at from the
	 * lowmem trampoline code, so it's basically the guest exit code.
	 */
.global kvmppc_handler_highmem
kvmppc_handler_highmem:
	/*
	 * Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R12      = exit handler id
	 * R13      = PACA
	 * SVCPU.*  = guest *
	 */

	/* R7 = vcpu */
	PPC_LL	r7, GPR4(r1)
	PPC_STL	r14, VCPU_GPR(r14)(r7)
	PPC_STL	r15, VCPU_GPR(r15)(r7)
	PPC_STL	r16, VCPU_GPR(r16)(r7)
	PPC_STL	r17, VCPU_GPR(r17)(r7)
	PPC_STL	r18, VCPU_GPR(r18)(r7)
	PPC_STL	r19, VCPU_GPR(r19)(r7)
	PPC_STL	r20, VCPU_GPR(r20)(r7)
	PPC_STL	r21, VCPU_GPR(r21)(r7)
	PPC_STL	r22, VCPU_GPR(r22)(r7)
	PPC_STL	r23, VCPU_GPR(r23)(r7)
	PPC_STL	r24, VCPU_GPR(r24)(r7)
	PPC_STL	r25, VCPU_GPR(r25)(r7)
	PPC_STL	r26, VCPU_GPR(r26)(r7)
	PPC_STL	r27, VCPU_GPR(r27)(r7)
	PPC_STL	r28, VCPU_GPR(r28)(r7)
	PPC_STL	r29, VCPU_GPR(r29)(r7)
	PPC_STL	r30, VCPU_GPR(r30)(r7)
	PPC_STL	r31, VCPU_GPR(r31)(r7)
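
	/*
	 * The guest's non-volatile GPRs are saved above because the C
	 * handler called below is free to clobber them.
	 */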
	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
	mr	r5, r12

	/* Restore r3 (kvm_run) and r4 (vcpu) */
	REST_2GPRS(3, r1)
	bl	FUNC(kvmppc_handle_exit)
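
	/*
	 * kvmppc_handle_exit() returns a RESUME_* code in r3: RESUME_GUEST
	 * re-enters the guest directly, RESUME_GUEST_NV re-enters after
	 * reloading the non-volatile GPRs, anything else returns to the
	 * host.
	 */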
	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST
	beq	kvm_loop_lightweight

	cmpwi	r3, RESUME_GUEST_NV
	beq	kvm_loop_heavyweight
kvm_exit_loop:

	PPC_LL	r4, _LINK(r1)
	mtlr	r4

	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	addi	r1, r1, SWITCH_FRAME_SIZE
	blr
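
	/*
	 * Heavyweight loop: RESUME_GUEST_NV means the guest's non-volatile
	 * GPRs must be reloaded from the vcpu before re-entering the guest.
	 */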
kvm_loop_heavyweight:

	PPC_LL	r4, _LINK(r1)
	PPC_STL	r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)
	/* Load r3 (kvm_run) and r4 (vcpu) */
	REST_2GPRS(3, r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight
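
	/*
	 * Lightweight loop: the exit handler did not touch the guest's
	 * non-volatile GPRs, so only the vcpu pointer needs re-fetching
	 * before jumping back in.
	 */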
kvm_loop_lightweight:

	/* We'll need the vcpu pointer */
	PPC_LL	r4, GPR4(r1)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight