/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>
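
/*
 * The hypervisor maps the shared "magic page" into the last 4k of the
 * guest's effective address space; the sign-extended offset -4096,
 * used with a (0) base (r0 reads as literal zero when used as a base
 * register), reaches it from anywhere.
 */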
#define KVM_MAGIC_PAGE          (-4096)
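
/*
 * Accessors for the 64-bit shared MSR field: 32-bit kernels only
 * touch the low word, which sits at offset +4 in the big-endian
 * 64-bit slot.
 */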
#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)   ld      reg, (offs)(reg2)
#define STL64(reg, offs, reg2)  std     reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)   lwz     reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)  stw     reg, (offs + 4)(reg2)
#endif
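
/*
 * SCRATCH_SAVE/SCRATCH_RESTORE bracket every template below: r30, r31
 * and CR are stashed in the magic page's scratch slots, and
 * shared->critical is set to r1 so the hypervisor holds off interrupt
 * delivery while the template runs.
 */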
#define SCRATCH_SAVE                                                    \
        /* Enable the critical section: we are critical while           \
           shared->critical == r1 */                                    \
        STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);              \
                                                                        \
        /* Save state */                                                \
        PPC_STL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);          \
        PPC_STL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);          \
        mfcr    r31;                                                    \
        stw     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE                                                 \
        /* Restore state */                                             \
        PPC_LL  r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);          \
        lwz     r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);          \
        mtcr    r30;                                                    \
        PPC_LL  r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);          \
                                                                        \
        /* Disable the critical section: r2 never equals r1, so         \
           shared->critical == r1 no longer holds */                    \
        STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
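
/*
 * Everything between kvm_template_start and kvm_template_end is
 * template code. At boot, the paravirt patching code in
 * arch/powerpc/kernel/kvm.c copies a template into kvm_tmp, fixes up
 * its *_reg, *_orig_ins and *_branch slots (located via the *_offs
 * words that follow each template), and replaces the trapping guest
 * instruction with a branch into the patched copy.
 */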
.global kvm_template_start
kvm_template_start:
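
/*
 * Emulate mtmsrd: fold the register's MSR_EE/MSR_RI bits into the MSR
 * copy in the magic page, and only trap to the hypervisor when an
 * interrupt is pending and has just become deliverable.
 */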
.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

        SCRATCH_SAVE

        /* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
        lis     r30, (~(MSR_EE | MSR_RI))@h
        ori     r30, r30, (~(MSR_EE | MSR_RI))@l
        and     r31, r31, r30

        /* OR the register's (MSR_EE|MSR_RI) bits into the MSR */
kvm_emulate_mtmsrd_reg:
        ori     r30, r0, 0
        andi.   r30, r30, (MSR_EE|MSR_RI)
        or      r31, r31, r30

        /* Put MSR back into magic page */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Check if we have to fetch an interrupt */
        lwz     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r31, 0
        beq+    no_check

        /* Check if we may trigger an interrupt */
        andi.   r30, r30, MSR_EE
        beq     no_check

        SCRATCH_RESTORE

        /* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
        tlbsync

        b       kvm_emulate_mtmsrd_branch

no_check:

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtmsrd_branch:
        b       .
kvm_emulate_mtmsrd_end:
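
/*
 * Patch-time metadata: instruction-word offsets of the slots to fix
 * up within the template, plus the template length in words.
 */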
.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
        .long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
        .long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
        .long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
        .long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4

#define MSR_SAFE_BITS (MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
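
/*
 * Only MSR[EE] and MSR[RI] can be updated purely in the magic page.
 * If any other ("critical") bit changes, the real, trapping mtmsr has
 * to be executed.
 */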
.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

        SCRATCH_SAVE

        /* Fetch old MSR in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
        ori     r30, r0, 0
        xor     r31, r30, r31

        /* Check if we really need to do mtmsr */
        LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
        and.    r31, r31, r30

        /* No critical bits changed? Maybe we can stay in the guest. */
        beq     maybe_stay_in_guest

do_mtmsr:

        SCRATCH_RESTORE

        /* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
        mtmsr   r0

        b       kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

        /* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
        ori     r30, r0, 0

        /* Put MSR into magic page because we don't call mtmsr */
        STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Check if we have to fetch an interrupt */
        lwz     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r31, 0
        beq+    no_mtmsr

        /* Check if we may trigger an interrupt */
        andi.   r31, r30, MSR_EE
        bne     do_mtmsr

no_mtmsr:

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtmsr_branch:
        b       .
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
        .long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
        .long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
        .long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
        .long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
        .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4

#ifdef CONFIG_BOOKE
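
/*
 * BookE toggles MSR[EE] with wrtee/wrteei instead of mtmsr; the same
 * shadow-MSR scheme as above applies.
 */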
/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

        SCRATCH_SAVE

        /* Fetch old MSR in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
        ori     r30, r0, 0
        rlwimi  r31, r30, 0, MSR_EE

        /*
         * If MSR[EE] is now set, check for a pending interrupt.
         * We could skip this if MSR[EE] was already on, but that
         * should be rare, so don't bother.
         */
        andi.   r30, r30, MSR_EE

        /* Put MSR into magic page because we don't call wrtee */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        beq     no_wrtee

        /* Check if we have to fetch an interrupt */
        lwz     r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r30, 0
        bne     do_wrtee

no_wrtee:
        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_wrtee_branch:
        b       .

do_wrtee:
        SCRATCH_RESTORE

        /* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
        wrtee   r0

        b       kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
        .long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
        .long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
        .long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
        .long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4
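
/*
 * wrteei 0 only clears MSR[EE], which can never make a pending
 * interrupt deliverable, so no interrupt check is needed here.
 */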
.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
        SCRATCH_SAVE

        /* Fetch old MSR in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Remove MSR_EE from old MSR */
        rlwinm  r31, r31, 0, ~MSR_EE

        /* Write new MSR value back */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_wrteei_0_branch:
        b       .
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
        .long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
        .long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4

#endif /* CONFIG_BOOKE */

#ifdef CONFIG_PPC_BOOK3S_32
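
/*
 * Emulate mtsrin on Book3S-32: while translation (MSR[IR]/MSR[DR]) is
 * off, the new segment register value is only cached in the magic
 * page; with translation on, the original, trapping mtsrin runs so
 * the update takes effect immediately.
 */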
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

        SCRATCH_SAVE

        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
        andi.   r31, r31, MSR_DR | MSR_IR
        beq     kvm_emulate_mtsrin_reg1

        SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
        nop
        b       kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
        /* r30 = (rX >> 28) * 4, the byte offset of the SR entry */
        rlwinm  r30,r0,6,26,29

kvm_emulate_mtsrin_reg2:
        stw     r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtsrin_branch:
        b       .
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
        .long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
        .long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
        .long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
        .long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
        .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4

#endif /* CONFIG_PPC_BOOK3S_32 */
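
/*
 * Scratch buffer that the patched template copies are allocated from.
 */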
        .balign 4
        .global kvm_tmp
kvm_tmp:
        .space  (64 * 1024)

.global kvm_tmp_end
kvm_tmp_end:

.global kvm_template_end
kvm_template_end: