linux-2.6/next.git: arch/powerpc/kvm/book3s_rmhandlers.S
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/exception-64s.h>
#endif

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in low physical memory          *
 *                                                                           *
 ****************************************************************************/

#if defined(CONFIG_PPC_BOOK3S_64)

#define LOAD_SHADOW_VCPU(reg)   GET_PACA(reg)
#define MSR_NOIRQ               MSR_KERNEL & ~(MSR_IR | MSR_DR)
#define FUNC(name)              GLUE(.,name)
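
/*
 * Skip the instruction that caused the interrupt: advance SRR0 by one
 * instruction (4 bytes) and return with rfid.  Only r13 needs restoring
 * from SPRG_SCRATCH0; everything else is left untouched.
 */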
kvmppc_skip_interrupt:
        /*
         * Here all GPRs are unchanged from when the interrupt happened
         * except for r13, which is saved in SPRG_SCRATCH0.
         */
        mfspr   r13, SPRN_SRR0
        addi    r13, r13, 4
        mtspr   SPRN_SRR0, r13
        GET_SCRATCH0(r13)
        rfid
        b       .
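
/*
 * Same as kvmppc_skip_interrupt, but for hypervisor interrupts: advance
 * HSRR0 instead of SRR0 and return with hrfid.
 */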
kvmppc_skip_Hinterrupt:
        /*
         * Here all GPRs are unchanged from when the interrupt happened
         * except for r13, which is saved in SPRG_SCRATCH0.
         */
        mfspr   r13, SPRN_HSRR0
        addi    r13, r13, 4
        mtspr   SPRN_HSRR0, r13
        GET_SCRATCH0(r13)
        hrfid
        b       .

#elif defined(CONFIG_PPC_BOOK3S_32)

#define MSR_NOIRQ               MSR_KERNEL
#define FUNC(name)              name
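
/*
 * On 32-bit Book3S there is no PACA, so each interrupt vector gets a small
 * trampoline stub generated from this macro.  The stub decides whether the
 * interrupt hit a KVM guest (shadow vcpu pointer set and HSTATE_IN_GUEST
 * != KVM_GUEST_MODE_NONE) or plain host code, and either falls back to the
 * regular Linux handler or enters the KVM exit path.
 */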
.macro INTERRUPT_TRAMPOLINE intno

.global kvmppc_trampoline_\intno
kvmppc_trampoline_\intno:

        mtspr   SPRN_SPRG_SCRATCH0, r13         /* Save r13 */

        /*
         * First thing to do is to find out if we're coming
         * from a KVM guest or a Linux process.
         *
         * To distinguish, we check a magic byte in the PACA/current
         */
        mfspr   r13, SPRN_SPRG_THREAD
        lwz     r13, THREAD_KVM_SVCPU(r13)
        /* PPC32 can have a NULL pointer - let's check for that */
        mtspr   SPRN_SPRG_SCRATCH1, r12         /* Save r12 */
        mfcr    r12
        cmpwi   r13, 0
        bne     1f
2:      mtcr    r12
        mfspr   r12, SPRN_SPRG_SCRATCH1
        mfspr   r13, SPRN_SPRG_SCRATCH0         /* r13 = original r13 */
        b       kvmppc_resume_\intno            /* Get back original handler */

1:      tophys(r13, r13)
        stw     r12, HSTATE_SCRATCH1(r13)
        mfspr   r12, SPRN_SPRG_SCRATCH1
        stw     r12, HSTATE_SCRATCH0(r13)
        lbz     r12, HSTATE_IN_GUEST(r13)
        cmpwi   r12, KVM_GUEST_MODE_NONE
        bne     ..kvmppc_handler_hasmagic_\intno
        /* No KVM guest? Then jump back to the Linux handler! */
        lwz     r12, HSTATE_SCRATCH1(r13)
        b       2b

        /* Now we know we're handling a KVM guest */
..kvmppc_handler_hasmagic_\intno:

        /* Should we just skip the faulting instruction? */
        cmpwi   r12, KVM_GUEST_MODE_SKIP
        beq     kvmppc_handler_skip_ins

        /* Let's store which interrupt we're handling */
        li      r12, \intno

        /* Jump into the SLB exit code that goes to the highmem handler */
        b       kvmppc_handler_trampoline_exit

.endm
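
/* One trampoline stub is generated below for each interrupt vector KVM intercepts. */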
INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_SYSTEM_RESET
INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_MACHINE_CHECK
INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_DATA_STORAGE
INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_INST_STORAGE
INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_EXTERNAL
INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_ALIGNMENT
INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_PROGRAM
INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_FP_UNAVAIL
INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_DECREMENTER
INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_SYSCALL
INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_TRACE
INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_PERFMON
INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_ALTIVEC

/*
 * Bring us back to the faulting code, but skip the
 * faulting instruction.
 *
 * This is a generic exit path from the interrupt
 * trampolines above.
 *
 * Input Registers:
 *
 * R12            = free
 * R13            = Shadow VCPU (PACA)
 * HSTATE.SCRATCH0 = guest R12
 * HSTATE.SCRATCH1 = guest CR
 * SPRG_SCRATCH0  = guest R13
 *
 */
kvmppc_handler_skip_ins:

        /* Patch the IP to the next instruction */
        mfsrr0  r12
        addi    r12, r12, 4
        mtsrr0  r12

        /* Clean up all state */
        lwz     r12, HSTATE_SCRATCH1(r13)
        mtcr    r12
        PPC_LL  r12, HSTATE_SCRATCH0(r13)
        GET_SCRATCH0(r13)

        /* And get back into the code */
        RFI
#endif

/*
 * This trampoline brings us back to a real mode handler
 *
 * Input Registers:
 *
 * R5 = SRR0
 * R6 = SRR1
 * LR = real-mode IP
 *
 */
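/*
 * This stub runs in real mode: it reloads SRR0/SRR1 from r5/r6 and then
 * branches back to the real-mode return address in LR.  The _end label
 * below presumably lets C code compute the size of this stub (an
 * assumption; its user is not visible in this file).
 */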
.global kvmppc_handler_lowmem_trampoline
kvmppc_handler_lowmem_trampoline:

        mtsrr0  r5
        mtsrr1  r6
        blr
kvmppc_handler_lowmem_trampoline_end:

/*
 * Call a function in real mode
 *
 * Input Registers:
 *
 * R3 = function
 * R4 = MSR
 * R5 = scratch register
 *
 */
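/*
 * A minimal usage sketch (an assumption, not shown in this file): load r3
 * with the real-mode function's address and r4 with the MSR it should run
 * under (e.g. MSR_NOIRQ), then call kvmppc_rmcall.  The RFI below hands
 * control to r3 with MSR = r4; LR is not touched here, so the called
 * function can still blr back to kvmppc_rmcall's caller.
 */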
_GLOBAL(kvmppc_rmcall)
        LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ)
        mtmsr   r5              /* Disable relocation and interrupts, so mtsrr
                                   doesn't get interrupted */
        sync
        mtsrr0  r3
        mtsrr1  r4
        RFI

#if defined(CONFIG_PPC_BOOK3S_32)
#define STACK_LR        INT_FRAME_SIZE+4

/* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */
#define MSR_EXT_START                                           \
        PPC_STL r20, _NIP(r1);                                  \
        mfmsr   r20;                                            \
        LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE);                  \
        andc    r3,r20,r3;              /* Disable DR,EE */     \
        mtmsr   r3;                                             \
        sync

#define MSR_EXT_END                                             \
        mtmsr   r20;                    /* Enable DR,EE */      \
        sync;                                                   \
        PPC_LL  r20, _NIP(r1)

#elif defined(CONFIG_PPC_BOOK3S_64)
#define STACK_LR        _LINK
#define MSR_EXT_START
#define MSR_EXT_END
#endif

/*
 * Activate current's external feature (FPU/Altivec/VSX)
 */
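/*
 * define_load_up(what) emits kvmppc_load_up_<what>: it creates a minimal
 * stack frame, saves LR, and (on Book3S_32, via MSR_EXT_START/END) turns
 * off MSR_DR/MSR_EE around the call to the kernel's load_up_<what> routine
 * before restoring LR and the stack pointer.
 */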
#define define_load_up(what)                                    \
                                                                \
_GLOBAL(kvmppc_load_up_ ## what);                               \
        PPC_STLU r1, -INT_FRAME_SIZE(r1);                       \
        mflr    r3;                                             \
        PPC_STL r3, STACK_LR(r1);                               \
        MSR_EXT_START;                                          \
                                                                \
        bl      FUNC(load_up_ ## what);                         \
                                                                \
        MSR_EXT_END;                                            \
        PPC_LL  r3, STACK_LR(r1);                               \
        mtlr    r3;                                             \
        addi    r1, r1, INT_FRAME_SIZE;                         \
        blr

define_load_up(fpu)
#ifdef CONFIG_ALTIVEC
define_load_up(altivec)
#endif
#ifdef CONFIG_VSX
define_load_up(vsx)
#endif
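
/*
 * The actual guest entry/exit code (including kvmppc_handler_trampoline_exit,
 * branched to above) comes from book3s_segment.S, included below.
 */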
#include "book3s_segment.S"