arch/powerpc/kernel/exceptions-64s.S
1 /*
2  * This file contains the 64-bit "server" PowerPC variant
3  * of the low level exception handling including exception
4  * vectors, exception return, part of the slb and stab
5  * handling and other fixed offset specific things.
6  *
7  * This file is meant to be #included from head_64.S due to
8  * position dependent assembly.
9  *
10  * Most of this originates from head_64.S and thus has the same
11  * copyright history.
12  *
13  */
15 #include <asm/hw_irq.h>
16 #include <asm/exception-64s.h>
17 #include <asm/ptrace.h>
19 /*
20  * We layout physical memory as follows:
21  * 0x0000 - 0x00ff : Secondary processor spin code
22  * 0x0100 - 0x17ff : pSeries Interrupt prologs
23  * 0x1800 - 0x4000 : interrupt support common interrupt prologs
24  * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
25  * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
26  * 0x7000 - 0x7fff : FWNMI data area
27  * 0x8000 - 0x8fff : Initial (CPU0) segment table
28  * 0x9000 -        : Early init and support code
29  */
30         /* Syscall routine is used twice, in reloc-off and reloc-on paths */
31 #define SYSCALL_PSERIES_1                                       \
32 BEGIN_FTR_SECTION                                               \
33         cmpdi   r0,0x1ebe ;                                     \
34         beq-    1f ;                                            \
35 END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                          \
36         mr      r9,r13 ;                                        \
37         GET_PACA(r13) ;                                         \
38         mfspr   r11,SPRN_SRR0 ;                                 \
39 0:
41 #define SYSCALL_PSERIES_2_RFID                                  \
42         mfspr   r12,SPRN_SRR1 ;                                 \
43         ld      r10,PACAKBASE(r13) ;                            \
44         LOAD_HANDLER(r10, system_call_entry) ;                  \
45         mtspr   SPRN_SRR0,r10 ;                                 \
46         ld      r10,PACAKMSR(r13) ;                             \
47         mtspr   SPRN_SRR1,r10 ;                                 \
48         rfid ;                                                  \
49         b       . ;     /* prevent speculative execution */
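        /*
         * Note on the RFID path above: the handler address is built at run
         * time from PACAKBASE plus the handler offset (via LOAD_HANDLER) and
         * placed in SRR0, the kernel MSR from PACAKMSR goes into SRR1, so
         * the rfid lands in system_call_entry with the kernel's normal MSR.
         * The trailing "b ." only stops speculative execution past the
         * context-synchronising rfid.
         */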
51 #define SYSCALL_PSERIES_3                                       \
52         /* Fast LE/BE switch system call */                     \
53 1:      mfspr   r12,SPRN_SRR1 ;                                 \
54         xori    r12,r12,MSR_LE ;                                \
55         mtspr   SPRN_SRR1,r12 ;                                 \
56         rfid ;          /* return to userspace */               \
57         b       . ;                                             \
58 2:      mfspr   r12,SPRN_SRR1 ;                                 \
59         andi.   r12,r12,MSR_PR ;                                \
60         bne     0b ;                                            \
61         mtspr   SPRN_SRR0,r3 ;                                  \
62         mtspr   SPRN_SRR1,r4 ;                                  \
63         mtspr   SPRN_SDR1,r5 ;                                  \
64         rfid ;                                                  \
65         b       . ;     /* prevent speculative execution */
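        /*
         * SYSCALL_PSERIES_3 above is the fast "real LE" switch: when
         * SYSCALL_PSERIES_1 sees syscall number 0x1ebe on CPUs with
         * CPU_FTR_REAL_LE, label 1 simply flips MSR_LE in SRR1 and rfids
         * straight back to userspace without entering the kernel proper.
         * The label 2 path only proceeds when the caller was not in problem
         * state (MSR_PR clear), and then installs new SRR0/SRR1/SDR1 values
         * from r3/r4/r5 before returning.
         */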
67 #if defined(CONFIG_RELOCATABLE)
68         /*
69          * We can't branch directly; in the direct case we use LR
70          * and system_call_entry restores LR.  (We thus need to move
71          * LR to r10 in the RFID case too.)
72          */
73 #define SYSCALL_PSERIES_2_DIRECT                                \
74         mflr    r10 ;                                           \
75         ld      r12,PACAKBASE(r13) ;                            \
76         LOAD_HANDLER(r12, system_call_entry_direct) ;           \
77         mtctr   r12 ;                                           \
78         mfspr   r12,SPRN_SRR1 ;                                 \
79         /* Re-use of r13... No spare regs to do this */ \
80         li      r13,MSR_RI ;                                    \
81         mtmsrd  r13,1 ;                                         \
82         GET_PACA(r13) ; /* get r13 back */                      \
83         bctr ;
84 #else
85         /* We can branch directly */
86 #define SYSCALL_PSERIES_2_DIRECT                                \
87         mfspr   r12,SPRN_SRR1 ;                                 \
88         li      r10,MSR_RI ;                                    \
89         mtmsrd  r10,1 ;                 /* Set RI (EE=0) */     \
90         b       system_call_entry_direct ;
91 #endif
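        /*
         * In the CONFIG_RELOCATABLE variant above the handler address must
         * be computed at run time (PACAKBASE + LOAD_HANDLER) and reached via
         * CTR, because the copied-down vectors cannot rely on a fixed
         * distance to system_call_entry_direct.  r13 is briefly reused to
         * set MSR_RI and then reloaded from the PACA before the bctr.  The
         * non-relocatable variant can simply branch there directly.
         */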
93 /*
94  * This is the start of the interrupt handlers for pSeries
95  * This code runs with relocation off.
96  * Code from here to __end_interrupts gets copied down to real
97  * address 0x100 when we are running a relocatable kernel.
98  * Therefore any relative branches in this section must only
99  * branch to labels in this section.
100  */
101         . = 0x100
102         .globl __start_interrupts
103 __start_interrupts:
105         .globl system_reset_pSeries;
106 system_reset_pSeries:
107         HMT_MEDIUM_PPR_DISCARD
108         SET_SCRATCH0(r13)
109 #ifdef CONFIG_PPC_P7_NAP
110 BEGIN_FTR_SECTION
111         /* Running native on arch 2.06 or later, check if we are
112          * waking up from nap. We only handle no state loss and
113          * supervisor state loss. We do -not- handle hypervisor
114          * state loss at this time.
115          */
116         mfspr   r13,SPRN_SRR1
117         rlwinm. r13,r13,47-31,30,31
118         beq     9f
120         /* waking up from powersave (nap) state */
121         cmpwi   cr1,r13,2
122         /* Total loss of HV state is fatal; we could try to use the
123          * PIR to locate a PACA, then use an emergency stack etc...
124          * but for now, let's just stay stuck here
125          */
126         bgt     cr1,.
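        /*
         * The rlwinm above rotates SRR1 left by 16 and keeps only the low
         * two bits, i.e. it extracts SRR1 bits 46:47 (the wake-state field)
         * into r13.  A result of 0 means this was not a power-save wakeup
         * (branch to 9f); 1 is handled as no state loss, 2 as supervisor
         * state loss, and anything greater is treated as unrecoverable here.
         */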
127         GET_PACA(r13)
129 #ifdef CONFIG_KVM_BOOK3S_64_HV
130         li      r0,KVM_HWTHREAD_IN_KERNEL
131         stb     r0,HSTATE_HWTHREAD_STATE(r13)
132         /* Order setting hwthread_state vs. testing hwthread_req */
133         sync
134         lbz     r0,HSTATE_HWTHREAD_REQ(r13)
135         cmpwi   r0,0
136         beq     1f
137         b       kvm_start_guest
138 1:
139 #endif
141         beq     cr1,2f
142         b       .power7_wakeup_noloss
143 2:      b       .power7_wakeup_loss
144 9:
145 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
146 #endif /* CONFIG_PPC_P7_NAP */
147         EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
148                                  NOTEST, 0x100)
150         . = 0x200
151 machine_check_pSeries_1:
152         /* This is moved out of line as it can be patched by FW, but
153          * some code path might still want to branch into the original
154          * vector
155          */
156         HMT_MEDIUM_PPR_DISCARD
157         SET_SCRATCH0(r13)               /* save r13 */
158         EXCEPTION_PROLOG_0(PACA_EXMC)
159         b       machine_check_pSeries_0
161         . = 0x300
162         .globl data_access_pSeries
163 data_access_pSeries:
164         HMT_MEDIUM_PPR_DISCARD
165         SET_SCRATCH0(r13)
166 BEGIN_FTR_SECTION
167         b       data_access_check_stab
168 data_access_not_stab:
169 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
170         EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
171                                  KVMTEST, 0x300)
173         . = 0x380
174         .globl data_access_slb_pSeries
175 data_access_slb_pSeries:
176         HMT_MEDIUM_PPR_DISCARD
177         SET_SCRATCH0(r13)
178         EXCEPTION_PROLOG_0(PACA_EXSLB)
179         EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
180         std     r3,PACA_EXSLB+EX_R3(r13)
181         mfspr   r3,SPRN_DAR
182 #ifdef __DISABLED__
183         /* Keep that around for when we re-implement dynamic VSIDs */
184         cmpdi   r3,0
185         bge     slb_miss_user_pseries
186 #endif /* __DISABLED__ */
187         mfspr   r12,SPRN_SRR1
188 #ifndef CONFIG_RELOCATABLE
189         b       .slb_miss_realmode
190 #else
191         /*
192          * We can't just use a direct branch to .slb_miss_realmode
193          * because the distance from here to there depends on where
194          * the kernel ends up being put.
195          */
196         mfctr   r11
197         ld      r10,PACAKBASE(r13)
198         LOAD_HANDLER(r10, .slb_miss_realmode)
199         mtctr   r10
200         bctr
201 #endif
203         STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
205         . = 0x480
206         .globl instruction_access_slb_pSeries
207 instruction_access_slb_pSeries:
208         HMT_MEDIUM_PPR_DISCARD
209         SET_SCRATCH0(r13)
210         EXCEPTION_PROLOG_0(PACA_EXSLB)
211         EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
212         std     r3,PACA_EXSLB+EX_R3(r13)
213         mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
214 #ifdef __DISABLED__
215         /* Keep that around for when we re-implement dynamic VSIDs */
216         cmpdi   r3,0
217         bge     slb_miss_user_pseries
218 #endif /* __DISABLED__ */
219         mfspr   r12,SPRN_SRR1
220 #ifndef CONFIG_RELOCATABLE
221         b       .slb_miss_realmode
222 #else
223         mfctr   r11
224         ld      r10,PACAKBASE(r13)
225         LOAD_HANDLER(r10, .slb_miss_realmode)
226         mtctr   r10
227         bctr
228 #endif
230         /* We open code these as we can't have a ". = x" (even with
231          * x = ".") within a feature section
232          */
233         . = 0x500;
234         .globl hardware_interrupt_pSeries;
235         .globl hardware_interrupt_hv;
236 hardware_interrupt_pSeries:
237 hardware_interrupt_hv:
238         HMT_MEDIUM_PPR_DISCARD
239         BEGIN_FTR_SECTION
240                 _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
241                                             EXC_HV, SOFTEN_TEST_HV)
242                 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
243         FTR_SECTION_ELSE
244                 _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
245                                             EXC_STD, SOFTEN_TEST_HV_201)
246                 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
247         ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
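        /*
         * The feature section above is resolved at boot: on CPUs with
         * CPU_FTR_HVMODE and CPU_FTR_ARCH_206 the external interrupt uses
         * the HSRR registers and is tagged 0x502 (EXC_HV); otherwise the
         * classic SRR-based 0x500 (EXC_STD) variant is patched in.  The
         * SOFTEN_TEST_* argument selects how the soft-disable test is done
         * in each case.
         */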
249         STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
250         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
252         STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
253         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
255         STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
256         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
258         . = 0x900
259         .globl decrementer_pSeries
260 decrementer_pSeries:
261         _MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)
263         STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
265         MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
266         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
268         STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
269         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
271         . = 0xc00
272         .globl  system_call_pSeries
273 system_call_pSeries:
274         HMT_MEDIUM
275 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
276         SET_SCRATCH0(r13)
277         GET_PACA(r13)
278         std     r9,PACA_EXGEN+EX_R9(r13)
279         std     r10,PACA_EXGEN+EX_R10(r13)
280         mfcr    r9
281         KVMTEST(0xc00)
282         GET_SCRATCH0(r13)
283 #endif
284         SYSCALL_PSERIES_1
285         SYSCALL_PSERIES_2_RFID
286         SYSCALL_PSERIES_3
287         KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
289         STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
290         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
292         /* At 0xe??? we have a bunch of hypervisor exceptions; we branch
293          * out of line to handle them
294          */
295         . = 0xe00
296 hv_data_storage_trampoline:
297         SET_SCRATCH0(r13)
298         EXCEPTION_PROLOG_0(PACA_EXGEN)
299         b       h_data_storage_hv
301         . = 0xe20
302 hv_instr_storage_trampoline:
303         SET_SCRATCH0(r13)
304         EXCEPTION_PROLOG_0(PACA_EXGEN)
305         b       h_instr_storage_hv
307         . = 0xe40
308 emulation_assist_trampoline:
309         SET_SCRATCH0(r13)
310         EXCEPTION_PROLOG_0(PACA_EXGEN)
311         b       emulation_assist_hv
313         . = 0xe60
314 hv_exception_trampoline:
315         SET_SCRATCH0(r13)
316         EXCEPTION_PROLOG_0(PACA_EXGEN)
317         b       hmi_exception_hv
319         . = 0xe80
320 hv_doorbell_trampoline:
321         SET_SCRATCH0(r13)
322         EXCEPTION_PROLOG_0(PACA_EXGEN)
323         b       h_doorbell_hv
325         /* We need to deal with the Altivec unavailable exception
326          * here which is at 0xf20, thus in the middle of the
327          * prolog code of the PerformanceMonitor one. A little
328          * trickery is thus necessary
329          */
330         . = 0xf00
331 performance_monitor_pseries_trampoline:
332         SET_SCRATCH0(r13)
333         EXCEPTION_PROLOG_0(PACA_EXGEN)
334         b       performance_monitor_pSeries
336         . = 0xf20
337 altivec_unavailable_pseries_trampoline:
338         SET_SCRATCH0(r13)
339         EXCEPTION_PROLOG_0(PACA_EXGEN)
340         b       altivec_unavailable_pSeries
342         . = 0xf40
343 vsx_unavailable_pseries_trampoline:
344         SET_SCRATCH0(r13)
345         EXCEPTION_PROLOG_0(PACA_EXGEN)
346         b       vsx_unavailable_pSeries
348         . = 0xf60
349 facility_unavailable_trampoline:
350         SET_SCRATCH0(r13)
351         EXCEPTION_PROLOG_0(PACA_EXGEN)
352         b       facility_unavailable_pSeries
354         . = 0xf80
355 hv_facility_unavailable_trampoline:
356         SET_SCRATCH0(r13)
357         EXCEPTION_PROLOG_0(PACA_EXGEN)
358         b       facility_unavailable_hv
360 #ifdef CONFIG_CBE_RAS
361         STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
362         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
363 #endif /* CONFIG_CBE_RAS */
365         STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
366         KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
368         . = 0x1500
369         .global denorm_exception_hv
370 denorm_exception_hv:
371         HMT_MEDIUM_PPR_DISCARD
372         mtspr   SPRN_SPRG_HSCRATCH0,r13
373         EXCEPTION_PROLOG_0(PACA_EXGEN)
374         EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
376 #ifdef CONFIG_PPC_DENORMALISATION
377         mfspr   r10,SPRN_HSRR1
378         mfspr   r11,SPRN_HSRR0          /* save HSRR0 */
379         andis.  r10,r10,(HSRR1_DENORM)@h /* denorm? */
380         addi    r11,r11,-4              /* HSRR0 is next instruction */
381         bne+    denorm_assist
382 #endif
384         KVMTEST(0x1500)
385         EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
386         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
388 #ifdef CONFIG_CBE_RAS
389         STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
390         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
391 #endif /* CONFIG_CBE_RAS */
393         STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
394         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
396 #ifdef CONFIG_CBE_RAS
397         STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
398         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
399 #else
400         . = 0x1800
401 #endif /* CONFIG_CBE_RAS */
404 /*** Out of line interrupts support ***/
406         .align  7
407         /* moved from 0x200 */
408 machine_check_pSeries:
409         .globl machine_check_fwnmi
410 machine_check_fwnmi:
411         HMT_MEDIUM_PPR_DISCARD
412         SET_SCRATCH0(r13)               /* save r13 */
413         EXCEPTION_PROLOG_0(PACA_EXMC)
414 machine_check_pSeries_0:
415         EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
416         EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
417         KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
419         /* moved from 0x300 */
420 data_access_check_stab:
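        /*
         * Decide whether this 0x300 fault is really a segment-table miss:
         * r10 ends up holding (DAR >> 60), with the DSISR "no segment" bit
         * (0x00200000, the same 0x0020 tested in do_hash_page) folded in as
         * 0x20, plus KVM guest state when CONFIG_KVM_BOOK3S_PR is set.  A
         * value of 0x2c therefore means a kernel-region address whose
         * segment entry is missing, which goes to do_stab_bolted_pSeries;
         * everything else falls back to the normal data access path.
         */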
421         GET_PACA(r13)
422         std     r9,PACA_EXSLB+EX_R9(r13)
423         std     r10,PACA_EXSLB+EX_R10(r13)
424         mfspr   r10,SPRN_DAR
425         mfspr   r9,SPRN_DSISR
426         srdi    r10,r10,60
427         rlwimi  r10,r9,16,0x20
428 #ifdef CONFIG_KVM_BOOK3S_PR
429         lbz     r9,HSTATE_IN_GUEST(r13)
430         rlwimi  r10,r9,8,0x300
431 #endif
432         mfcr    r9
433         cmpwi   r10,0x2c
434         beq     do_stab_bolted_pSeries
435         mtcrf   0x80,r9
436         ld      r9,PACA_EXSLB+EX_R9(r13)
437         ld      r10,PACA_EXSLB+EX_R10(r13)
438         b       data_access_not_stab
439 do_stab_bolted_pSeries:
440         std     r11,PACA_EXSLB+EX_R11(r13)
441         std     r12,PACA_EXSLB+EX_R12(r13)
442         GET_SCRATCH0(r10)
443         std     r10,PACA_EXSLB+EX_R13(r13)
444         EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
446         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
447         KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
448         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
449         KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
450         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
451         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
453 #ifdef CONFIG_PPC_DENORMALISATION
454 denorm_assist:
455 BEGIN_FTR_SECTION
456 /*
457  * To denormalise we need to move a copy of the register to itself.
458  * For POWER6 do that here for all FP regs.
459  */
460         mfmsr   r10
461         ori     r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
462         xori    r10,r10,(MSR_FE0|MSR_FE1)
463         mtmsrd  r10
464         sync
466 #define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
467 #define FMR4(n)  FMR2(n) ; FMR2(n+2)
468 #define FMR8(n)  FMR4(n) ; FMR4(n+4)
469 #define FMR16(n) FMR8(n) ; FMR8(n+8)
470 #define FMR32(n) FMR16(n) ; FMR16(n+16)
471         FMR32(0)
473 FTR_SECTION_ELSE
474 /*
475  * To denormalise we need to move a copy of the register to itself.
476  * For POWER7 do that here for the first 32 VSX registers only.
477  */
478         mfmsr   r10
479         oris    r10,r10,MSR_VSX@h
480         mtmsrd  r10
481         sync
483 #define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
484 #define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
485 #define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
486 #define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
487 #define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
488         XVCPSGNDP32(0)
490 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
492 BEGIN_FTR_SECTION
493         b       denorm_done
494 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
495 /*
496  * To denormalise we need to move a copy of the register to itself.
497  * For POWER8 we need to do that for all 64 VSX registers
498  */
499         XVCPSGNDP32(32)
500 denorm_done:
501         mtspr   SPRN_HSRR0,r11
502         mtcrf   0x80,r9
503         ld      r9,PACA_EXGEN+EX_R9(r13)
504         RESTORE_PPR_PACA(PACA_EXGEN, r10)
505 BEGIN_FTR_SECTION
506         ld      r10,PACA_EXGEN+EX_CFAR(r13)
507         mtspr   SPRN_CFAR,r10
508 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
509         ld      r10,PACA_EXGEN+EX_R10(r13)
510         ld      r11,PACA_EXGEN+EX_R11(r13)
511         ld      r12,PACA_EXGEN+EX_R12(r13)
512         ld      r13,PACA_EXGEN+EX_R13(r13)
513         HRFID
514         b       .
515 #endif
517         .align  7
518         /* moved from 0xe00 */
519         STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
520         KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
521         STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
522         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
523         STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
524         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
525         STD_EXCEPTION_HV_OOL(0xe62, hmi_exception) /* need to flush cache ? */
526         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
527         MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
528         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
530         /* moved from 0xf00 */
531         STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
532         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
533         STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
534         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
535         STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
536         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
537         STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
538         KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
539         STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
540         KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
542 /*
543  * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
544  * - If it was a decrementer interrupt, we bump the dec to max and return.
545  * - If it was a doorbell we return immediately since doorbells are edge
546  *   triggered and won't automatically refire.
547  * - else we hard disable and return.
548  * This is called with r10 containing the value to OR to the paca field.
549  */
550 #define MASKED_INTERRUPT(_H)                            \
551 masked_##_H##interrupt:                                 \
552         std     r11,PACA_EXGEN+EX_R11(r13);             \
553         lbz     r11,PACAIRQHAPPENED(r13);               \
554         or      r11,r11,r10;                            \
555         stb     r11,PACAIRQHAPPENED(r13);               \
556         cmpwi   r10,PACA_IRQ_DEC;                       \
557         bne     1f;                                     \
558         lis     r10,0x7fff;                             \
559         ori     r10,r10,0xffff;                         \
560         mtspr   SPRN_DEC,r10;                           \
561         b       2f;                                     \
562 1:      cmpwi   r10,PACA_IRQ_DBELL;                     \
563         beq     2f;                                     \
564         mfspr   r10,SPRN_##_H##SRR1;                    \
565         rldicl  r10,r10,48,1; /* clear MSR_EE */        \
566         rotldi  r10,r10,16;                             \
567         mtspr   SPRN_##_H##SRR1,r10;                    \
568 2:      mtcrf   0x80,r9;                                \
569         ld      r9,PACA_EXGEN+EX_R9(r13);               \
570         ld      r10,PACA_EXGEN+EX_R10(r13);             \
571         ld      r11,PACA_EXGEN+EX_R11(r13);             \
572         GET_SCRATCH0(r13);                              \
573         ##_H##rfid;                                     \
574         b       .
575         
576         MASKED_INTERRUPT()
577         MASKED_INTERRUPT(H)
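        /*
         * Summary of the masked path above: the pending source is recorded
         * in paca->irq_happened (r10 carries the PACA_IRQ_* bit).  A masked
         * decrementer is re-armed to the maximum value (0x7fffffff) so it
         * won't fire again immediately; a doorbell needs no action; any
         * other source has MSR_EE cleared in the saved SRR1 (the rldicl /
         * rotldi pair rotates bit 48 down, clears it, and rotates it back),
         * leaving the interrupt hard-disabled until it is replayed.
         */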
579 /*
580  * Called from arch_local_irq_enable when an interrupt needs
581  * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
582  * which kind of interrupt. MSR:EE is already off. We generate a
583  * stackframe as if a real interrupt had happened.
584  *
585  * Note: While MSR:EE is off, we need to make sure that _MSR
586  * in the generated frame has EE set to 1 or the exception
587  * handler will not properly re-enable interrupts.
588  */
589 _GLOBAL(__replay_interrupt)
590         /* We are going to jump to the exception common code which
591          * will retrieve various register values from the PACA which
592          * we don't give a damn about, so we don't bother storing them.
593          */
594         mfmsr   r12
595         mflr    r11
596         mfcr    r9
597         ori     r12,r12,MSR_EE
598         cmpwi   r3,0x900
599         beq     decrementer_common
600         cmpwi   r3,0x500
601         beq     hardware_interrupt_common
602 BEGIN_FTR_SECTION
603         cmpwi   r3,0xe80
604         beq     h_doorbell_common
605 FTR_SECTION_ELSE
606         cmpwi   r3,0xa00
607         beq     doorbell_super_common
608 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
609         blr
611 #ifdef CONFIG_PPC_PSERIES
612 /*
613  * Vectors for the FWNMI option.  Share common code.
614  */
615         .globl system_reset_fwnmi
616       .align 7
617 system_reset_fwnmi:
618         HMT_MEDIUM_PPR_DISCARD
619         SET_SCRATCH0(r13)               /* save r13 */
620         EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
621                                  NOTEST, 0x100)
623 #endif /* CONFIG_PPC_PSERIES */
625 #ifdef __DISABLED__
626 /*
627  * This is used for when the SLB miss handler has to go virtual,
628  * which doesn't happen at the moment but will once we re-implement
629  * dynamic VSIDs for shared page tables
630  */
631 slb_miss_user_pseries:
632         std     r10,PACA_EXGEN+EX_R10(r13)
633         std     r11,PACA_EXGEN+EX_R11(r13)
634         std     r12,PACA_EXGEN+EX_R12(r13)
635         GET_SCRATCH0(r10)
636         ld      r11,PACA_EXSLB+EX_R9(r13)
637         ld      r12,PACA_EXSLB+EX_R3(r13)
638         std     r10,PACA_EXGEN+EX_R13(r13)
639         std     r11,PACA_EXGEN+EX_R9(r13)
640         std     r12,PACA_EXGEN+EX_R3(r13)
641         clrrdi  r12,r13,32
642         mfmsr   r10
643         mfspr   r11,SRR0                        /* save SRR0 */
644         ori     r12,r12,slb_miss_user_common@l  /* virt addr of handler */
645         ori     r10,r10,MSR_IR|MSR_DR|MSR_RI
646         mtspr   SRR0,r12
647         mfspr   r12,SRR1                        /* and SRR1 */
648         mtspr   SRR1,r10
649         rfid
650         b       .                               /* prevent spec. execution */
651 #endif /* __DISABLED__ */
653 /*
654  * Code from here down to __end_handlers is invoked from the
655  * exception prologs above.  Because the prologs assemble the
656  * addresses of these handlers using the LOAD_HANDLER macro,
657  * which uses an ori instruction, these handlers must be in
658  * the first 64k of the kernel image.
659  */
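/*
 * (LOAD_HANDLER is assumed to do that with a single ori of the handler's
 * offset from the start of the kernel into a register that already holds
 * PACAKBASE; that is what limits these handlers to the first 64k, as noted
 * above.)
 */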
661 /*** Common interrupt handlers ***/
663         STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
665         /*
666          * Machine check is different because we use a different
667          * save area: PACA_EXMC instead of PACA_EXGEN.
668          */
669         .align  7
670         .globl machine_check_common
671 machine_check_common:
673         mfspr   r10,SPRN_DAR
674         std     r10,PACA_EXGEN+EX_DAR(r13)
675         mfspr   r10,SPRN_DSISR
676         stw     r10,PACA_EXGEN+EX_DSISR(r13)
677         EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
678         FINISH_NAP
679         DISABLE_INTS
680         ld      r3,PACA_EXGEN+EX_DAR(r13)
681         lwz     r4,PACA_EXGEN+EX_DSISR(r13)
682         std     r3,_DAR(r1)
683         std     r4,_DSISR(r1)
684         bl      .save_nvgprs
685         addi    r3,r1,STACK_FRAME_OVERHEAD
686         bl      .machine_check_exception
687         b       .ret_from_except
689         STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
690         STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
691         STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
692 #ifdef CONFIG_PPC_DOORBELL
693         STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception)
694 #else
695         STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception)
696 #endif
697         STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
698         STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
699         STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
700         STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
701         STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
702 #ifdef CONFIG_PPC_DOORBELL
703         STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
704 #else
705         STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception)
706 #endif
707         STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
708         STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
709         STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
710 #ifdef CONFIG_ALTIVEC
711         STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
712 #else
713         STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
714 #endif
715 #ifdef CONFIG_CBE_RAS
716         STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
717         STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
718         STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
719 #endif /* CONFIG_CBE_RAS */
721         /*
722          * Relocation-on interrupts: A subset of the interrupts can be delivered
723          * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
724          * it.  Addresses are the same as the original interrupt addresses, but
725          * offset by 0xc000000000004000.
726          * It's impossible to receive interrupts below 0x300 via this mechanism.
727          * KVM: None of these traps are from the guest ; anything that escalated
728          * to HV=1 from HV=0 is delivered via real mode handlers.
729          */
731         /*
732          * This uses the standard macro, since the original 0x300 vector
733          * only has extra guff for STAB-based processors -- which never
734          * come here.
735          */
736         STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
737         . = 0x4380
738         .globl data_access_slb_relon_pSeries
739 data_access_slb_relon_pSeries:
740         SET_SCRATCH0(r13)
741         EXCEPTION_PROLOG_0(PACA_EXSLB)
742         EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
743         std     r3,PACA_EXSLB+EX_R3(r13)
744         mfspr   r3,SPRN_DAR
745         mfspr   r12,SPRN_SRR1
746 #ifndef CONFIG_RELOCATABLE
747         b       .slb_miss_realmode
748 #else
749         /*
750          * We can't just use a direct branch to .slb_miss_realmode
751          * because the distance from here to there depends on where
752          * the kernel ends up being put.
753          */
754         mfctr   r11
755         ld      r10,PACAKBASE(r13)
756         LOAD_HANDLER(r10, .slb_miss_realmode)
757         mtctr   r10
758         bctr
759 #endif
761         STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
762         . = 0x4480
763         .globl instruction_access_slb_relon_pSeries
764 instruction_access_slb_relon_pSeries:
765         SET_SCRATCH0(r13)
766         EXCEPTION_PROLOG_0(PACA_EXSLB)
767         EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
768         std     r3,PACA_EXSLB+EX_R3(r13)
769         mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
770         mfspr   r12,SPRN_SRR1
771 #ifndef CONFIG_RELOCATABLE
772         b       .slb_miss_realmode
773 #else
774         mfctr   r11
775         ld      r10,PACAKBASE(r13)
776         LOAD_HANDLER(r10, .slb_miss_realmode)
777         mtctr   r10
778         bctr
779 #endif
781         . = 0x4500
782         .globl hardware_interrupt_relon_pSeries;
783         .globl hardware_interrupt_relon_hv;
784 hardware_interrupt_relon_pSeries:
785 hardware_interrupt_relon_hv:
786         BEGIN_FTR_SECTION
787                 _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
788         FTR_SECTION_ELSE
789                 _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
790         ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
791         STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
792         STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
793         STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
794         MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
795         STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
796         MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
797         STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
799         . = 0x4c00
800         .globl system_call_relon_pSeries
801 system_call_relon_pSeries:
802         HMT_MEDIUM
803         SYSCALL_PSERIES_1
804         SYSCALL_PSERIES_2_DIRECT
805         SYSCALL_PSERIES_3
807         STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
809         . = 0x4e00
810         b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
812         . = 0x4e20
813         b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
815         . = 0x4e40
816 emulation_assist_relon_trampoline:
817         SET_SCRATCH0(r13)
818         EXCEPTION_PROLOG_0(PACA_EXGEN)
819         b       emulation_assist_relon_hv
821         . = 0x4e60
822         b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
824         . = 0x4e80
825 h_doorbell_relon_trampoline:
826         SET_SCRATCH0(r13)
827         EXCEPTION_PROLOG_0(PACA_EXGEN)
828         b       h_doorbell_relon_hv
830         . = 0x4f00
831 performance_monitor_relon_pseries_trampoline:
832         SET_SCRATCH0(r13)
833         EXCEPTION_PROLOG_0(PACA_EXGEN)
834         b       performance_monitor_relon_pSeries
836         . = 0x4f20
837 altivec_unavailable_relon_pseries_trampoline:
838         SET_SCRATCH0(r13)
839         EXCEPTION_PROLOG_0(PACA_EXGEN)
840         b       altivec_unavailable_relon_pSeries
842         . = 0x4f40
843 vsx_unavailable_relon_pseries_trampoline:
844         SET_SCRATCH0(r13)
845         EXCEPTION_PROLOG_0(PACA_EXGEN)
846         b       vsx_unavailable_relon_pSeries
848         . = 0x4f60
849 facility_unavailable_relon_trampoline:
850         SET_SCRATCH0(r13)
851         EXCEPTION_PROLOG_0(PACA_EXGEN)
852         b       facility_unavailable_relon_pSeries
854         . = 0x4f80
855 hv_facility_unavailable_relon_trampoline:
856         SET_SCRATCH0(r13)
857         EXCEPTION_PROLOG_0(PACA_EXGEN)
858         b       hv_facility_unavailable_relon_hv
860         STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
861 #ifdef CONFIG_PPC_DENORMALISATION
862         . = 0x5500
863         b       denorm_exception_hv
864 #endif
865         STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
867         /* Other future vectors */
868         .align  7
869         .globl  __end_interrupts
870 __end_interrupts:
872         .align  7
873 system_call_entry_direct:
874 #if defined(CONFIG_RELOCATABLE)
875         /* The first level prologue may have used LR to get here, saving
876          * orig in r10.  To save hacking/ifdeffing common code, restore here.
877          */
878         mtlr    r10
879 #endif
880 system_call_entry:
881         b       system_call_common
883 ppc64_runlatch_on_trampoline:
884         b       .__ppc64_runlatch_on
886 /*
887  * Here we have detected that the kernel stack pointer is bad.
888  * R9 contains the saved CR, r13 points to the paca,
889  * r10 contains the (bad) kernel stack pointer,
890  * r11 and r12 contain the saved SRR0 and SRR1.
891  * We switch to using an emergency stack, save the registers there,
892  * and call kernel_bad_stack(), which panics.
893  */
894 bad_stack:
895         ld      r1,PACAEMERGSP(r13)
896         subi    r1,r1,64+INT_FRAME_SIZE
897         std     r9,_CCR(r1)
898         std     r10,GPR1(r1)
899         std     r11,_NIP(r1)
900         std     r12,_MSR(r1)
901         mfspr   r11,SPRN_DAR
902         mfspr   r12,SPRN_DSISR
903         std     r11,_DAR(r1)
904         std     r12,_DSISR(r1)
905         mflr    r10
906         mfctr   r11
907         mfxer   r12
908         std     r10,_LINK(r1)
909         std     r11,_CTR(r1)
910         std     r12,_XER(r1)
911         SAVE_GPR(0,r1)
912         SAVE_GPR(2,r1)
913         ld      r10,EX_R3(r3)
914         std     r10,GPR3(r1)
915         SAVE_GPR(4,r1)
916         SAVE_4GPRS(5,r1)
917         ld      r9,EX_R9(r3)
918         ld      r10,EX_R10(r3)
919         SAVE_2GPRS(9,r1)
920         ld      r9,EX_R11(r3)
921         ld      r10,EX_R12(r3)
922         ld      r11,EX_R13(r3)
923         std     r9,GPR11(r1)
924         std     r10,GPR12(r1)
925         std     r11,GPR13(r1)
926 BEGIN_FTR_SECTION
927         ld      r10,EX_CFAR(r3)
928         std     r10,ORIG_GPR3(r1)
929 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
930         SAVE_8GPRS(14,r1)
931         SAVE_10GPRS(22,r1)
932         lhz     r12,PACA_TRAP_SAVE(r13)
933         std     r12,_TRAP(r1)
934         addi    r11,r1,INT_FRAME_SIZE
935         std     r11,0(r1)
936         li      r12,0
937         std     r12,0(r11)
938         ld      r2,PACATOC(r13)
939         ld      r11,exception_marker@toc(r2)
940         std     r12,RESULT(r1)
941         std     r11,STACK_FRAME_OVERHEAD-16(r1)
942 1:      addi    r3,r1,STACK_FRAME_OVERHEAD
943         bl      .kernel_bad_stack
944         b       1b
946 /*
947  * Here r13 points to the paca, r9 contains the saved CR,
948  * SRR0 and SRR1 are saved in r11 and r12,
949  * r9 - r13 are saved in paca->exgen.
950  */
951         .align  7
952         .globl data_access_common
953 data_access_common:
954         mfspr   r10,SPRN_DAR
955         std     r10,PACA_EXGEN+EX_DAR(r13)
956         mfspr   r10,SPRN_DSISR
957         stw     r10,PACA_EXGEN+EX_DSISR(r13)
958         EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
959         DISABLE_INTS
960         ld      r12,_MSR(r1)
961         ld      r3,PACA_EXGEN+EX_DAR(r13)
962         lwz     r4,PACA_EXGEN+EX_DSISR(r13)
963         li      r5,0x300
964         b       .do_hash_page           /* Try to handle as hpte fault */
966         .align  7
967         .globl  h_data_storage_common
968 h_data_storage_common:
969         mfspr   r10,SPRN_HDAR
970         std     r10,PACA_EXGEN+EX_DAR(r13)
971         mfspr   r10,SPRN_HDSISR
972         stw     r10,PACA_EXGEN+EX_DSISR(r13)
973         EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
974         bl      .save_nvgprs
975         DISABLE_INTS
976         addi    r3,r1,STACK_FRAME_OVERHEAD
977         bl      .unknown_exception
978         b       .ret_from_except
980         .align  7
981         .globl instruction_access_common
982 instruction_access_common:
983         EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
984         DISABLE_INTS
985         ld      r12,_MSR(r1)
986         ld      r3,_NIP(r1)
987         andis.  r4,r12,0x5820
988         li      r5,0x400
989         b       .do_hash_page           /* Try to handle as hpte fault */
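        /*
         * For an instruction fault there is no DSISR; the andis. above
         * masks the fault status bits out of the saved SRR1 value (reloaded
         * into r12 from _MSR(r1)) into r4, which stands in for the DSISR
         * argument to do_hash_page, and r5 = 0x400 identifies the trap.
         */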
991         STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
993 /*
994  * Here is the common SLB miss user handler that is used when going to
995  * virtual mode for SLB misses; it is currently not used
996  */
997 #ifdef __DISABLED__
998         .align  7
999         .globl  slb_miss_user_common
1000 slb_miss_user_common:
1001         mflr    r10
1002         std     r3,PACA_EXGEN+EX_DAR(r13)
1003         stw     r9,PACA_EXGEN+EX_CCR(r13)
1004         std     r10,PACA_EXGEN+EX_LR(r13)
1005         std     r11,PACA_EXGEN+EX_SRR0(r13)
1006         bl      .slb_allocate_user
1008         ld      r10,PACA_EXGEN+EX_LR(r13)
1009         ld      r3,PACA_EXGEN+EX_R3(r13)
1010         lwz     r9,PACA_EXGEN+EX_CCR(r13)
1011         ld      r11,PACA_EXGEN+EX_SRR0(r13)
1012         mtlr    r10
1013         beq-    slb_miss_fault
1015         andi.   r10,r12,MSR_RI          /* check for unrecoverable exception */
1016         beq-    unrecov_user_slb
1017         mfmsr   r10
1019 .machine push
1020 .machine "power4"
1021         mtcrf   0x80,r9
1022 .machine pop
1024         clrrdi  r10,r10,2               /* clear RI before setting SRR0/1 */
1025         mtmsrd  r10,1
1027         mtspr   SRR0,r11
1028         mtspr   SRR1,r12
1030         ld      r9,PACA_EXGEN+EX_R9(r13)
1031         ld      r10,PACA_EXGEN+EX_R10(r13)
1032         ld      r11,PACA_EXGEN+EX_R11(r13)
1033         ld      r12,PACA_EXGEN+EX_R12(r13)
1034         ld      r13,PACA_EXGEN+EX_R13(r13)
1035         rfid
1036         b       .
1038 slb_miss_fault:
1039         EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
1040         ld      r4,PACA_EXGEN+EX_DAR(r13)
1041         li      r5,0
1042         std     r4,_DAR(r1)
1043         std     r5,_DSISR(r1)
1044         b       handle_page_fault
1046 unrecov_user_slb:
1047         EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1048         DISABLE_INTS
1049         bl      .save_nvgprs
1050 1:      addi    r3,r1,STACK_FRAME_OVERHEAD
1051         bl      .unrecoverable_exception
1052         b       1b
1054 #endif /* __DISABLED__ */
1057         .align  7
1058         .globl alignment_common
1059 alignment_common:
1060         mfspr   r10,SPRN_DAR
1061         std     r10,PACA_EXGEN+EX_DAR(r13)
1062         mfspr   r10,SPRN_DSISR
1063         stw     r10,PACA_EXGEN+EX_DSISR(r13)
1064         EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1065         ld      r3,PACA_EXGEN+EX_DAR(r13)
1066         lwz     r4,PACA_EXGEN+EX_DSISR(r13)
1067         std     r3,_DAR(r1)
1068         std     r4,_DSISR(r1)
1069         bl      .save_nvgprs
1070         DISABLE_INTS
1071         addi    r3,r1,STACK_FRAME_OVERHEAD
1072         bl      .alignment_exception
1073         b       .ret_from_except
1075         .align  7
1076         .globl program_check_common
1077 program_check_common:
1078         EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1079         bl      .save_nvgprs
1080         DISABLE_INTS
1081         addi    r3,r1,STACK_FRAME_OVERHEAD
1082         bl      .program_check_exception
1083         b       .ret_from_except
1085         .align  7
1086         .globl fp_unavailable_common
1087 fp_unavailable_common:
1088         EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1089         bne     1f                      /* if from user, just load it up */
1090         bl      .save_nvgprs
1091         DISABLE_INTS
1092         addi    r3,r1,STACK_FRAME_OVERHEAD
1093         bl      .kernel_fp_unavailable_exception
1094         BUG_OPCODE
1095 1:
1096 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1097 BEGIN_FTR_SECTION
1098         /* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1099          * transaction), go do TM stuff
1100          */
1101         rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1102         bne-    2f
1103 END_FTR_SECTION_IFSET(CPU_FTR_TM)
1104 #endif
1105         bl      .load_up_fpu
1106         b       fast_exception_return
1107 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1108 2:      /* User process was in a transaction */
1109         bl      .save_nvgprs
1110         DISABLE_INTS
1111         addi    r3,r1,STACK_FRAME_OVERHEAD
1112         bl      .fp_unavailable_tm
1113         b       .ret_from_except
1114 #endif
1115         .align  7
1116         .globl altivec_unavailable_common
1117 altivec_unavailable_common:
1118         EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1119 #ifdef CONFIG_ALTIVEC
1120 BEGIN_FTR_SECTION
1121         beq     1f
1122 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1123   BEGIN_FTR_SECTION_NESTED(69)
1124         /* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1125          * transaction), go do TM stuff
1126          */
1127         rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1128         bne-    2f
1129   END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1130 #endif
1131         bl      .load_up_altivec
1132         b       fast_exception_return
1133 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1134 2:      /* User process was in a transaction */
1135         bl      .save_nvgprs
1136         DISABLE_INTS
1137         addi    r3,r1,STACK_FRAME_OVERHEAD
1138         bl      .altivec_unavailable_tm
1139         b       .ret_from_except
1140 #endif
1141 1:
1142 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1143 #endif
1144         bl      .save_nvgprs
1145         DISABLE_INTS
1146         addi    r3,r1,STACK_FRAME_OVERHEAD
1147         bl      .altivec_unavailable_exception
1148         b       .ret_from_except
1150         .align  7
1151         .globl vsx_unavailable_common
1152 vsx_unavailable_common:
1153         EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
1154 #ifdef CONFIG_VSX
1155 BEGIN_FTR_SECTION
1156         beq     1f
1157 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1158   BEGIN_FTR_SECTION_NESTED(69)
1159         /* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1160          * transaction), go do TM stuff
1161          */
1162         rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1163         bne-    2f
1164   END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1165 #endif
1166         b       .load_up_vsx
1167 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1168 2:      /* User process was in a transaction */
1169         bl      .save_nvgprs
1170         DISABLE_INTS
1171         addi    r3,r1,STACK_FRAME_OVERHEAD
1172         bl      .vsx_unavailable_tm
1173         b       .ret_from_except
1174 #endif
1175 1:
1176 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1177 #endif
1178         bl      .save_nvgprs
1179         DISABLE_INTS
1180         addi    r3,r1,STACK_FRAME_OVERHEAD
1181         bl      .vsx_unavailable_exception
1182         b       .ret_from_except
1184         STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
1185         STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
1187         .align  7
1188         .globl  __end_handlers
1189 __end_handlers:
1191         /* Equivalents to the above handlers for relocation-on interrupt vectors */
1192         STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
1193         MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
1195         STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
1196         STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
1197         STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
1198         STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
1199         STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
1201 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1202 /*
1203  * Data area reserved for FWNMI option.
1204  * This address (0x7000) is fixed by the RPA.
1205  */
1206         . = 0x7000
1207         .globl fwnmi_data_area
1208 fwnmi_data_area:
1210         /* pseries and powernv need to keep the whole page from
1211          * 0x7000 to 0x8000 free for use by the firmware
1212          */
1213         . = 0x8000
1214 #endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1216 /* Space for CPU0's segment table */
1217         .balign 4096
1218         .globl initial_stab
1219 initial_stab:
1220         .space  4096
1222 #ifdef CONFIG_PPC_POWERNV
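/*
 * opal_mc_secondary_handler: judging by its name, the machine check entry
 * used for secondary threads on OPAL (powernv) systems.  r3 carries the
 * real-mode address of the OPAL machine check event; it is converted to a
 * virtual address, stashed in the PACA, and the saved SRR0/SRR1/GPR3 are
 * restored from the event before falling into the normal
 * machine_check_pSeries path.
 */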
1223 _GLOBAL(opal_mc_secondary_handler)
1224         HMT_MEDIUM_PPR_DISCARD
1225         SET_SCRATCH0(r13)
1226         GET_PACA(r13)
1227         clrldi  r3,r3,2
1228         tovirt(r3,r3)
1229         std     r3,PACA_OPAL_MC_EVT(r13)
1230         ld      r13,OPAL_MC_SRR0(r3)
1231         mtspr   SPRN_SRR0,r13
1232         ld      r13,OPAL_MC_SRR1(r3)
1233         mtspr   SPRN_SRR1,r13
1234         ld      r3,OPAL_MC_GPR3(r3)
1235         GET_SCRATCH0(r13)
1236         b       machine_check_pSeries
1237 #endif /* CONFIG_PPC_POWERNV */
1240 /*
1241  * r13 points to the PACA, r9 contains the saved CR,
1242  * r12 contains the saved SRR1, SRR0 is still ready for return
1243  * r3 has the faulting address
1244  * r9 - r13 are saved in paca->exslb.
1245  * r3 is saved in paca->slb_r3
1246  * We assume we aren't going to take any exceptions during this procedure.
1247  */
1248 _GLOBAL(slb_miss_realmode)
1249         mflr    r10
1250 #ifdef CONFIG_RELOCATABLE
1251         mtctr   r11
1252 #endif
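        /*
         * The mtctr above restores CTR from r11: when CONFIG_RELOCATABLE is
         * set we were reached via mtctr/bctr from the 0x380/0x480 prologs,
         * which saved the original CTR in r11 before using CTR for the
         * indirect branch, so it must be put back for the interrupted
         * context.
         */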
1254         stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
1255         std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */
1257         bl      .slb_allocate_realmode
1259         /* All done -- return from exception. */
1261         ld      r10,PACA_EXSLB+EX_LR(r13)
1262         ld      r3,PACA_EXSLB+EX_R3(r13)
1263         lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
1265         mtlr    r10
1267         andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
1268         beq-    2f
1270 .machine        push
1271 .machine        "power4"
1272         mtcrf   0x80,r9
1273         mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
1274 .machine        pop
1276         RESTORE_PPR_PACA(PACA_EXSLB, r9)
1277         ld      r9,PACA_EXSLB+EX_R9(r13)
1278         ld      r10,PACA_EXSLB+EX_R10(r13)
1279         ld      r11,PACA_EXSLB+EX_R11(r13)
1280         ld      r12,PACA_EXSLB+EX_R12(r13)
1281         ld      r13,PACA_EXSLB+EX_R13(r13)
1282         rfid
1283         b       .       /* prevent speculative execution */
1285 2:      mfspr   r11,SPRN_SRR0
1286         ld      r10,PACAKBASE(r13)
1287         LOAD_HANDLER(r10,unrecov_slb)
1288         mtspr   SPRN_SRR0,r10
1289         ld      r10,PACAKMSR(r13)
1290         mtspr   SPRN_SRR1,r10
1291         rfid
1292         b       .
1294 unrecov_slb:
1295         EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1296         DISABLE_INTS
1297         bl      .save_nvgprs
1298 1:      addi    r3,r1,STACK_FRAME_OVERHEAD
1299         bl      .unrecoverable_exception
1300         b       1b
1303 #ifdef CONFIG_PPC_970_NAP
1304 power4_fixup_nap:
1305         andc    r9,r9,r10
1306         std     r9,TI_LOCAL_FLAGS(r11)
1307         ld      r10,_LINK(r1)           /* make idle task do the */
1308         std     r10,_NIP(r1)            /* equivalent of a blr */
1309         blr
1310 #endif
1312 /*
1313  * Hash table stuff
1314  */
1315         .align  7
1316 _STATIC(do_hash_page)
1317         std     r3,_DAR(r1)
1318         std     r4,_DSISR(r1)
1320         andis.  r0,r4,0xa410            /* weird error? */
1321         bne-    handle_page_fault       /* if not, try to insert a HPTE */
1322         andis.  r0,r4,DSISR_DABRMATCH@h
1323         bne-    handle_dabr_fault
1325 BEGIN_FTR_SECTION
1326         andis.  r0,r4,0x0020            /* Is it a segment table fault? */
1327         bne-    do_ste_alloc            /* If so handle it */
1328 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
1330         CURRENT_THREAD_INFO(r11, r1)
1331         lwz     r0,TI_PREEMPT(r11)      /* If we're in an "NMI" */
1332         andis.  r0,r0,NMI_MASK@h        /* (i.e. an irq when soft-disabled) */
1333         bne     77f                     /* then don't call hash_page now */
1334         /*
1335          * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1336          * accessing a userspace segment (even from the kernel). We assume
1337          * kernel addresses always have the high bit set.
1338          */
1339         rlwinm  r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
1340         rotldi  r0,r3,15                /* Move high bit into MSR_PR posn */
1341         orc     r0,r12,r0               /* MSR_PR | ~high_bit */
1342         rlwimi  r4,r0,32-13,30,30       /* becomes _PAGE_USER access bit */
1343         ori     r4,r4,1                 /* add _PAGE_PRESENT */
1344         rlwimi  r4,r5,22+2,31-2,31-2    /* Set _PAGE_EXEC if trap is 0x400 */
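        /*
         * How the _PAGE_USER bit is built above: rotldi moves the top bit
         * of the faulting address into the MSR_PR position, orc then forms
         * MSR_PR | ~high_bit, so the resulting bit is set either when the
         * access came from userspace or when the address itself lies in a
         * user segment; rlwimi drops that single bit into the _PAGE_USER
         * position of the access mask in r4.
         */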
1346         /*
1347          * r3 contains the faulting address
1348          * r4 contains the required access permissions
1349          * r5 contains the trap number
1350          *
1351          * at return r3 = 0 for success, 1 for page fault, negative for error
1352          */
1353         bl      .hash_page              /* build HPTE if possible */
1354         cmpdi   r3,0                    /* see if hash_page succeeded */
1356         /* Success */
1357         beq     fast_exc_return_irq     /* Return from exception on success */
1359         /* Error */
1360         blt-    13f
1362 /* Here we have a page fault that hash_page can't handle. */
1363 handle_page_fault:
1364 11:     ld      r4,_DAR(r1)
1365         ld      r5,_DSISR(r1)
1366         addi    r3,r1,STACK_FRAME_OVERHEAD
1367         bl      .do_page_fault
1368         cmpdi   r3,0
1369         beq+    12f
1370         bl      .save_nvgprs
1371         mr      r5,r3
1372         addi    r3,r1,STACK_FRAME_OVERHEAD
1373         lwz     r4,_DAR(r1)
1374         bl      .bad_page_fault
1375         b       .ret_from_except
1377 /* We have a data breakpoint exception - handle it */
1378 handle_dabr_fault:
1379         bl      .save_nvgprs
1380         ld      r4,_DAR(r1)
1381         ld      r5,_DSISR(r1)
1382         addi    r3,r1,STACK_FRAME_OVERHEAD
1383         bl      .do_break
1384 12:     b       .ret_from_except_lite
1387 /* We have a page fault that hash_page could handle but HV refused
1388  * the PTE insertion
1389  */
1390 13:     bl      .save_nvgprs
1391         mr      r5,r3
1392         addi    r3,r1,STACK_FRAME_OVERHEAD
1393         ld      r4,_DAR(r1)
1394         bl      .low_hash_fault
1395         b       .ret_from_except
1397 /*
1398  * We come here as a result of a DSI at a point where we don't want
1399  * to call hash_page, such as when we are accessing memory (possibly
1400  * user memory) inside a PMU interrupt that occurred while interrupts
1401  * were soft-disabled.  We want to invoke the exception handler for
1402  * the access, or panic if there isn't a handler.
1403  */
1404 77:     bl      .save_nvgprs
1405         mr      r4,r3
1406         addi    r3,r1,STACK_FRAME_OVERHEAD
1407         li      r5,SIGSEGV
1408         bl      .bad_page_fault
1409         b       .ret_from_except
1411         /* here we have a segment miss */
1412 do_ste_alloc:
1413         bl      .ste_allocate           /* try to insert stab entry */
1414         cmpdi   r3,0
1415         bne-    handle_page_fault
1416         b       fast_exception_return
1418 /*
1419  * r13 points to the PACA, r9 contains the saved CR,
1420  * r11 and r12 contain the saved SRR0 and SRR1.
1421  * r9 - r13 are saved in paca->exslb.
1422  * We assume we aren't going to take any exceptions during this procedure.
1423  * We assume (DAR >> 60) == 0xc.
1424  */
1425         .align  7
1426 _GLOBAL(do_stab_bolted)
1427         stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
1428         std     r11,PACA_EXSLB+EX_SRR0(r13)     /* save SRR0 in exc. frame */
1429         mfspr   r11,SPRN_DAR                    /* ea */
1431         /*
1432          * check for bad kernel/user address
1433          * (ea & ~REGION_MASK) >= PGTABLE_RANGE
1434          */
1435         rldicr. r9,r11,4,(63 - 46 - 4)
1436         li      r9,0    /* VSID = 0 for bad address */
1437         bne-    0f
1439         /*
1440          * Calculate VSID:
1441          * This is the kernel vsid, we take the top for context from
1442          * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
1443          * Here we know that (ea >> 60) == 0xc
1444          */
1445         lis     r9,(MAX_USER_CONTEXT + 1)@ha
1446         addi    r9,r9,(MAX_USER_CONTEXT + 1)@l
1448         srdi    r10,r11,SID_SHIFT
1449         rldimi  r10,r9,ESID_BITS,0 /* proto vsid */
1450         ASM_VSID_SCRAMBLE(r10, r9, 256M)
1451         rldic   r9,r10,12,16    /* r9 = vsid << 12 */
1453 0:
1454         /* Hash to the primary group */
1455         ld      r10,PACASTABVIRT(r13)
1456         srdi    r11,r11,SID_SHIFT
1457         rldimi  r10,r11,7,52    /* r10 = first ste of the group */
1459         /* Search the primary group for a free entry */
1460 1:      ld      r11,0(r10)      /* Test valid bit of the current ste    */
1461         andi.   r11,r11,0x80
1462         beq     2f
1463         addi    r10,r10,16
1464         andi.   r11,r10,0x70
1465         bne     1b
1467         /* Stick for only searching the primary group for now.          */
1468         /* At least for now, we use a very simple random castout scheme */
1469         /* Use the TB as a random number ;  OR in 1 to avoid entry 0    */
1470         mftb    r11
1471         rldic   r11,r11,4,57    /* r11 = (r11 << 4) & 0x70 */
1472         ori     r11,r11,0x10
1474         /* r10 currently points to an ste one past the group of interest */
1475         /* make it point to the randomly selected entry                 */
1476         subi    r10,r10,128
1477         or      r10,r10,r11     /* r10 is the entry to invalidate       */
1479         isync                   /* mark the entry invalid               */
1480         ld      r11,0(r10)
1481         rldicl  r11,r11,56,1    /* clear the valid bit */
1482         rotldi  r11,r11,8
1483         std     r11,0(r10)
1484         sync
1486         clrrdi  r11,r11,28      /* Get the esid part of the ste         */
1487         slbie   r11
1489 2:      std     r9,8(r10)       /* Store the vsid part of the ste       */
1490         eieio
1492         mfspr   r11,SPRN_DAR            /* Get the new esid                     */
1493         clrrdi  r11,r11,28      /* Permits a full 32b of ESID           */
1494         ori     r11,r11,0x90    /* Turn on valid and kp                 */
1495         std     r11,0(r10)      /* Put new entry back into the stab     */
1497         sync
1499         /* All done -- return from exception. */
1500         lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
1501         ld      r11,PACA_EXSLB+EX_SRR0(r13)     /* get saved SRR0 */
1503         andi.   r10,r12,MSR_RI
1504         beq-    unrecov_slb
1506         mtcrf   0x80,r9                 /* restore CR */
1508         mfmsr   r10
1509         clrrdi  r10,r10,2
1510         mtmsrd  r10,1
1512         mtspr   SPRN_SRR0,r11
1513         mtspr   SPRN_SRR1,r12
1514         ld      r9,PACA_EXSLB+EX_R9(r13)
1515         ld      r10,PACA_EXSLB+EX_R10(r13)
1516         ld      r11,PACA_EXSLB+EX_R11(r13)
1517         ld      r12,PACA_EXSLB+EX_R12(r13)
1518         ld      r13,PACA_EXSLB+EX_R13(r13)
1519         rfid
1520         b       .       /* prevent speculative execution */