/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 */
#include <asm/exception-64s.h>
/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */
/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:

	STD_EXCEPTION_PSERIES(0x100, system_reset)
_machine_check_pSeries:
	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
	.globl data_access_pSeries
data_access_pSeries:
	mtspr	SPRN_SPRG_SCRATCH0,r13
BEGIN_FTR_SECTION
	mfspr	r13,SPRN_SPRG_PACA
	std	r9,PACA_EXSLB+EX_R9(r13)
	std	r10,PACA_EXSLB+EX_R10(r13)
	mfspr	r10,SPRN_DAR
	mfspr	r9,SPRN_DSISR
	srdi	r10,r10,60
	rlwimi	r10,r9,16,0x20
	mfcr	r9
	cmpwi	r10,0x2c
	beq	do_stab_bolted_pSeries
	ld	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	ld	r11,PACA_EXSLB+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r12,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(data_access_common)
FTR_SECTION_ELSE
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB)
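/*
 * Note on the shuffle above: on SLB-capable CPUs the fault is first
 * staged in PACA_EXSLB in case it turns out to be a bolted segment
 * table access; once we know it is an ordinary data access, the
 * saved registers are moved over to PACA_EXGEN, the save area that
 * data_access_common expects.
 */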
	.globl data_access_slb_pSeries
data_access_slb_pSeries:
	mtspr	SPRN_SPRG_SCRATCH0,r13
	mfspr	r13,SPRN_SPRG_PACA	/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to .slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif
	STD_EXCEPTION_PSERIES(0x400, instruction_access)
	.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
	mtspr	SPRN_SPRG_SCRATCH0,r13
	mfspr	r13,SPRN_SPRG_PACA	/* get paca address into r13 */
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	std	r9,PACA_EXSLB+EX_R9(r13)	/* save r9 - r12 */
	mfcr	r9
#ifdef __DISABLED__
	/* Keep that around for when we re-implement dynamic VSIDs */
	cmpdi	r3,0
	bge	slb_miss_user_pseries
#endif /* __DISABLED__ */
	std	r10,PACA_EXSLB+EX_R10(r13)
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXSLB+EX_R13(r13)
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
#ifndef CONFIG_RELOCATABLE
	b	.slb_miss_realmode
#else
	mfctr	r11
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, .slb_miss_realmode)
	mtctr	r10
	bctr
#endif
	MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
	STD_EXCEPTION_PSERIES(0x600, alignment)
	STD_EXCEPTION_PSERIES(0x700, program_check)
	STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
	MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
	STD_EXCEPTION_PSERIES(0xa00, trap_0a)
	STD_EXCEPTION_PSERIES(0xb00, trap_0b)
	.globl system_call_pSeries
system_call_pSeries:
BEGIN_FTR_SECTION
	cmpdi	r0,0x1ebe
	beq-	1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
	mr	r9,r13
	mfspr	r13,SPRN_SPRG_PACA
	mfspr	r11,SPRN_SRR0
	ld	r12,PACAKBASE(r13)
	ld	r10,PACAKMSR(r13)
	LOAD_HANDLER(r12, system_call_entry)
	mtspr	SPRN_SRR0,r12
	mfspr	r12,SPRN_SRR1
	mtspr	SPRN_SRR1,r10
	rfid
	b	.	/* prevent speculative execution */

/* Fast LE/BE switch system call */
1:	mfspr	r12,SPRN_SRR1
	xori	r12,r12,MSR_LE
	mtspr	SPRN_SRR1,r12
	rfid		/* return to userspace */
	b	.
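/*
 * The 0x1ebe "system call" above just flips MSR_LE in the saved
 * SRR1 and returns, giving user space a fast endianness switch
 * without going through the normal system call entry path.
 */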
	STD_EXCEPTION_PSERIES(0xd00, single_step)
	STD_EXCEPTION_PSERIES(0xe00, trap_0e)
	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one.  A little
	 * trickery is thus necessary.
	 */
performance_monitor_pSeries_1:
	b	performance_monitor_pSeries

altivec_unavailable_pSeries_1:
	b	altivec_unavailable_pSeries

vsx_unavailable_pSeries_1:
	b	vsx_unavailable_pSeries
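/*
 * The stubs above sit at their architected vectors (0xf00, 0xf20,
 * 0xf40), where there is only room for a branch; the full prologs
 * live with the other pSeries interrupt support code below (see
 * "moved from 0xf00").
 */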
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
	STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
	HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
#endif /* CONFIG_CBE_RAS */
/*** pSeries interrupt support ***/

	/* moved from 0xf00 */
	STD_EXCEPTION_PSERIES(., performance_monitor)
	STD_EXCEPTION_PSERIES(., altivec_unavailable)
	STD_EXCEPTION_PSERIES(., vsx_unavailable)
/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 */
masked_interrupt:
	stb	r10,PACAHARDIRQEN(r13)
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	mfspr	r10,SPRN_SRR1
	rldicl	r10,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r10,16
	mtspr	SPRN_SRR1,r10
	ld	r10,PACA_EXGEN+EX_R10(r13)
	mfspr	r13,SPRN_SPRG_SCRATCH0
	rfid
	b	.
do_stab_bolted_pSeries:
	std	r11,PACA_EXSLB+EX_R11(r13)
	std	r12,PACA_EXSLB+EX_R12(r13)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	std	r10,PACA_EXSLB+EX_R13(r13)
	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted)
#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
	.globl system_reset_fwnmi
system_reset_fwnmi:
	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

	.globl machine_check_fwnmi
machine_check_fwnmi:
	mtspr	SPRN_SPRG_SCRATCH0,r13		/* save r13 */
	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

#endif /* CONFIG_PPC_PSERIES */
#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which currently doesn't happen but will again once we re-implement
 * dynamic VSIDs for shared page tables.
 */
slb_miss_user_pseries:
	std	r10,PACA_EXGEN+EX_R10(r13)
	std	r11,PACA_EXGEN+EX_R11(r13)
	std	r12,PACA_EXGEN+EX_R12(r13)
	mfspr	r10,SPRN_SPRG_SCRATCH0
	ld	r11,PACA_EXSLB+EX_R9(r13)
	ld	r12,PACA_EXSLB+EX_R3(r13)
	std	r10,PACA_EXGEN+EX_R13(r13)
	std	r11,PACA_EXGEN+EX_R9(r13)
	std	r12,PACA_EXGEN+EX_R3(r13)
	mfspr	r11,SPRN_SRR0			/* save SRR0 */
	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
	mtspr	SPRN_SRR0,r12
	mfspr	r12,SPRN_SRR1			/* and SRR1 */
	mtspr	SPRN_SRR1,r10
	rfid
	b	.				/* prevent spec. execution */
#endif /* __DISABLED__ */
	.globl __end_interrupts
__end_interrupts:
/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */
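/*
 * For reference, LOAD_HANDLER is roughly (see asm/exception-64s.h;
 * this is a sketch, the exact definition may differ):
 *
 *	#define LOAD_HANDLER(reg, label)	\
 *		addi	reg,reg,(label)-_stext
 *
 * reg is expected to already hold PACAKBASE, the run-time kernel
 * base, and addi takes a 16-bit signed immediate, so a single addi
 * only reaches labels within 32k of _stext -- hence the constraint
 * described above.
 */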
/*** Common interrupt handlers ***/

	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
/*
 * Machine check is different because we use a different
 * save area: PACA_EXMC instead of PACA_EXGEN.
 */
	.globl machine_check_common
machine_check_common:
	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.machine_check_exception
	b	.ret_from_except
	STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
	STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
	STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif

#ifdef CONFIG_CBE_RAS
	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */
/*
 * Here we have detected that the kernel stack pointer is bad.
 * r9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
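/*
 * PACAEMERGSP holds the top of this CPU's emergency stack; back off
 * 64 bytes plus an exception frame to carve out the register save
 * area used below.
 */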
	lhz	r12,PACA_TRAP_SAVE(r13)
	addi	r11,r1,INT_FRAME_SIZE
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b
/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
	.globl data_access_common
data_access_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	b	.do_hash_page		/* Try to handle as hpte fault */
	.globl instruction_access_common
instruction_access_common:
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	b	.do_hash_page		/* Try to handle as hpte fault */
#ifdef __DISABLED__
/*
 * Here is the common SLB miss handler that is used when going to
 * virtual mode for SLB misses; it is currently not used.
 */
	.globl slb_miss_user_common
slb_miss_user_common:
	mflr	r10
	std	r3,PACA_EXGEN+EX_DAR(r13)
	stw	r9,PACA_EXGEN+EX_CCR(r13)
	std	r10,PACA_EXGEN+EX_LR(r13)
	std	r11,PACA_EXGEN+EX_SRR0(r13)
	bl	.slb_allocate_user

	ld	r10,PACA_EXGEN+EX_LR(r13)
	ld	r3,PACA_EXGEN+EX_R3(r13)
	lwz	r9,PACA_EXGEN+EX_CCR(r13)
	ld	r11,PACA_EXGEN+EX_SRR0(r13)
	mtlr	r10
	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
	beq-	unrecov_user_slb
	mfmsr	r10
	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
	mtmsrd	r10,1

	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	rfid
	b	.
slb_miss_fault:
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
	ld	r4,PACA_EXGEN+EX_DAR(r13)

unrecov_user_slb:
	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b

#endif /* __DISABLED__ */
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1; SRR0 is still ready for return,
 * r3 has the faulting address,
 * r9 - r13 are saved in paca->exslb,
 * r3 is saved in paca->slb_r3.
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11
#endif
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */

	bl	.slb_allocate_realmode
	/* All done -- return from exception. */

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	ld	r11,PACALPPACAPTR(r13)
	ld	r11,LPPACASRR0(r11)		/* get SRR0 value */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	mtlr	r10
	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

	mtcrf	0x80,r9
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
2:
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	b	unrecov_slb
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	rfid

unrecov_slb:
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b
	.globl hardware_interrupt_common
	.globl hardware_interrupt_entry
hardware_interrupt_common:
	EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
BEGIN_FTR_SECTION
	bl	.ppc64_runlatch_on
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite
#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif
	.globl alignment_common
alignment_common:
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.alignment_exception
	b	.ret_from_except
	.globl program_check_common
program_check_common:
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.program_check_exception
	b	.ret_from_except
	.globl fp_unavailable_common
fp_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:	bl	.load_up_fpu
	b	fast_exception_return
	.globl altivec_unavailable_common
altivec_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
	bl	.load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.altivec_unavailable_exception
	b	.ret_from_except
	.globl vsx_unavailable_common
vsx_unavailable_common:
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	bne	.load_up_vsx
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.vsx_unavailable_exception
	b	.ret_from_except
	.globl __end_handlers
__end_handlers:
/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exc_return_irq:			/* restores irq state too */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ(r3);
	ld	r12,_MSR(r1)
	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
	b	1f
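/*
 * The rldicl above rotates the saved MSR left by 49 bits, carrying
 * MSR_EE (IBM bit 48) into bit 63, and the mask keeps only that bit,
 * leaving r4 = 0 or 1 to store into paca->hard_enabled.
 */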
	.globl fast_exception_return
fast_exception_return:
	ld	r12,_MSR(r1)
1:	andi.	r3,r12,MSR_RI		/* check if RI is set */
	beq-	unrecov_fer

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	ACCOUNT_CPU_USER_EXIT(r3, r4)
#endif
	mfmsr	r10
	rldicl	r10,r10,48,1		/* clear EE */
	rldicr	r10,r10,16,61		/* clear RI (LE is 0 already) */
	mtmsrd	r10,1
	rfid
	b	.	/* prevent speculative execution */
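/*
 * Double-rotate trick above: MSR_EE is IBM bit 48, so rotating left
 * by 48 brings it to bit 0, where rldicl's mask (MB=1) clears it;
 * rldicr then rotates left by another 16 (a full 64 bits in total,
 * so everything else is back in place) while its mask (ME=61)
 * clears bits 62 and 63, i.e. MSR_RI and MSR_LE.
 */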
unrecov_fer:
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	1b
_STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

	andis.	r0,r4,0xa450		/* weird error? */
	bne-	handle_page_fault	/* if so; otherwise try to insert a HPTE */
BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

	clrrdi	r11,r1,THREAD_SHIFT
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */
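/*
 * The clrrdi derives the thread_info pointer from the stack pointer:
 * the kernel stack is THREAD_SIZE aligned with thread_info at its
 * base, so clearing the low THREAD_SHIFT bits of r1 lands on it.
 */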
	/*
	 * On iSeries, we soft-disable interrupts here, then
	 * hard-enable interrupts so that the hash_page code can spin on
	 * the hash_table_lock without problems on a shared processor.
	 */
	DISABLE_INTS

	/*
	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
	 * and will clobber volatile registers when irq tracing is enabled
	 * so we need to reload them.  It may be possible to be smarter here
	 * and move the irq tracing elsewhere but let's keep it simple for
	 * now.
	 */
#ifdef CONFIG_TRACE_IRQFLAGS
	ld	r3,_DAR(r1)
	ld	r4,_DSISR(r1)
	ld	r5,_TRAP(r1)
	ld	r12,_MSR(r1)
	clrrdi	r5,r5,4
#endif /* CONFIG_TRACE_IRQFLAGS */
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel).  We assume
	 * kernel addresses always have the high bit set.
	 */
	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
	ori	r4,r4,1			/* add _PAGE_PRESENT */
	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
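/*
 * Worked through: rotldi moves the address's top bit into the MSR_PR
 * position (IBM bit 49); orc computes MSR_PR | ~high_bit there; and
 * the rlwimi deposits that bit as _PAGE_USER (0x002).  Net effect:
 * _PAGE_USER is set when the access came from user mode or targets a
 * user segment (top address bit clear).
 */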
	/*
	 * r3 contains the faulting address
	 * r4 contains the required access permissions
	 * r5 contains the trap number
	 *
	 * at return r3 = 0 for success
	 */
	bl	.hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if hash_page succeeded */
BEGIN_FW_FTR_SECTION
	/*
	 * If we had interrupts soft-enabled at the point where the
	 * DSI/ISI occurred, and an interrupt came in during hash_page,
	 * handle it and return.
	 * We jump to ret_from_except_lite rather than fast_exception_return
	 * because ret_from_except_lite will check for and handle pending
	 * interrupts if necessary.
	 */
	beq	13f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
BEGIN_FW_FTR_SECTION
	/*
	 * Here we have interrupts hard-disabled, so it is sufficient
	 * to restore paca->{soft,hard}_enable and get out.
	 */
	beq	fast_exc_return_irq	/* Return from exception on success */
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
	/* For a hash failure, we don't bother re-enabling interrupts */
	ble-	12f

	/*
	 * hash_page couldn't handle it, set soft interrupt enable back
	 * to what it was before the trap.  Note that .raw_local_irq_restore
	 * handles any interrupts pending at this point.
	 */
	ld	r3,SOFTE(r1)
	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
	bl	.raw_local_irq_restore
	b	11f
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_page_fault
	cmpdi	r3,0
	beq+	13f
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.bad_page_fault
13:	b	.ret_from_except_lite
/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.low_hash_fault
/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled.  We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.bad_page_fault
	b	.ret_from_except
	/* here we have a segment miss */
do_ste_alloc:
	bl	.ste_allocate		/* try to insert stab entry */
	cmpdi	r3,0
	bne-	handle_page_fault
	b	fast_exception_return
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
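/*
 * (0xc is the top nibble of PAGE_OFFSET, the kernel linear mapping,
 * which is why the "protovsid = ESID" shortcut below is valid.)
 */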
_GLOBAL(do_stab_bolted)
	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
	/* Hash to the primary group */
	ld	r10,PACASTABVIRT(r13)
	mfspr	r11,SPRN_DAR
	srdi	r11,r11,28
	rldimi	r10,r11,7,52	/* r10 = first ste of the group */

	/* This is a kernel address, so protovsid = ESID */
	ASM_VSID_SCRAMBLE(r11, r9, 256M)
	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
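/*
 * Segment table entries are 16 bytes and a group is 8 entries, so
 * groups are 128 bytes apart; the rldimi above drops the hashed ESID
 * into bits 7 and up of the stab base, i.e. r10 = stab + (hash << 7).
 * The vsid << 12 computed here is the VSID field position in the
 * second doubleword of an STE.
 */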
	/* Search the primary group for a free entry */
1:	ld	r11,0(r10)	/* Test valid bit of the current ste */
	andi.	r11,r11,0x80
	beq	2f
	addi	r10,r10,16
	andi.	r11,r10,0x70
	bne	1b

	/* Stick to searching only the primary group for now. */
	/* At least for now, we use a very simple random castout scheme */
	/* Use the TB as a random number; OR in 1 to avoid entry 0 */
	mftb	r11
	rldic	r11,r11,4,57	/* r11 = (r11 << 4) & 0x70 */
	ori	r11,r11,0x10
	/* r10 currently points to an ste one past the group of interest */
	/* make it point to the randomly selected entry */
	subi	r10,r10,128
	or	r10,r10,r11	/* r10 is the entry to invalidate */
	isync			/* mark the entry invalid */
	ld	r11,0(r10)
	rldicl	r11,r11,56,1	/* clear the valid bit */
	rldicr	r11,r11,8,62
	std	r11,0(r10)
	sync
	clrrdi	r11,r11,28	/* Get the esid part of the ste */
	slbie	r11
2:	std	r9,8(r10)	/* Store the vsid part of the ste */
	eieio
	mfspr	r11,SPRN_DAR	/* Get the new esid */
	clrrdi	r11,r11,28	/* Permits a full 32b of ESID */
	ori	r11,r11,0x90	/* Turn on valid and kp */
	std	r11,0(r10)	/* Put new entry back into the stab */
	sync
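/*
 * STE layout for reference: doubleword 0 holds the ESID (the address
 * with its low 28 bits cleared) plus the valid (0x80) and Kp (0x10)
 * bits; doubleword 1 holds the VSID shifted left by 12.  The vsid
 * half is written first (at 2: above) and the esid/valid half last,
 * so the entry never appears valid while half-written.
 */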
	/* All done -- return from exception. */
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	ld	r11,PACA_EXSLB+EX_SRR0(r13)	/* get saved SRR0 */
	andi.	r10,r12,MSR_RI
	beq-	unrecov_slb
	mtcrf	0x80,r9			/* restore CR */
	mfmsr	r10
	clrrdi	r10,r10,2
	mtmsrd	r10,1
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */
/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap below), so this must be at a fixed
 * address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
	. = STAB0_OFFSET	/* 0x6000 */
	.globl initial_stab
initial_stab:
	.space	4096
#ifdef CONFIG_PPC_PSERIES
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:
#endif /* CONFIG_PPC_PSERIES */
	/* iSeries does not use the FWNMI stuff, so it is safe to put
	 * this here, even if we later allow kernels that will boot on
	 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
	.globl xLparMap
xLparMap:
	.quad	HvEsidsToMap		/* xNumberEsids */
	.quad	HvRangesToMap		/* xNumberRanges */
	.quad	STAB0_PAGE		/* xSegmentTableOffs */

	/* xEsids (HvEsidsToMap entries of 2 quads) */
	.quad	PAGE_OFFSET_ESID	/* xKernelEsid */
	.quad	PAGE_OFFSET_VSID	/* xKernelVsid */
	.quad	VMALLOC_START_ESID	/* xKernelEsid */
	.quad	VMALLOC_START_VSID	/* xKernelVsid */

	/* xRanges (HvRangesToMap entries of 3 quads) */
	.quad	HvPagesToMap		/* xPages */
	.quad	0			/* xOffset */
	.quad	PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT)	/* xVPN */

#endif /* CONFIG_PPC_ISERIES */
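/*
 * xVPN above: a segment's first virtual page number is its virtual
 * address shifted right by HW_PAGE_SHIFT, and the segment starts at
 * VSID << SID_SHIFT, so VPN = VSID << (SID_SHIFT - HW_PAGE_SHIFT).
 */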
#ifdef CONFIG_PPC_PSERIES
	. = 0x8000
#endif /* CONFIG_PPC_PSERIES */