1 /* SPDX-License-Identifier: GPL-2.0-or-later */
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
19 #include <linux/init.h>
23 #include <asm/pgtable.h>
24 #include <asm/cputable.h>
25 #include <asm/cache.h>
26 #include <asm/thread_info.h>
27 #include <asm/ppc_asm.h>
28 #include <asm/asm-offsets.h>
29 #include <asm/ptrace.h>
31 #include <asm/kvm_book3s_asm.h>
32 #include <asm/export.h>
33 #include <asm/feature-fixups.h>
37 /* 601 only have IBAT */
38 #ifdef CONFIG_PPC_BOOK3S_601
/*
 * LOAD_BAT, 601 flavour: load IBAT pair n (upper, then lower) from the
 * table at (reg) + n*16, using RA/RB as scratch.  The upper half is
 * written first so the entry is not valid while the pair is updated.
 * NOTE(review): an "li RA,0" before the first mtspr appears to be
 * elided from this listing -- confirm against the full file.
 */
39 #define LOAD_BAT(n, reg, RA, RB) \
41 mtspr SPRN_IBAT##n##U,RA; \
42 lwz RA,(n*16)+0(reg); \
43 lwz RB,(n*16)+4(reg); \
44 mtspr SPRN_IBAT##n##U,RA; \
45 mtspr SPRN_IBAT##n##L,RB
/*
 * Non-601 flavour: load both the IBAT and the DBAT pair for entry n.
 * The upper halves are written up front to invalidate the entries
 * before the new values are read from (reg) + n*16.
 * NOTE(review): the "#else" separating the two definitions (and the
 * "li RA,0" that zeroes RA for the invalidating stores) appear to be
 * elided from this listing.
 */
47 #define LOAD_BAT(n, reg, RA, RB) \
48 /* see the comment for clear_bats() -- Cort */ \
50 mtspr SPRN_IBAT##n##U,RA; \
51 mtspr SPRN_DBAT##n##U,RA; \
52 lwz RA,(n*16)+0(reg); \
53 lwz RB,(n*16)+4(reg); \
54 mtspr SPRN_IBAT##n##U,RA; \
55 mtspr SPRN_IBAT##n##L,RB; \
56 lwz RA,(n*16)+8(reg); \
57 lwz RB,(n*16)+12(reg); \
58 mtspr SPRN_DBAT##n##U,RA; \
59 mtspr SPRN_DBAT##n##L,RB
63 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
64 .stabs "head_32.S",N_SO,0,0,0f
69 * _start is defined this way because the XCOFF loader in the OpenFirmware
70 * on the powermac expects the entry point to be a procedure descriptor.
74 * These are here for legacy reasons, the kernel used to
75 * need to look like a coff function entry for the pmac
76 * but we're always started by some kind of bootloader now.
79 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
80 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
84 * Enter here with the kernel text, data and bss loaded starting at
85 * 0, running with virtual == physical mapping.
86 * r5 points to the prom entry point (the client interface handler
87 * address). Address translation is turned on, with the prom
88 * managing the hash table. Interrupts are disabled. The stack
89 * pointer (r1) points to just below the end of the half-meg region
90 * from 0x380000 - 0x400000, which is mapped in already.
92 * If we are booted from MacOS via BootX, we enter with the kernel
93 * image loaded somewhere, and the following values in registers:
94 * r3: 'BooX' (0x426f6f58)
95 * r4: virtual address of boot_infos_t
99 * This is jumped to on prep systems right after the kernel is relocated
100 * to its proper place in memory by the boot loader. The expected layout
102 * r3: ptr to residual data
103 * r4: initrd_start or if no initrd then 0
104 * r5: initrd_end - unused if r4 is 0
105 * r6: Start of command line string
106 * r7: End of command line string
108 * This just gets a minimal mmu environment setup so we can call
109 * start_here() to do the real work.
116 * We have to do any OF calls before we map ourselves to KERNELBASE,
117 * because OF may have I/O devices mapped into that area
118 * (particularly on CHRP).
123 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
124 /* find out where we are now */
126 0: mflr r8 /* r8 = runtime addr here */
127 addis r8,r8,(_stext - 0b)@ha
128 addi r8,r8,(_stext - 0b)@l /* current runtime base addr */
130 #endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
132 /* We never return. We also hit that trap if trying to boot
133 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
137 * Check for BootX signature when supporting PowerMac and branch to
138 * appropriate trampoline if it's present
140 #ifdef CONFIG_PPC_PMAC
147 #endif /* CONFIG_PPC_PMAC */
149 1: mr r31,r3 /* save device tree ptr */
153 * early_init() does the early machine identification and does
154 * the necessary low-level setup and clears the BSS
155 * -- Cort <cort@fsmlabs.com>
159 /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
160 * the physical address we are running at, returned by early_init()
168 bl load_segment_registers
172 #if defined(CONFIG_BOOTX_TEXT)
175 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
178 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
179 bl setup_usbgecko_bat
183 * Call setup_cpu for CPU 0 and initialize 6xx Idle
187 bl call_setup_cpu /* Call setup_cpu for this CPU */
188 #ifdef CONFIG_PPC_BOOK3S_32
191 #endif /* CONFIG_PPC_BOOK3S_32 */
195 * We need to run with _start at physical address 0.
196 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
197 * the exception vectors at 0 (and therefore this copy
198 * overwrites OF's exception vectors with our own).
199 * The MMU is off at this point.
203 addis r4,r3,KERNELBASE@h /* current address of _start */
204 lis r5,PHYSICAL_START@h
205 cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
208 * we now have the 1st 16M of ram mapped with the bats.
209 * prep needs the mmu to be turned on here, but pmac already has it on.
210 * this shouldn't bother the pmac since it just gets turned on again
211 * as we jump to our code at KERNELBASE. -- Cort
212 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
213 * off, and in other cases, we now turn it off before changing BATs above.
217 ori r0,r0,MSR_DR|MSR_IR|MSR_RI
220 ori r0,r0,start_here@l
223 RFI /* enables MMU */
226 * We need __secondary_hold as a place to hold the other cpus on
227 * an SMP machine, even when we are running a UP kernel.
229 . = 0xc0 /* for prep bootloader */
230 li r3,1 /* MTX only has 1 cpu */
231 .globl __secondary_hold
233 /* tell the master we're here */
234 stw r3,__secondary_hold_acknowledge@l(0)
237 /* wait until we're told to start */
240 /* our cpu # was at addr 0 - go */
241 mr r24,r3 /* cpu # */
245 #endif /* CONFIG_SMP */
247 .globl __secondary_hold_spinloop
248 __secondary_hold_spinloop:
250 .globl __secondary_hold_acknowledge
251 __secondary_hold_acknowledge:
255 /* core99 pmac starts the secondary here by changing the vector, and
256 putting it back to what it was (unknown_exception) when done. */
257 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
261 * On CHRP, this is complicated by the fact that we could get a
262 * machine check inside RTAS, and we have no guarantee that certain
263 * critical registers will have the values we expect. The set of
264 * registers that might have bad values includes all the GPRs
265 * and all the BATs. We indicate that we are in RTAS by putting
266 * a non-zero value, the address of the exception frame to use,
267 * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
268 * and uses its value if it is non-zero.
269 * (Other exception handlers assume that r1 is a valid kernel stack
270 * pointer when we take an exception from supervisor mode.)
277 #ifdef CONFIG_VMAP_STACK
278 li r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
282 #ifdef CONFIG_PPC_CHRP
283 mfspr r11, SPRN_SPRG_THREAD
284 tovirt_vmstack r11, r11
285 lwz r11, RTAS_SP(r11)
288 #endif /* CONFIG_PPC_CHRP */
289 EXCEPTION_PROLOG_1 for_rtas=1
290 7: EXCEPTION_PROLOG_2
291 addi r3,r1,STACK_FRAME_OVERHEAD
292 #ifdef CONFIG_PPC_CHRP
293 #ifdef CONFIG_VMAP_STACK
294 mfspr r4, SPRN_SPRG_THREAD
299 beq cr1, machine_check_tramp
300 b machine_check_in_rtas
302 b machine_check_tramp
305 /* Data access exception. */
309 #ifdef CONFIG_VMAP_STACK
310 mtspr SPRN_SPRG_SCRATCH0,r10
311 mfspr r10, SPRN_SPRG_THREAD
312 BEGIN_MMU_FTR_SECTION
314 mfspr r10, SPRN_DSISR
316 #ifdef CONFIG_PPC_KUAP
317 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
319 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
321 mfspr r10, SPRN_SPRG_THREAD
323 .Lhash_page_dsi_cont:
326 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
327 mtspr SPRN_SPRG_SCRATCH1,r11
330 mfspr r11, SPRN_DSISR
334 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
337 andi. r11, r11, MSR_PR
340 b handle_page_fault_tramp_1
341 #else /* CONFIG_VMAP_STACK */
342 EXCEPTION_PROLOG handle_dar_dsisr=1
343 get_and_save_dar_dsisr_on_stack r4, r5, r11
344 BEGIN_MMU_FTR_SECTION
345 #ifdef CONFIG_PPC_KUAP
346 andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
348 andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
350 bne handle_page_fault_tramp_2 /* if not, try to put a PTE */
351 rlwinm r3, r5, 32 - 15, 21, 21 /* DSISR_STORE -> _PAGE_RW */
353 b handle_page_fault_tramp_1
355 b handle_page_fault_tramp_2
356 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
357 #endif /* CONFIG_VMAP_STACK */
359 /* Instruction access exception. */
363 #ifdef CONFIG_VMAP_STACK
364 mtspr SPRN_SPRG_SCRATCH0,r10
365 mtspr SPRN_SPRG_SCRATCH1,r11
366 mfspr r10, SPRN_SPRG_THREAD
369 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
372 BEGIN_MMU_FTR_SECTION
373 andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
375 .Lhash_page_isi_cont:
376 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
377 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
378 andi. r11, r11, MSR_PR
382 #else /* CONFIG_VMAP_STACK */
384 andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */
385 beq 1f /* if so, try to put a PTE */
386 li r3,0 /* into the hash table */
387 mr r4,r12 /* SRR0 is fault address */
388 BEGIN_MMU_FTR_SECTION
390 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
391 #endif /* CONFIG_VMAP_STACK */
393 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
395 EXC_XFER_LITE(0x400, handle_page_fault)
397 /* External interrupt */
398 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
400 /* Alignment exception */
404 EXCEPTION_PROLOG handle_dar_dsisr=1
405 save_dar_dsisr_on_stack r4, r5, r11
406 addi r3,r1,STACK_FRAME_OVERHEAD
407 b alignment_exception_tramp
409 /* Program check exception */
410 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
412 /* Floating-point unavailable */
418 * Certain Freescale cores don't have a FPU and treat fp instructions
419 * as a FP Unavailable exception. Redirect to illegal/emulation handling.
422 END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
425 bl load_up_fpu /* if from user, just load it up */
426 b fast_exception_return
427 1: addi r3,r1,STACK_FRAME_OVERHEAD
428 EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)
431 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
433 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
434 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)
442 /* Single step - not used on 601 */
443 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
444 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)
447 * The Altivec unavailable trap is at 0x0f20. Foo.
448 * We effectively remap it to 0x3000.
449 * We include an altivec unavailable exception vector even if
450 * not configured for Altivec, so that you can't panic a
451 * non-altivec kernel running on a machine with altivec just
452 * by executing an altivec instruction.
463 * Handle TLB miss for instruction on 603/603e.
464 * Note: we get an alternate set of r0 - r3 to use automatically.
470 * r1: linux style pte ( later becomes ppc hardware pte )
471 * r2: ptr to linux-style pte
/*
 * Fast path: walk the two-level page table by hand.  r2 walks the
 * pgd/pte, r1 carries the required-permission mask.  NOTE(review):
 * r3 is presumably the faulting address from SPRN_IMISS, loaded in a
 * line elided from this listing -- confirm against the full file.
 */
474 /* Get PTE (linux-style) and check access */
476 #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
477 lis r1,PAGE_OFFSET@h /* check if kernel address */
480 mfspr r2, SPRN_SPRG_PGDIR
482 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
484 li r1,_PAGE_PRESENT | _PAGE_EXEC
486 #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
488 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
489 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
491 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
492 lwz r2,0(r2) /* get pmd entry */
493 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
494 beq- InstructionAddressInvalid /* return if no mapping */
495 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
496 lwz r0,0(r2) /* get linux-style pte */
497 andc. r1,r1,r0 /* check access & ~permission */
498 bne- InstructionAddressInvalid /* return if access not permitted */
499 /* Convert linux-style PTE to low word of PPC-style PTE */
500 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
501 ori r1, r1, 0xe06 /* clear out reserved bits */
502 andc r1, r0, r1 /* PP = user? 1 : 0 */
504 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
505 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
508 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
/*
 * Slow path: the miss cannot be serviced here.  Build the SRR1/DSISR
 * fault state and hand off to the ISI exception.  NOTE(review): the
 * final branch to the 0x400 vector / rfi appears to be elided from
 * this listing.
 */
511 InstructionAddressInvalid:
513 rlwinm r1,r3,9,6,6 /* Get load/store bit */
516 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
517 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
520 mfspr r1,SPRN_IMISS /* Get failing address */
521 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
522 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
524 mtspr SPRN_DAR,r1 /* Set fault address */
525 mfmsr r0 /* Restore "normal" registers */
526 xoris r0,r0,MSR_TGPR>>16
527 mtcrf 0x80,r3 /* Restore CR0 */
532 * Handle TLB miss for DATA Load operation on 603/603e
538 * r1: linux style pte ( later becomes ppc hardware pte )
539 * r2: ptr to linux-style pte
/*
 * Same two-level software table walk as the instruction miss above,
 * but the permission mask only requires the page to be present (plus
 * accessed, in the first of the two alternative li's below -- the
 * selecting branch appears to be elided from this listing).
 */
542 /* Get PTE (linux-style) and check access */
544 lis r1,PAGE_OFFSET@h /* check if kernel address */
546 mfspr r2, SPRN_SPRG_PGDIR
548 li r1, _PAGE_PRESENT | _PAGE_ACCESSED
553 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
554 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
555 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
556 lwz r2,0(r2) /* get pmd entry */
557 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
558 beq- DataAddressInvalid /* return if no mapping */
559 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
560 lwz r0,0(r2) /* get linux-style pte */
561 andc. r1,r1,r0 /* check access & ~permission */
562 bne- DataAddressInvalid /* return if access not permitted */
564 * NOTE! We are assuming this is not an SMP system, otherwise
565 * we would need to update the pte atomically with lwarx/stwcx.
567 /* Convert linux-style PTE to low word of PPC-style PTE */
568 rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
569 rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
570 rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
571 ori r1,r1,0xe04 /* clear out reserved bits */
572 andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
574 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
575 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
577 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
/*
 * Software-managed DTLB replacement hint for 603 cores that need it:
 * track recently used entries in SPRN_SPRG_603_LRU so reloads avoid
 * casting out the hot way.  NOTE(review): several instructions of
 * this sequence are elided from this listing.
 */
579 BEGIN_MMU_FTR_SECTION
581 mfspr r1,SPRN_SPRG_603_LRU
582 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
586 mtspr SPRN_SPRG_603_LRU,r1
588 rlwimi r2,r0,31-14,14,14
590 END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
/* NOTE(review): the "DataAddressInvalid:" label appears to be elided
 * from this listing; the lines below build the DSI fault state. */
595 rlwinm r1,r3,9,6,6 /* Get load/store bit */
598 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
600 mfspr r1,SPRN_DMISS /* Get failing address */
601 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
602 beq 20f /* Jump if big endian */
604 20: mtspr SPRN_DAR,r1 /* Set fault address */
605 mfmsr r0 /* Restore "normal" registers */
606 xoris r0,r0,MSR_TGPR>>16
607 mtcrf 0x80,r3 /* Restore CR0 */
612 * Handle TLB miss for DATA Store on 603/603e
618 * r1: linux style pte ( later becomes ppc hardware pte )
619 * r2: ptr to linux-style pte
/*
 * Store misses additionally require the page to be writable and
 * already dirty; otherwise the access faults out through
 * DataAddressInvalid so the generic DSI path can update the PTE --
 * TODO confirm against the full file.
 */
622 /* Get PTE (linux-style) and check access */
624 lis r1,PAGE_OFFSET@h /* check if kernel address */
626 mfspr r2, SPRN_SPRG_PGDIR
628 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
630 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT
633 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
634 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
635 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
636 lwz r2,0(r2) /* get pmd entry */
637 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
638 beq- DataAddressInvalid /* return if no mapping */
639 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
640 lwz r0,0(r2) /* get linux-style pte */
641 andc. r1,r1,r0 /* check access & ~permission */
642 bne- DataAddressInvalid /* return if access not permitted */
644 * NOTE! We are assuming this is not an SMP system, otherwise
645 * we would need to update the pte atomically with lwarx/stwcx.
647 /* Convert linux-style PTE to low word of PPC-style PTE */
648 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
649 li r1,0xe06 /* clear out reserved bits & PP msb */
650 andc r1,r0,r1 /* PP = user? 1: 0 */
652 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
653 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
655 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
/* Software DTLB LRU update, as in the data-load miss path above. */
657 BEGIN_MMU_FTR_SECTION
659 mfspr r1,SPRN_SPRG_603_LRU
660 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
664 mtspr SPRN_SPRG_603_LRU,r1
666 rlwimi r2,r0,31-14,14,14
668 END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
672 #ifndef CONFIG_ALTIVEC
/* Without AltiVec, the assist trap is just an unknown exception.
 * NOTE(review): the matching #endif appears to be elided from this
 * listing. */
673 #define altivec_assist_exception unknown_exception
/*
 * Remaining exception vectors, 0x1300 - 0x2f00.  Most are unused on
 * these CPUs and report an unknown exception; the named ones
 * (instruction breakpoint, system management, AltiVec assist, TAU
 * thermal, run mode) dispatch to their specific handlers.
 */
676 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
677 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
678 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
679 EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
680 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
681 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
682 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
683 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
684 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
685 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
686 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
687 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
688 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
689 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
690 EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
691 EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
692 EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
693 EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
694 EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
695 EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
696 EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
697 EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
698 EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
699 EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
700 EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
701 EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
702 EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
703 EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
704 EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)
709 EXC_XFER_STD(0x200, machine_check_exception)
711 alignment_exception_tramp:
712 EXC_XFER_STD(0x600, alignment_exception)
714 handle_page_fault_tramp_1:
715 #ifdef CONFIG_VMAP_STACK
716 EXCEPTION_PROLOG_2 handle_dar_dsisr=1
721 handle_page_fault_tramp_2:
722 EXC_XFER_LITE(0x300, handle_page_fault)
724 #ifdef CONFIG_VMAP_STACK
/*
 * save_regs_thread / restore_regs_thread: spill/reload the volatile
 * GPRs r0, r3-r6, r8, r9 plus LR and CTR into the thread_struct slots
 * (THR0..THR9, THLR, THCTR), presumably so the low-level hash-page
 * helpers can be called without a usable stack -- TODO confirm.
 * NOTE(review): the mflr/mfctr (and mtlr/mtctr) instructions pairing
 * with the r0 stores/loads, and the .endm directives, appear to be
 * elided from this listing.
 */
725 .macro save_regs_thread thread
726 stw r0, THR0(\thread)
727 stw r3, THR3(\thread)
728 stw r4, THR4(\thread)
729 stw r5, THR5(\thread)
730 stw r6, THR6(\thread)
731 stw r8, THR8(\thread)
732 stw r9, THR9(\thread)
734 stw r0, THLR(\thread) /* r0 presumably holds LR here (mflr elided?) */
736 stw r0, THCTR(\thread) /* r0 presumably holds CTR here (mfctr elided?) */
739 .macro restore_regs_thread thread
740 lwz r0, THLR(\thread) /* reload LR via r0 */
742 lwz r0, THCTR(\thread) /* reload CTR via r0 */
744 lwz r0, THR0(\thread)
745 lwz r3, THR3(\thread)
746 lwz r4, THR4(\thread)
747 lwz r5, THR5(\thread)
748 lwz r6, THR6(\thread)
749 lwz r8, THR8(\thread)
750 lwz r9, THR9(\thread)
759 rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */
761 mfspr r10, SPRN_SPRG_THREAD
762 restore_regs_thread r10
763 b .Lhash_page_dsi_cont
767 mfspr r10, SPRN_SPRG_THREAD
773 mfspr r10, SPRN_SPRG_THREAD
774 restore_regs_thread r10
776 b .Lhash_page_isi_cont
778 .globl fast_hash_page_return
779 fast_hash_page_return:
780 andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
781 mfspr r10, SPRN_SPRG_THREAD
782 restore_regs_thread r10
788 mfspr r10, SPRN_SPRG_SCRATCH0
794 mfspr r11, SPRN_SPRG_SCRATCH1
795 mfspr r10, SPRN_SPRG_SCRATCH0
800 vmap_stack_overflow_exception
805 #ifdef CONFIG_ALTIVEC
807 bl load_up_altivec /* if from user, just load it up */
808 b fast_exception_return
809 #endif /* CONFIG_ALTIVEC */
810 1: addi r3,r1,STACK_FRAME_OVERHEAD
811 EXC_XFER_LITE(0xf20, altivec_unavailable_exception)
815 addi r3,r1,STACK_FRAME_OVERHEAD
816 EXC_XFER_STD(0xf00, performance_monitor_exception)
820 * This code is jumped to from the startup code to copy
821 * the kernel image to physical address PHYSICAL_START.
824 addis r9,r26,klimit@ha /* fetch klimit */
826 addis r25,r25,-KERNELBASE@h
827 lis r3,PHYSICAL_START@h /* Destination base address */
828 li r6,0 /* Destination offset */
829 li r5,0x4000 /* # bytes of memory to copy */
830 bl copy_and_flush /* copy the first 0x4000 bytes */
831 addi r0,r3,4f@l /* jump to the address of 4f */
832 mtctr r0 /* in copy and do the rest. */
833 bctr /* jump to the copy */
835 bl copy_and_flush /* copy the rest */
839 * Copy routine used to copy the kernel to start at physical address 0
840 * and flush and invalidate the caches as needed.
841 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
842 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
844 _ENTRY(copy_and_flush)
/*
 * Copies one cache line per outer iteration (L1_CACHE_BYTES/4 words),
 * then pushes it to memory with dcbst and invalidates the stale
 * icache line with icbi so the copied kernel can be executed.
 * NOTE(review): the word-copy loads/stores, CTR set-up and the loop
 * branches are elided from this listing.
 */
847 4: li r0,L1_CACHE_BYTES/4
849 3: addi r6,r6,4 /* copy a cache line */
853 dcbst r6,r3 /* write it to memory */
855 icbi r6,r3 /* flush the icache line */
858 sync /* additional sync needed on g4 */
865 .globl __secondary_start_mpc86xx
866 __secondary_start_mpc86xx:
868 stw r3, __secondary_hold_acknowledge@l(0)
869 mr r24, r3 /* cpu # */
872 .globl __secondary_start_pmac_0
873 __secondary_start_pmac_0:
874 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
883 /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
884 set to map the 0xf0000000 - 0xffffffff region */
886 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
891 .globl __secondary_start
893 /* Copy some CPU settings from CPU 0 */
894 bl __restore_cpu_setup
898 bl call_setup_cpu /* Call setup_cpu for this CPU */
899 #ifdef CONFIG_PPC_BOOK3S_32
902 #endif /* CONFIG_PPC_BOOK3S_32 */
904 /* get current's stack and current */
905 lis r2,secondary_current@ha
907 lwz r2,secondary_current@l(r2)
909 lwz r1,TASK_STACK(r1)
912 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
917 /* load up the MMU */
918 bl load_segment_registers
921 /* ptr to phys current thread */
923 addi r4,r4,THREAD /* phys address of our thread_struct */
924 mtspr SPRN_SPRG_THREAD,r4
925 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
926 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
927 mtspr SPRN_SPRG_PGDIR, r4
929 /* enable MMU and jump to start_secondary */
931 lis r3,start_secondary@h
932 ori r3,r3,start_secondary@l
937 #endif /* CONFIG_SMP */
939 #ifdef CONFIG_KVM_BOOK3S_HANDLER
940 #include "../kvm/book3s_rmhandlers.S"
944 * Those generic dummy functions are kept for CPUs not
945 * included in CONFIG_PPC_BOOK3S_32
947 #if !defined(CONFIG_PPC_BOOK3S_32)
948 _ENTRY(__save_cpu_setup)
950 _ENTRY(__restore_cpu_setup)
952 #endif /* !defined(CONFIG_PPC_BOOK3S_32) */
955 * Load stuff into the MMU. Intended to be called with
960 sync /* Force all PTE updates to finish */
962 tlbia /* Clear all TLB entries */
963 sync /* wait for tlbia/tlbie to finish */
964 TLBSYNC /* ... on all CPUs */
965 /* Load the SDR1 register (hash table base & size) */
966 lis r6, early_hash - PAGE_OFFSET@h
967 ori r6, r6, 3 /* 256kB table */
973 sync /* Force all PTE updates to finish */
975 tlbia /* Clear all TLB entries */
976 sync /* wait for tlbia/tlbie to finish */
977 TLBSYNC /* ... on all CPUs */
978 /* Load the SDR1 register (hash table base & size) */
984 /* Load the BAT registers with the values set up by MMU_init.
985 MMU_init takes care of whether we're on a 601 or not. */
993 BEGIN_MMU_FTR_SECTION
998 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1001 load_segment_registers:
/* First loop: program the NUM_USER_SEGMENTS user segment registers
 * with consecutive VSIDs (step 0x111) for context 0, with Nx/Ks set
 * when KUEP/KUAP are enabled.  NOTE(review): the mtsrin and the
 * loop-branch instructions are elided from this listing. */
1002 li r0, NUM_USER_SEGMENTS /* load up user segment register values */
1003 mtctr r0 /* for context 0 */
1004 li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
1005 #ifdef CONFIG_PPC_KUEP
1006 oris r3, r3, SR_NX@h /* Set Nx */
1008 #ifdef CONFIG_PPC_KUAP
1009 oris r3, r3, SR_KS@h /* Set Ks */
1013 addi r3, r3, 0x111 /* increment VSID */
1014 addis r4, r4, 0x1000 /* address of next segment */
/* Second loop: the remaining (kernel) segment registers get Kp = 1
 * with Nx and Ks cleared. */
1016 li r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
1017 mtctr r0 /* for context 0 */
1018 rlwinm r3, r3, 0, ~SR_NX /* Nx = 0 */
1019 rlwinm r3, r3, 0, ~SR_KS /* Ks = 0 */
1020 oris r3, r3, SR_KP@h /* Kp = 1 */
1022 addi r3, r3, 0x111 /* increment VSID */
1023 addis r4, r4, 0x1000 /* address of next segment */
1028 * This is where the main kernel code starts.
1031 /* ptr to current */
1033 ori r2,r2,init_task@l
1034 /* Set up for using our exception vectors */
1035 /* ptr to phys current thread */
1037 addi r4,r4,THREAD /* init task's THREAD */
1038 mtspr SPRN_SPRG_THREAD,r4
1039 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
1040 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
1041 mtspr SPRN_SPRG_PGDIR, r4
1044 lis r1,init_thread_union@ha
1045 addi r1,r1,init_thread_union@l
1047 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
1049 * Do early platform-specific initialization,
1050 * and set up the MMU.
1061 BEGIN_MMU_FTR_SECTION
1062 bl MMU_init_hw_patch
1063 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
1067 * Go back to running unmapped so we can load up new values
1068 * for SDR1 (hash table pointer) and the segment registers
1069 * and change to using our exception vectors.
1074 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1081 /* Load up the kernel context */
1084 #ifdef CONFIG_BDI_SWITCH
1085 /* Add helper information for the Abatron bdiGDB debugger.
1086 * We do this here because we know the mmu is disabled, and
1087 * will be enabled for real in just a few instructions.
1089 lis r5, abatron_pteptrs@h
1090 ori r5, r5, abatron_pteptrs@l
1091 stw r5, 0xf0(r0) /* This must match your Abatron config */
1092 lis r6, swapper_pg_dir@h
1093 ori r6, r6, swapper_pg_dir@l
1096 #endif /* CONFIG_BDI_SWITCH */
1098 /* Now turn on the MMU for real! */
/* r3 = virtual entry point; presumably loaded into SRR0 for the rfi
 * that enables translation (elided from this listing) -- TODO confirm. */
1100 lis r3,start_kernel@h
1101 ori r3,r3,start_kernel@l
1108 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
1110 * Set up the segment registers for a new context.
1112 _ENTRY(switch_mmu_context)
/* Derive the base VSID from the context id: skew by 897 to spread
 * contexts across the hash table, then shift into the VSID field. */
1113 lwz r3,MMCONTEXTID(r4)
1116 mulli r3,r3,897 /* multiply context by skew factor */
1117 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
1118 #ifdef CONFIG_PPC_KUEP
1119 oris r3, r3, SR_NX@h /* Set Nx */
1121 #ifdef CONFIG_PPC_KUAP
1122 oris r3, r3, SR_KS@h /* Set Ks */
1124 li r0,NUM_USER_SEGMENTS
1128 #ifdef CONFIG_BDI_SWITCH
1129 /* Context switch the PTE pointer for the Abatron BDI2000.
1130 * The PGDIR is passed as second argument.
1132 lis r5, abatron_pteptrs@ha
1133 stw r4, abatron_pteptrs@l + 0x4(r5)
1136 mtspr SPRN_SPRG_PGDIR, r4
/* Per-segment loop body: next VSID, mask field overflow, advance the
 * effective address by one 256MB segment.  NOTE(review): the mtsrin
 * and the bdnz loop branch appear to be elided from this listing. */
1141 addi r3,r3,0x111 /* next VSID */
1142 rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
1143 addis r4,r4,0x1000 /* address of next segment */
/* BUG trap for the invalid-context path (label 4 not visible here). */
1149 EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
1151 EXPORT_SYMBOL(switch_mmu_context)
1154 * An undocumented "feature" of 604e requires that the v bit
1155 * be cleared before changing BAT values.
1157 * Also, newer IBM firmware does not clear bat3 and 4 so
1158 * this makes sure it's done.
/*
 * Zero every (I/D)BAT register.  r10 is expected to hold 0 here; the
 * "clear_bats:" label and the li that zeroes r10 appear to be elided
 * from this listing -- TODO confirm.  The 601 has only IBATs, hence
 * the #ifndef guarding the DBAT stores.
 */
1164 #ifndef CONFIG_PPC_BOOK3S_601
1165 mtspr SPRN_DBAT0U,r10
1166 mtspr SPRN_DBAT0L,r10
1167 mtspr SPRN_DBAT1U,r10
1168 mtspr SPRN_DBAT1L,r10
1169 mtspr SPRN_DBAT2U,r10
1170 mtspr SPRN_DBAT2L,r10
1171 mtspr SPRN_DBAT3U,r10
1172 mtspr SPRN_DBAT3L,r10
1174 mtspr SPRN_IBAT0U,r10
1175 mtspr SPRN_IBAT0L,r10
1176 mtspr SPRN_IBAT1U,r10
1177 mtspr SPRN_IBAT1L,r10
1178 mtspr SPRN_IBAT2U,r10
1179 mtspr SPRN_IBAT2L,r10
1180 mtspr SPRN_IBAT3U,r10
1181 mtspr SPRN_IBAT3L,r10
1182 BEGIN_MMU_FTR_SECTION
1183 /* Here's a tweak: at this point, CPU setup has
1184 * not been called yet, so HIGH_BAT_EN may not be
1185 * set in HID0 for the 745x processors. However, it
1186 * seems that doesn't affect our ability to actually
1187 * write to these SPRs.
1189 mtspr SPRN_DBAT4U,r10
1190 mtspr SPRN_DBAT4L,r10
1191 mtspr SPRN_DBAT5U,r10
1192 mtspr SPRN_DBAT5L,r10
1193 mtspr SPRN_DBAT6U,r10
1194 mtspr SPRN_DBAT6L,r10
1195 mtspr SPRN_DBAT7U,r10
1196 mtspr SPRN_DBAT7L,r10
1197 mtspr SPRN_IBAT4U,r10
1198 mtspr SPRN_IBAT4L,r10
1199 mtspr SPRN_IBAT5U,r10
1200 mtspr SPRN_IBAT5L,r10
1201 mtspr SPRN_IBAT6U,r10
1202 mtspr SPRN_IBAT6L,r10
1203 mtspr SPRN_IBAT7U,r10
1204 mtspr SPRN_IBAT7L,r10
1205 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1214 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
1215 rlwinm r0, r6, 0, ~MSR_RI
1216 rlwinm r0, r0, 0, ~MSR_EE
1228 LOAD_BAT(0, r3, r4, r5)
1229 LOAD_BAT(1, r3, r4, r5)
1230 LOAD_BAT(2, r3, r4, r5)
1231 LOAD_BAT(3, r3, r4, r5)
1232 BEGIN_MMU_FTR_SECTION
1233 LOAD_BAT(4, r3, r4, r5)
1234 LOAD_BAT(5, r3, r4, r5)
1235 LOAD_BAT(6, r3, r4, r5)
1236 LOAD_BAT(7, r3, r4, r5)
1237 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1238 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
1247 1: addic. r10, r10, -0x1000
1254 addi r4, r3, __after_mmu_off - _start
1256 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
1267 * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET
1268 * (we keep one for debugging) and on others, we use one 256M BAT.
1271 lis r11,PAGE_OFFSET@h
1272 #ifdef CONFIG_PPC_BOOK3S_601
/* 601: three 8MB IBAT pairs mapping PAGE_OFFSET, PAGE_OFFSET+8M,
 * PAGE_OFFSET+16M.  The 601 keeps its valid bit in the *lower* BAT
 * register, unlike later 6xx parts. */
1273 ori r11,r11,4 /* set up BAT registers for 601 */
1274 li r8,0x7f /* valid, block length = 8MB */
1275 mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */
1276 mtspr SPRN_IBAT0L,r8 /* lower BAT register */
1277 addis r11,r11,0x800000@h
1278 addis r8,r8,0x800000@h
1279 mtspr SPRN_IBAT1U,r11
1280 mtspr SPRN_IBAT1L,r8
1281 addis r11,r11,0x800000@h
1282 addis r8,r8,0x800000@h
1283 mtspr SPRN_IBAT2U,r11
1284 mtspr SPRN_IBAT2L,r8
/* Non-601: a single 256MB IBAT/DBAT pair for the first 256MB of RAM.
 * SMP builds additionally set M=1 (memory coherence) in the WIMG
 * bits.  NOTE(review): the #else / #ifdef CONFIG_SMP lines appear to
 * be elided from this listing. */
1288 ori r8,r8,0x12 /* R/W access, M=1 */
1290 ori r8,r8,2 /* R/W access */
1291 #endif /* CONFIG_SMP */
1292 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
1294 mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
1295 mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
1296 mtspr SPRN_IBAT0L,r8
1297 mtspr SPRN_IBAT0U,r11
1302 #ifdef CONFIG_BOOTX_TEXT
1305 * setup the display bat prepared for us in prom.c
1310 addis r8,r3,disp_BAT@ha
1311 addi r8,r8,disp_BAT@l
/* Program BAT3 with the display mapping from disp_BAT; the lwz pair
 * that loads r8/r11 from the table appears elided from this listing. */
1316 #ifndef CONFIG_PPC_BOOK3S_601
1317 mtspr SPRN_DBAT3L,r8
1318 mtspr SPRN_DBAT3U,r11
1320 mtspr SPRN_IBAT3L,r8
1321 mtspr SPRN_IBAT3U,r11
1324 #endif /* CONFIG_BOOTX_TEXT */
1326 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
1330 mtspr SPRN_DBAT1L, r8
1333 ori r11, r11, (BL_1M << 2) | 2
1334 mtspr SPRN_DBAT1U, r11
1339 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
1341 /* prepare a BAT for early io */
1342 #if defined(CONFIG_GAMECUBE)
1344 #elif defined(CONFIG_WII)
1347 #error Invalid platform for USB Gecko based early debugging.
1350 * The virtual address used must match the virtual address
1351 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
1353 lis r11, 0xfffe /* top 128K */
1354 ori r8, r8, 0x002a /* uncached, guarded ,rw */
1355 ori r11, r11, 0x2 /* 128K, Vs=1, Vp=0 */
1356 mtspr SPRN_DBAT1L, r8
1357 mtspr SPRN_DBAT1U, r11
1362 /* Jump into the system reset for the rom.
1363 * We first disable the MMU, and then jump to the ROM reset address.
1365 * r3 is the board info structure, r4 is the location for starting.
1366 * I use this for building a small kernel that can load other kernels,
1367 * rather than trying to write or rely on a rom monitor that can tftp load.
1372 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
1376 mfspr r11, SPRN_HID0
1378 ori r10,r10,HID0_ICE|HID0_DCE
1380 mtspr SPRN_HID0, r11
1382 li r5, MSR_ME|MSR_RI
1384 addis r6,r6,-KERNELBASE@h
1398 * We put a few things here that have to be page-aligned.
1399 * This stuff goes at the beginning of the data segment,
1400 * which is page-aligned.
/* One all-zero page, exported for generic-kernel users.
 * NOTE(review): its .space reservation is elided from this listing. */
1405 .globl empty_zero_page
1408 EXPORT_SYMBOL(empty_zero_page)
/* The kernel's initial page directory (context 0). */
1410 .globl swapper_pg_dir
1412 .space PGD_TABLE_SIZE
1414 /* Room for two PTE pointers, usually the kernel and current user pointers
1415 * to their respective root page table.