/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the low-level support and setup for the
 * PowerPC platform, including trap and interrupt dispatch.
 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
/* The 601 has only IBATs; cr0.eq is set on the 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB)        \
        /* see the comment for clear_bats() -- Cort */ \
        li      RA,0;                   \
        mtspr   SPRN_IBAT##n##U,RA;     \
        mtspr   SPRN_DBAT##n##U,RA;     \
        lwz     RA,(n*16)+0(reg);       \
        lwz     RB,(n*16)+4(reg);       \
        mtspr   SPRN_IBAT##n##U,RA;     \
        mtspr   SPRN_IBAT##n##L,RB;     \
        beq     1f;                     \
        lwz     RA,(n*16)+8(reg);       \
        lwz     RB,(n*16)+12(reg);      \
        mtspr   SPRN_DBAT##n##U,RA;     \
        mtspr   SPRN_DBAT##n##L,RB;     \
1:
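
/*
 * Note on LOAD_BAT: `reg` points to a table of saved BAT values laid
 * out as IBATnU, IBATnL, DBATnU, DBATnL, 16 bytes per BAT pair.  The
 * upper registers are zeroed first so the valid bits are clear while
 * the pair is being rewritten.  On the 601 (cr0.eq set by the caller)
 * the DBAT loads are skipped, since the 601 has unified BATs.
 */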
        .stabs  "arch/powerpc/kernel/",N_SO,0,0,0f
        .stabs  "head_32.S",N_SO,0,0,0f
/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */

/*
 * These are here for legacy reasons: the kernel used to need to look
 * like a COFF function entry for the pmac, but we're always started
 * by some kind of bootloader now.
 */
        nop     /* used by __secondary_hold on prep (mtx) and chrp smp */
        nop     /* used by __secondary_hold on prep (mtx) and chrp smp */
/*
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the registers is:
 *  r3: ptr to residual data
 *  r4: initrd_start, or 0 if there is no initrd
 *  r5: initrd_end, unused if r4 is 0
 *  r6: start of command line string
 *  r7: end of command line string
 *
 * This just gets a minimal mmu environment set up so we can call
 * start_here() to do the real work.
 */
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
        /* find out where we are now */
0:      mflr    r8                      /* r8 = runtime addr here */
        addis   r8,r8,(_stext - 0b)@ha
        addi    r8,r8,(_stext - 0b)@l   /* current runtime base addr */
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */

        /* We never return.  We also hit that trap if trying to boot
         * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected. */
/*
 * Check for BootX signature when supporting PowerMac and branch to
 * appropriate trampoline if it's present.
 */
#ifdef CONFIG_PPC_PMAC
#endif /* CONFIG_PPC_PMAC */

1:      mr      r31,r3                  /* save parameters */
/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */

/* Switch MMU off, clear BATs and flush TLB.  At this point, r3 contains
 * the physical address we are running at, returned by early_init() */
#if defined(CONFIG_BOOTX_TEXT)
        bl      setup_disp_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
        bl      setup_cpm_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
        bl      setup_usbgecko_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
        bl      call_setup_cpu          /* Call setup_cpu for this CPU */
#endif /* CONFIG_6xx */
/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
        addis   r4,r3,KERNELBASE@h      /* current address of _start */
        lis     r5,PHYSICAL_START@h
        cmplw   0,r4,r5                 /* already running at PHYSICAL_START? */
/*
 * We now have the first 16M of RAM mapped with the BATs.
 * prep needs the MMU to be turned on here, but pmac already has it on.
 * This shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more.  BootX enters with the
 * MMU off, and in other cases, we now turn it off before changing BATs above.
 */
        ori     r0,r0,MSR_DR|MSR_IR
        ori     r0,r0,start_here@l
        RFI                             /* enables MMU */
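
/*
 * The two ori instructions build the new MSR image (with IR|DR set)
 * and the target address (start_here); these are staged into SRR1
 * and SRR0, so the RFI above both enables address translation and
 * transfers to start_here in one atomic operation.
 */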
/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 */
        . = 0xc0                        /* for prep bootloader */
        li      r3,1                    /* MTX only has 1 cpu */
        .globl  __secondary_hold
__secondary_hold:
        /* tell the master we're here */
        stw     r3,__secondary_hold_acknowledge@l(0)
        /* wait until we're told to start */
        /* our cpu # was at addr 0 - go */
        mr      r24,r3                  /* cpu # */
#endif /* CONFIG_SMP */
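
/*
 * Hold protocol: each secondary announces itself by storing to
 * __secondary_hold_acknowledge, then spins until the master writes a
 * non-zero value to __secondary_hold_spinloop, at which point it
 * continues with the cpu # it was handed (historically at addr 0).
 */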
        .globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
        .globl  __secondary_hold_acknowledge
__secondary_hold_acknowledge:
/*
 * Exception entry code.  This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume sprg3 has the physical address of the current
 * task's thread_struct.
 */
#define EXCEPTION_PROLOG        \
        mtspr   SPRN_SPRG_SCRATCH0,r10; \
        mtspr   SPRN_SPRG_SCRATCH1,r11; \
        mfcr    r10;            \
        EXCEPTION_PROLOG_1;     \
        EXCEPTION_PROLOG_2

#define EXCEPTION_PROLOG_1      \
        mfspr   r11,SPRN_SRR1;          /* check whether user or kernel */ \
        andi.   r11,r11,MSR_PR; \
        tophys(r11,r1);                 /* use tophys(r1) if kernel */ \
        beq     1f;             \
        mfspr   r11,SPRN_SPRG_THREAD;   \
        lwz     r11,THREAD_INFO-THREAD(r11); \
        addi    r11,r11,THREAD_SIZE;    \
        tophys(r11,r11);        \
1:      subi    r11,r11,INT_FRAME_SIZE  /* alloc exc. frame */
#define EXCEPTION_PROLOG_2      \
        stw     r10,_CCR(r11);          /* save registers */ \
        stw     r12,GPR12(r11); \
        mfspr   r10,SPRN_SPRG_SCRATCH0; \
        stw     r10,GPR10(r11); \
        mfspr   r12,SPRN_SPRG_SCRATCH1; \
        stw     r12,GPR11(r11); \
        mflr    r10;            \
        stw     r10,_LINK(r11); \
        mfspr   r12,SPRN_SRR0;  \
        mfspr   r9,SPRN_SRR1;   \
        tovirt(r1,r11);                 /* set new kernel sp */ \
        li      r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
        MTMSRD(r10);                    /* (except for mach check in rtas) */ \
        lis     r10,STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */ \
        addi    r10,r10,STACK_FRAME_REGS_MARKER@l; \
        stw     r10,8(r11);     \
        SAVE_4GPRS(3, r11)
/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */
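
/*
 * In short: the scratch SPRGs preserve r10/r11 across the prolog,
 * EXCEPTION_PROLOG_1 picks the kernel stack (the current r1 if we
 * came from the kernel, otherwise the task's kernel stack found via
 * SPRG_THREAD) and carves an exception frame out of it, and
 * EXCEPTION_PROLOG_2 fills that frame with the interrupted context.
 */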
#define EXCEPTION(n, label, hdlr, xfer) \
        . = n;                  \
label:                          \
        EXCEPTION_PROLOG;       \
        addi    r3,r1,STACK_FRAME_OVERHEAD;     \
        xfer(n, hdlr)

#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)     \
        li      r10,trap;       \
        stw     r10,_TRAP(r11); \
        li      r10,MSR_KERNEL; \
        copyee(r10, r9);        \
        bl      tfer;           \
i##n:                           \
        .long   hdlr;           \
        .long   ret

#define COPY_EE(d, s)           rlwimi d,s,0,16,16
#define NOCOPY(d, s)

#define EXC_XFER_STD(n, hdlr)           \
        EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
                          ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)          \
        EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
                          ret_from_except)

#define EXC_XFER_EE(n, hdlr)            \
        EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
                          ret_from_except_full)

#define EXC_XFER_EE_LITE(n, hdlr)       \
        EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
                          ret_from_except)
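
/*
 * The trap number stored in _TRAP is n for the "full" variants and
 * n+1 for the "lite" ones: bit 0 of the trap value marks a frame in
 * which the non-volatile GPRs were not saved, which is what the
 * lightweight transfer_to_handler/ret_from_except path leaves.
 * COPY_EE copies the MSR_EE bit from the saved SRR1 (r9) into the
 * MSR the handler will run with, so the handler inherits the
 * interrupted context's external-interrupt enable; NOCOPY leaves
 * MSR_KERNEL's setting alone.
 */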
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_exception) when done. */
        EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in SPRG2.  The machine check handler checks SPRG2 and uses its
 * value if it is non-zero.  If we ever needed to free up SPRG2,
 * we could use a field in the thread_info or thread_struct instead.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 */
        mtspr   SPRN_SPRG_SCRATCH0,r10
        mtspr   SPRN_SPRG_SCRATCH1,r11
#ifdef CONFIG_PPC_CHRP
        mfspr   r11,SPRN_SPRG_RTAS
#endif /* CONFIG_PPC_CHRP */
7:      EXCEPTION_PROLOG_2
        addi    r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
        mfspr   r4,SPRN_SPRG_RTAS
#endif
        EXC_XFER_STD(0x200, machine_check_exception)
#ifdef CONFIG_PPC_CHRP
1:      b       machine_check_in_rtas
#endif
/* Data access exception. */
        andis.  r0,r10,0xa470           /* weird error? */
        bne     1f                      /* if not, try to put a PTE */
        mfspr   r4,SPRN_DAR             /* into the hash table */
        rlwinm  r3,r10,32-15,21,21      /* DSISR_STORE -> _PAGE_RW */
1:      lwz     r5,_DSISR(r11)          /* get DSISR value */
        EXC_XFER_EE_LITE(0x300, handle_page_fault)

/* Instruction access exception. */
        andis.  r0,r9,0x4000            /* no pte found? */
        beq     1f                      /* if so, try to put a PTE */
        li      r3,0                    /* into the hash table */
        mr      r4,r12                  /* SRR0 is fault address */
        EXC_XFER_EE_LITE(0x400, handle_page_fault)
/* External interrupt */
        EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_EE(0x600, alignment_exception)

/* Program check exception */
        EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
/* Floating-point unavailable */
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have a FPU and treat fp instructions
 * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
 */
        b       ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
        bl      load_up_fpu             /* if from user, just load it up */
        b       fast_exception_return
1:      addi    r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)

/* Decrementer */
        EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

        EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)

/* System call */
        EXC_XFER_EE_LITE(0xc00, DoSyscall)
/* Single step - not used on 601 */
        EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
        EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 *
 * r1: linux style pte ( later becomes ppc hardware pte )
 * r2: ptr to linux-style pte
 */
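
/*
 * The 603/603e has no hardware page-table walk: TLB misses vector
 * here with MSR[TGPR] set, which gives the handler a shadow copy of
 * r0-r3 to scratch in.  The handler walks the Linux page tables,
 * converts the Linux PTE into the hardware TLB entry format and
 * loads it; if no valid mapping is found, it turns the miss into a
 * normal instruction/data access exception instead.
 */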
        /* Get PTE (linux-style) and check access */
        lis     r1,PAGE_OFFSET@h        /* check if kernel address */
        mfspr   r2,SPRN_SPRG_THREAD
        li      r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
        mfspr   r2,SPRN_SRR1            /* and MSR_PR bit from SRR1 */
        rlwimi  r1,r2,32-12,29,29       /* shift MSR_PR to _PAGE_USER posn */
        lis     r2,swapper_pg_dir@ha    /* if kernel address, use */
        addi    r2,r2,swapper_pg_dir@l  /* kernel page table */
        rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
        lwz     r2,0(r2)                /* get pmd entry */
        rlwinm. r2,r2,0,0,19            /* extract address of pte page */
        beq-    InstructionAddressInvalid /* return if no mapping */
        rlwimi  r2,r3,22,20,29          /* insert next 10 bits of address */
        lwz     r0,0(r2)                /* get linux-style pte */
        andc.   r1,r1,r0                /* check access & ~permission */
        bne-    InstructionAddressInvalid /* return if access not permitted */
        ori     r0,r0,_PAGE_ACCESSED    /* set _PAGE_ACCESSED in pte */
        /*
         * NOTE! We are assuming this is not an SMP system, otherwise
         * we would need to update the pte atomically with lwarx/stwcx.
         */
        stw     r0,0(r2)                /* update PTE (accessed bit) */
        /* Convert linux-style PTE to low word of PPC-style PTE */
        rlwinm  r1,r0,32-10,31,31       /* _PAGE_RW -> PP lsb */
        rlwinm  r2,r0,32-7,31,31        /* _PAGE_DIRTY -> PP lsb */
        and     r1,r1,r2                /* writable if _RW and _DIRTY */
        rlwimi  r0,r0,32-1,30,30        /* _PAGE_USER -> PP msb */
        rlwimi  r0,r0,32-1,31,31        /* _PAGE_USER -> PP lsb */
        ori     r1,r1,0xe04             /* clear out reserved bits */
        andc    r1,r0,r1                /* PP = user? (rw&dirty? 2: 3): 0 */
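        /*
         * Net effect of the conversion: the two rlwimi ops copy
         * _PAGE_USER into both PP bits, giving PP = 0b11 for user
         * pages and 0b00 for kernel pages; r1 holds the bits to
         * clear (reserved/software bits 0xe04, plus the PP lsb when
         * the page is both writable and dirty).  After the andc, a
         * writable+dirty user page gets PP = 0b10 (user read/write),
         * other user pages get PP = 0b11 (read-only), and kernel
         * pages get PP = 0b00 (supervisor read/write).
         */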
BEGIN_FTR_SECTION
        rlwinm  r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
        mfspr   r3,SPRN_SRR1            /* Need to restore CR0 */

InstructionAddressInvalid:
        rlwinm  r1,r3,9,6,6             /* Get load/store bit */
        mtspr   SPRN_DSISR,r1           /* (shouldn't be needed) */
        andi.   r2,r3,0xFFFF            /* Clear upper bits of SRR1 */
        mfspr   r1,SPRN_IMISS           /* Get failing address */
        rlwinm. r2,r2,0,31,31           /* Check for little endian access */
        rlwimi  r2,r2,1,30,30           /* change 1 -> 3 */
        mtspr   SPRN_DAR,r1             /* Set fault address */
        mfmsr   r0                      /* Restore "normal" registers */
        xoris   r0,r0,MSR_TGPR>>16
        mtcrf   0x80,r3                 /* Restore CR0 */
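        /*
         * Clearing MSR[TGPR] switches back from the shadow r0-r3 the
         * 603 supplies during TLB-miss handling to the interrupted
         * context's real GPRs before we hand off to the normal
         * exception path.
         */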
/*
 * Handle TLB miss for DATA Load operation on 603/603e
 *
 * r1: linux style pte ( later becomes ppc hardware pte )
 * r2: ptr to linux-style pte
 */
        /* Get PTE (linux-style) and check access */
        lis     r1,PAGE_OFFSET@h        /* check if kernel address */
        mfspr   r2,SPRN_SPRG_THREAD
        li      r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
        mfspr   r2,SPRN_SRR1            /* and MSR_PR bit from SRR1 */
        rlwimi  r1,r2,32-12,29,29       /* shift MSR_PR to _PAGE_USER posn */
        lis     r2,swapper_pg_dir@ha    /* if kernel address, use */
        addi    r2,r2,swapper_pg_dir@l  /* kernel page table */
        rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
        lwz     r2,0(r2)                /* get pmd entry */
        rlwinm. r2,r2,0,0,19            /* extract address of pte page */
        beq-    DataAddressInvalid      /* return if no mapping */
        rlwimi  r2,r3,22,20,29          /* insert next 10 bits of address */
        lwz     r0,0(r2)                /* get linux-style pte */
        andc.   r1,r1,r0                /* check access & ~permission */
        bne-    DataAddressInvalid      /* return if access not permitted */
        ori     r0,r0,_PAGE_ACCESSED    /* set _PAGE_ACCESSED in pte */
        /*
         * NOTE! We are assuming this is not an SMP system, otherwise
         * we would need to update the pte atomically with lwarx/stwcx.
         */
        stw     r0,0(r2)                /* update PTE (accessed bit) */
        /* Convert linux-style PTE to low word of PPC-style PTE */
        rlwinm  r1,r0,32-10,31,31       /* _PAGE_RW -> PP lsb */
        rlwinm  r2,r0,32-7,31,31        /* _PAGE_DIRTY -> PP lsb */
        and     r1,r1,r2                /* writable if _RW and _DIRTY */
        rlwimi  r0,r0,32-1,30,30        /* _PAGE_USER -> PP msb */
        rlwimi  r0,r0,32-1,31,31        /* _PAGE_USER -> PP lsb */
        ori     r1,r1,0xe04             /* clear out reserved bits */
        andc    r1,r0,r1                /* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
        rlwinm  r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
        mfspr   r2,SPRN_SRR1            /* Need to restore CR0 */

BEGIN_MMU_FTR_SECTION
        mfspr   r1,SPRN_SPRG_603_LRU
        rlwinm  r2,r3,20,27,31          /* Get Address bits 15:19 */
        mtspr   SPRN_SPRG_603_LRU,r1
        rlwimi  r2,r0,31-14,14,14
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
DataAddressInvalid:
        rlwinm  r1,r3,9,6,6             /* Get load/store bit */
        andi.   r2,r3,0xFFFF            /* Clear upper bits of SRR1 */
        mfspr   r1,SPRN_DMISS           /* Get failing address */
        rlwinm. r2,r2,0,31,31           /* Check for little endian access */
        beq     20f                     /* Jump if big endian */
20:     mtspr   SPRN_DAR,r1             /* Set fault address */
        mfmsr   r0                      /* Restore "normal" registers */
        xoris   r0,r0,MSR_TGPR>>16
        mtcrf   0x80,r3                 /* Restore CR0 */
/*
 * Handle TLB miss for DATA Store on 603/603e
 *
 * r1: linux style pte ( later becomes ppc hardware pte )
 * r2: ptr to linux-style pte
 */
        /* Get PTE (linux-style) and check access */
        lis     r1,PAGE_OFFSET@h        /* check if kernel address */
        mfspr   r2,SPRN_SPRG_THREAD
        li      r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
        mfspr   r2,SPRN_SRR1            /* and MSR_PR bit from SRR1 */
        rlwimi  r1,r2,32-12,29,29       /* shift MSR_PR to _PAGE_USER posn */
        lis     r2,swapper_pg_dir@ha    /* if kernel address, use */
        addi    r2,r2,swapper_pg_dir@l  /* kernel page table */
        rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
        lwz     r2,0(r2)                /* get pmd entry */
        rlwinm. r2,r2,0,0,19            /* extract address of pte page */
        beq-    DataAddressInvalid      /* return if no mapping */
        rlwimi  r2,r3,22,20,29          /* insert next 10 bits of address */
        lwz     r0,0(r2)                /* get linux-style pte */
        andc.   r1,r1,r0                /* check access & ~permission */
        bne-    DataAddressInvalid      /* return if access not permitted */
        ori     r0,r0,_PAGE_ACCESSED|_PAGE_DIRTY
        /*
         * NOTE! We are assuming this is not an SMP system, otherwise
         * we would need to update the pte atomically with lwarx/stwcx.
         */
        stw     r0,0(r2)                /* update PTE (accessed/dirty bits) */
        /* Convert linux-style PTE to low word of PPC-style PTE */
        rlwimi  r0,r0,32-1,30,30        /* _PAGE_USER -> PP msb */
        li      r1,0xe05                /* clear out reserved bits & PP lsb */
        andc    r1,r0,r1                /* PP = user? 2: 0 */
BEGIN_FTR_SECTION
        rlwinm  r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
        mfspr   r2,SPRN_SRR1            /* Need to restore CR0 */

BEGIN_MMU_FTR_SECTION
        mfspr   r1,SPRN_SPRG_603_LRU
        rlwinm  r2,r3,20,27,31          /* Get Address bits 15:19 */
        mtspr   SPRN_SPRG_603_LRU,r1
        rlwimi  r2,r0,31-14,14,14
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception        unknown_exception
#endif
        EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
        EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
        EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
        EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
        EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
        EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
        EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)

        .globl  mol_trampoline
        .set    mol_trampoline, i0x2f00
#ifdef CONFIG_ALTIVEC
        bl      load_up_altivec         /* if from user, just load it up */
        b       fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:      addi    r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)

PerformanceMonitor:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_STD(0xf00, performance_monitor_exception)
/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.
 */
relocate_kernel:
        addis   r9,r26,klimit@ha        /* fetch klimit */
        lwz     r25,klimit@l(r9)
        addis   r25,r25,-KERNELBASE@h
        lis     r3,PHYSICAL_START@h     /* Destination base address */
        li      r6,0                    /* Destination offset */
        li      r5,0x4000               /* # bytes of memory to copy */
        bl      copy_and_flush          /* copy the first 0x4000 bytes */
        addi    r0,r3,4f@l              /* jump to the address of 4f */
        mtctr   r0                      /* in copy and do the rest. */
        bctr                            /* jump to the copy */
4:      mr      r5,r25                  /* copy limit = klimit */
        bl      copy_and_flush          /* copy the rest */
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
_ENTRY(copy_and_flush)
4:      li      r0,L1_CACHE_BYTES/4
        mtctr   r0
3:      addi    r6,r6,4                 /* copy a cache line */
        dcbst   r6,r3                   /* write it to memory */
        sync                            /* wait for dcbst to complete */
        icbi    r6,r3                   /* flush the icache line */
        sync                            /* additional sync needed on g4 */
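        /*
         * The dcbst/icbi pairing is the architected sequence for
         * making freshly copied instructions visible: dcbst pushes
         * each modified data cache line out to memory and icbi
         * invalidates any stale copy of that line in the instruction
         * cache, with sync ensuring the stores complete before we
         * branch into the new copy.
         */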
        .globl  __secondary_start_gemini
__secondary_start_gemini:
#endif /* CONFIG_GEMINI */

        .globl  __secondary_start_mpc86xx
__secondary_start_mpc86xx:
        stw     r3, __secondary_hold_acknowledge@l(0)
        mr      r24, r3                 /* cpu # */

        .globl  __secondary_start_pmac_0
__secondary_start_pmac_0:
        /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */

        /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
           set to map the 0xf0000000 - 0xffffffff region */
        rlwinm  r0,r0,0,28,26           /* clear DR (0x10) */
        .globl  __secondary_start
__secondary_start:
        /* Copy some CPU settings from CPU 0 */
        bl      __restore_cpu_setup

        bl      call_setup_cpu          /* Call setup_cpu for this CPU */
#endif /* CONFIG_6xx */

        /* get current_thread_info and current */
        lis     r1,secondary_ti@ha
        lwz     r1,secondary_ti@l(r1)
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD

        /* load up the MMU */
        bl      load_up_mmu

        /* ptr to phys current thread */
        addi    r4,r4,THREAD            /* phys address of our thread_struct */
        mtspr   SPRN_SPRG_THREAD,r4
        mtspr   SPRN_SPRG_RTAS,r3       /* 0 => not in RTAS */

        /* enable MMU and jump to start_secondary */
        lis     r3,start_secondary@h
        ori     r3,r3,start_secondary@l
#endif /* CONFIG_SMP */
/*
 * These generic dummy functions are kept for CPUs not
 * included in CONFIG_6xx
 */
#if !defined(CONFIG_6xx)
_ENTRY(__save_cpu_setup)
        blr
_ENTRY(__restore_cpu_setup)
        blr
#endif /* !defined(CONFIG_6xx) */
/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
load_up_mmu:
        sync                            /* Force all PTE updates to finish */
        tlbia                           /* Clear all TLB entries */
        sync                            /* wait for tlbia/tlbie to finish */
        TLBSYNC                         /* ... on all CPUs */
        /* Load the SDR1 register (hash table base & size) */
        li      r0,16                   /* load up segment register values */
        mtctr   r0                      /* for context 0 */
        lis     r3,0x2000               /* Ku = 1, VSID = 0 */
        li      r4,0
3:      mtsrin  r3,r4
        addi    r3,r3,0x111             /* increment VSID */
        addis   r4,r4,0x1000            /* address of next segment */
        bdnz    3b
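
/*
 * Each of the 16 segment registers covers 256MB of effective address
 * space, hence the addis of 0x1000 (i.e. 0x10000000) per iteration.
 * The VSIDs for kernel context 0 are simply spaced 0x111 apart, and
 * Ku=1 selects the user protection key so the PP bits in the PTEs
 * decide what user mode may touch.
 */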
        /* Load the BAT registers with the values set up by MMU_init.
           MMU_init takes care of whether we're on a 601 or not. */
BEGIN_MMU_FTR_SECTION
        LOAD_BAT(4,r3,r4,r5)
        LOAD_BAT(5,r3,r4,r5)
        LOAD_BAT(6,r3,r4,r5)
        LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
/*
 * This is where the main kernel code starts.
 */
start_here:
        /* ptr to current */
        lis     r2,init_task@h
        ori     r2,r2,init_task@l
        /* Set up for using our exception vectors */
        /* ptr to phys current thread */
        addi    r4,r4,THREAD            /* init task's THREAD */
        mtspr   SPRN_SPRG_THREAD,r4
        mtspr   SPRN_SPRG_RTAS,r3       /* 0 => not in RTAS */

        /* stack */
        lis     r1,init_thread_union@ha
        addi    r1,r1,init_thread_union@l
        li      r0,0
        stwu    r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
        li      r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)

        /* Load up the kernel context */
#ifdef CONFIG_BDI_SWITCH
        /* Add helper information for the Abatron bdiGDB debugger.
         * We do this here because we know the mmu is disabled, and
         * will be enabled for real in just a few instructions.
         */
        lis     r5, abatron_pteptrs@h
        ori     r5, r5, abatron_pteptrs@l
        stw     r5, 0xf0(r0)            /* This must match your Abatron config */
        lis     r6, swapper_pg_dir@h
        ori     r6, r6, swapper_pg_dir@l
        tophys(r5, r5)
        stw     r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */
/* Now turn on the MMU for real! */
        lis     r3,start_kernel@h
        ori     r3,r3,start_kernel@l
/*
 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 *
 * Set up the segment registers for a new context.
 */
_ENTRY(switch_mmu_context)
        lwz     r3,MMCONTEXTID(r4)
        mulli   r3,r3,897               /* multiply context by skew factor */
        rlwinm  r3,r3,4,8,27            /* VSID = (context & 0xfffff) << 4 */
        addis   r3,r3,0x6000            /* Set Ks, Ku bits */
        li      r0,NUM_USER_SEGMENTS
        mtctr   r0
#ifdef CONFIG_BDI_SWITCH
        /* Context switch the PTE pointer for the Abatron BDI2000.
         * The PGDIR is passed as second argument.
         */
        lis     r5, KERNELBASE@h
#endif /* CONFIG_BDI_SWITCH */

3:      mtsrin  r3,r4
        addi    r3,r3,0x111             /* next VSID */
        rlwinm  r3,r3,0,8,3             /* clear out any overflow from VSID field */
        addis   r4,r4,0x1000            /* address of next segment */
        bdnz    3b

        EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
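
/*
 * About the constants: multiplying the context by 897 and stepping
 * 0x111 per segment spreads the VSIDs of neighbouring contexts and
 * segments across the VSID space so their hash buckets don't
 * collide; both constants are odd, so the mapping stays one-to-one
 * within the VSID field (overflow is masked off by the rlwinm).
 */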
/*
 * An undocumented "feature" of the 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear bat3 and 4 so
 * this makes sure it's done.
 */
clear_bats:
        rlwinm  r9,r9,16,16,31          /* r9 = 1 for 601, 4 for 604 */

        mtspr   SPRN_DBAT0U,r10
        mtspr   SPRN_DBAT0L,r10
        mtspr   SPRN_DBAT1U,r10
        mtspr   SPRN_DBAT1L,r10
        mtspr   SPRN_DBAT2U,r10
        mtspr   SPRN_DBAT2L,r10
        mtspr   SPRN_DBAT3U,r10
        mtspr   SPRN_DBAT3L,r10
        mtspr   SPRN_IBAT0U,r10
        mtspr   SPRN_IBAT0L,r10
        mtspr   SPRN_IBAT1U,r10
        mtspr   SPRN_IBAT1L,r10
        mtspr   SPRN_IBAT2U,r10
        mtspr   SPRN_IBAT2L,r10
        mtspr   SPRN_IBAT3U,r10
        mtspr   SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
        /* Here's a tweak: at this point, CPU setup has
         * not been called yet, so HIGH_BAT_EN may not be
         * set in HID0 for the 745x processors.  However, it
         * seems that doesn't affect our ability to actually
         * write to these SPRs.
         */
        mtspr   SPRN_DBAT4U,r10
        mtspr   SPRN_DBAT4L,r10
        mtspr   SPRN_DBAT5U,r10
        mtspr   SPRN_DBAT5L,r10
        mtspr   SPRN_DBAT6U,r10
        mtspr   SPRN_DBAT6L,r10
        mtspr   SPRN_DBAT7U,r10
        mtspr   SPRN_DBAT7L,r10
        mtspr   SPRN_IBAT4U,r10
        mtspr   SPRN_IBAT4L,r10
        mtspr   SPRN_IBAT5U,r10
        mtspr   SPRN_IBAT5L,r10
        mtspr   SPRN_IBAT6U,r10
        mtspr   SPRN_IBAT6L,r10
        mtspr   SPRN_IBAT7U,r10
        mtspr   SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1:      addic.  r10, r10, -0x1000

        addi    r4, r3, __after_mmu_off - _start
        andi.   r0,r3,MSR_DR|MSR_IR     /* MMU enabled? */
/*
 * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET
 * (we keep one for debugging) and on others, we use one 256M BAT.
 */
        lis     r11,PAGE_OFFSET@h
        rlwinm  r9,r9,16,16,31          /* r9 = 1 for 601, 4 for 604 */

        ori     r11,r11,4               /* set up BAT registers for 601 */
        li      r8,0x7f                 /* valid, block length = 8MB */
        mtspr   SPRN_IBAT0U,r11         /* N.B. 601 has valid bit in */
        mtspr   SPRN_IBAT0L,r8          /* lower BAT register */
        addis   r11,r11,0x800000@h
        addis   r8,r8,0x800000@h
        mtspr   SPRN_IBAT1U,r11
        mtspr   SPRN_IBAT1L,r8
        addis   r11,r11,0x800000@h
        addis   r8,r8,0x800000@h
        mtspr   SPRN_IBAT2U,r11
        mtspr   SPRN_IBAT2L,r8
#ifdef CONFIG_SMP
        ori     r8,r8,0x12              /* R/W access, M=1 */
#else
        ori     r8,r8,2                 /* R/W access */
#endif /* CONFIG_SMP */
        ori     r11,r11,BL_256M<<2|0x2  /* set up BAT registers for 604 */
        mtspr   SPRN_DBAT0L,r8          /* N.B. 6xx (not 601) have valid */
        mtspr   SPRN_DBAT0U,r11         /* bit in upper BAT register */
        mtspr   SPRN_IBAT0L,r8
        mtspr   SPRN_IBAT0U,r11
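
/*
 * BAT encoding reminder: the upper word holds BEPI, the block length
 * and the Vs/Vp valid bits, so BL_256M<<2|0x2 describes a 256MB
 * supervisor-valid block; the lower word holds the physical block
 * number plus the WIMG and PP bits, so 0x12 is read/write access
 * with the M (coherence) bit set for SMP and 2 is plain read/write.
 */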
#ifdef CONFIG_BOOTX_TEXT
setup_disp_bat:
        /*
         * setup the display bat prepared for us in prom.c
         */
        addis   r8,r3,disp_BAT@ha
        addi    r8,r8,disp_BAT@l
        rlwinm  r9,r9,16,16,31          /* r9 = 1 for 601, 4 for 604 */
        mtspr   SPRN_DBAT3L,r8
        mtspr   SPRN_DBAT3U,r11
1:      mtspr   SPRN_IBAT3L,r8
        mtspr   SPRN_IBAT3U,r11
#endif /* CONFIG_BOOTX_TEXT */
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
setup_cpm_bat:
        mtspr   SPRN_DBAT1L, r8
        ori     r11, r11, (BL_1M << 2) | 2
        mtspr   SPRN_DBAT1U, r11
        blr
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
setup_usbgecko_bat:
        /* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
        lis     r8, 0x0c00
#elif defined(CONFIG_WII)
        lis     r8, 0x0d00
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
        /*
         * The virtual address used must match the virtual address
         * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
         */
        lis     r11, 0xfffe             /* top 128K */
        ori     r8, r8, 0x002a          /* uncached, guarded, rw */
        ori     r11, r11, 0x2           /* 128K, Vs=1, Vp=0 */
        mtspr   SPRN_DBAT1L, r8
        mtspr   SPRN_DBAT1U, r11
        blr
#endif
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
        rlwinm  r0,r0,0,17,15           /* clear MSR_EE in r0 */
        mfspr   r11, SPRN_HID0
        ori     r10,r10,HID0_ICE|HID0_DCE
        mtspr   SPRN_HID0, r11
        li      r5, MSR_ME|MSR_RI
        addis   r6,r6,-KERNELBASE@h
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
        .data
        .globl  empty_zero_page
empty_zero_page:
        .space  4096

        .globl  swapper_pg_dir
swapper_pg_dir:
        .space  PGD_TABLE_SIZE

        .globl  intercept_table
intercept_table:
        .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
        .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
        .long 0, 0, 0, i0x1300, 0, 0, 0, 0
        .long 0, 0, 0, 0, 0, 0, 0, 0
        .long 0, 0, 0, 0, 0, 0, 0, 0
        .long 0, 0, 0, 0, 0, 0, 0, 0

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
        .space  8