/*
 * arch/ppc/kernel/head_44x.S
 *
 * Kernel execution entry point code.
 *
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *	Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *	Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *	PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *	PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *	PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 *	PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		frank_rowand@mvista.com or source@mvista.com
 *		debbie_chu@mvista.com
 *    Copyright 2002-2005 MontaVista Software, Inc.
 *	PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/ibm4xx.h>
#include <asm/ibm44x.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include "head_booke.h"
/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 */
	.text
_GLOBAL(_stext)
_GLOBAL(_start)
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs.
	 */
	nop
/*
 * Save parameters we are passed.
 */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	li	r24,0		/* CPU number */
/*
 * Set up the initial MMU state
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 *
 * We first invalidate all TLB entries but the one
 * we are running from.  We then load the KERNELBASE
 * mappings so we can begin to use kernel addresses
 * natively and so the interrupt vector locations are
 * permanently pinned (necessary since Book E
 * implementations always have translation enabled).
 *
 * TODO: Use the known TLB entry we are running from to
 *	 determine which physical region we are located
 *	 in.  This can be used to determine where in RAM
 *	 (on a shared CPU system) or PCI memory space
 *	 (on a DRAMless system) we are located.
 *	 For now, we assume a perfect world which means
 *	 we are located at the base of DRAM (physical 0).
 */
/*
 * Search TLB for entry that we are currently using.
 * Invalidate all entries but the one we are using.
 */
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID			/* Get PID */
	mfmsr	r4				/* Get MSR */
	andi.	r4,r4,MSR_IS@l			/* TS=1? */
	beq	wmmucr				/* If not, leave STS=0 */
	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
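	/*
	 * The TID and STS values programmed into MMUCR above are what the
	 * tlbsx search below matches against, and what later tlbwe
	 * instructions tag new entries with; that is also why the fault
	 * handlers further down reload MMUCR's TID from the PID before
	 * writing TLB entries.
	 */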
	bl	invstr				/* Find our address */
invstr:	mflr	r5				/* Make it accessible */
	tlbsx	r23,0,r5			/* Find entry we are in */
	li	r4,0				/* Start at TLB entry 0 */
	li	r3,0				/* Set PAGEID inval value */
1:	cmpw	r23,r4				/* Is this our entry? */
	beq	skpinv				/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
skpinv:	addi	r4,r4,1				/* Increment */
	cmpwi	r4,64				/* Are we done? */
	bne	1b				/* If not, repeat */
	isync					/* If so, context change */
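	/*
	 * In C terms the loop above is roughly (writing a zero PAGEID
	 * word clears an entry's valid bit):
	 *
	 *	for (i = 0; i < 64; i++)
	 *		if (i != entry_we_run_from)
	 *			tlb[i].pageid = 0;
	 */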
/*
 * Configure and load pinned entry into TLB slot 63.
 */

	lis	r3,KERNELBASE@h		/* Load the kernel virtual address */
	ori	r3,r3,KERNELBASE@l

	/* Kernel is at the base of RAM */
	li	r4, 0			/* Load the kernel physical address */

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

	clrrwi	r3,r3,10		/* Mask off the effective page number */
	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M

	clrrwi	r4,r4,10		/* Mask off the real page number */
					/* ERPN is 0 for first 4GB page */
	/* Add the guarded bit to protect against speculative loads/stores */
	li	r5,0
	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	li	r0,63			/* TLB slot 63 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
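	/*
	 * Slot 63 now pins, roughly (per the three words just written):
	 *	PAGEID: EPN = KERNELBASE, valid, 256MB page size
	 *	XLAT:   RPN = 0, ERPN = 0 (kernel at physical address 0)
	 *	ATTRIB: SR|SW|SX (supervisor read/write/execute), guarded
	 */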
	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* If necessary, invalidate original entry we used */
3:	cmpwi	r23,63
	beq	4f
	li	r6,0
	tlbwe	r6,r23,PPC44x_TLB_PAGEID
	isync

4:
#ifdef CONFIG_SERIAL_TEXT_DEBUG
	/*
	 * Add temporary UART mapping for early debug. This
	 * mapping must be identical to that used by the early
	 * bootloader code since the same asm/serial.h parameters
	 * are used for polled operation.
	 */
	lis	r3,UART0_IO_BASE@h
	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M

	lis	r4,UART0_PHYS_IO_BASE@h	/* RPN depends on SoC */
	ori	r4,r4,0x0001		/* ERPN is 1 for second 4GB page */
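	/*
	 * Unlike the pinned kernel entry above, this mapping covers device
	 * registers, so the attributes below also set I (cache inhibited)
	 * in addition to G (guarded against speculative access).
	 */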
	li	r5,0
	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G)

	li	r0,1			/* TLB slot 1 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
	/* Force context change */
	isync
#endif /* CONFIG_SERIAL_TEXT_DEBUG */
	/* Establish the interrupt vector offsets */
	SET_IVOR(0, CriticalInput);
	SET_IVOR(1, MachineCheck);
	SET_IVOR(2, DataStorage);
	SET_IVOR(3, InstructionStorage);
	SET_IVOR(4, ExternalInput);
	SET_IVOR(5, Alignment);
	SET_IVOR(6, Program);
	SET_IVOR(7, FloatingPointUnavailable);
	SET_IVOR(8, SystemCall);
	SET_IVOR(9, AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError);
	SET_IVOR(14, InstructionTLBError);
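	/*
	 * SET_IVOR (defined in head_booke.h) loads the low half of each
	 * handler label into the corresponding IVORn register; the IVPR
	 * write below supplies the common high 16 bits, so together they
	 * form the full vector addresses.
	 */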
	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4
/*
 * This is where the main kernel code starts.
 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD		/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init
	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */
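	/*
	 * rfi reloads the MSR from SRR1 and resumes at the address held in
	 * SRR0, so the sequence above switches to MSR_KERNEL and enters
	 * start_kernel in a single operation.
	 */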
/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException)

	/* Machine Check Interrupt */
#ifdef CONFIG_440A
	MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
#else
	CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException)
#endif
	/* Data Storage Interrupt */
	START_EXCEPTION(DataStorage)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
	mtspr	SPRN_SPRG4W, r12
	mtspr	SPRN_SPRG5W, r13
	mfcr	r11
	mtspr	SPRN_SPRG7W, r11

	/*
	 * Check if it was a store fault, if not then bail
	 * because a user tried to access a kernel or
	 * read-protected page.  Otherwise, get the
	 * offending address and handle it.
	 */
	mfspr	r10, SPRN_ESR
	andis.	r10, r10, ESR_ST@h
	beq	2f

	mfspr	r10, SPRN_DEAR		/* Get faulting address */
	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	andis.	r11, r10, 0x8000
	beq	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */
	mtspr	SPRN_MMUCR,r12
	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR		/* Get MMUCR */
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */
	mtspr	SPRN_MMUCR,r12

4:
	rlwinm	r12, r10, 13, 19, 29	/* Compute pgdir/pmd offset */
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 23, 20, 28	/* Compute pte address */
	lwz	r11, 4(r12)		/* Get pte entry */
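	/*
	 * A rough C sketch of the two-level walk above, assuming 4K pages,
	 * 64-bit PTEs and the 2048-entry pgd implied by the 8KB
	 * swapper_pg_dir at the end of this file:
	 *
	 *	pmd    = *(u32 *)(pgd_base + ((ea >> 21) << 2));
	 *	pte_lo = *(u32 *)((pmd & ~0x7ff) +
	 *			  (((ea >> 12) & 0x1ff) << 3) + 4);
	 *
	 * The "+ 4" picks up the least-significant (flags) word of the
	 * big-endian 64-bit PTE.
	 */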
	andi.	r13, r11, _PAGE_RW	/* Is it writeable? */
	beq	2f			/* Bail if not */

	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
	stw	r11, 4(r12)		/* Update Linux page table */
	li	r13, PPC44x_TLB_SR@l	/* Set SR */
	rlwimi	r13, r11, 29, 29, 29	/* SX = _PAGE_HWEXEC */
	rlwimi	r13, r11, 0, 30, 30	/* SW = _PAGE_RW */
	rlwimi	r13, r11, 29, 28, 28	/* UR = _PAGE_USER */
	rlwimi	r12, r11, 31, 26, 26	/* (_PAGE_USER>>1)->r12 */
	rlwimi	r12, r11, 29, 30, 30	/* (_PAGE_USER>>3)->r12 */
	and	r12, r12, r11		/* HWEXEC/RW & USER */
	rlwimi	r13, r12, 0, 26, 26	/* UX = HWEXEC & USER */
	rlwimi	r13, r12, 3, 27, 27	/* UW = RW & USER */
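	/*
	 * In C terms, the rlwimi sequence above builds (a sketch based on
	 * the per-instruction comments; the bit positions live in the
	 * rotate/mask encodings):
	 *
	 *	attr = SR;
	 *	if (pte & _PAGE_HWEXEC)	attr |= SX;
	 *	if (pte & _PAGE_RW)	attr |= SW;
	 *	if (pte & _PAGE_USER)	attr |= UR;
	 *	if ((pte & _PAGE_HWEXEC) && (pte & _PAGE_USER)) attr |= UX;
	 *	if ((pte & _PAGE_RW) && (pte & _PAGE_USER))	attr |= UW;
	 */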
	rlwimi	r11,r13,0,26,31		/* Insert static perms */

	rlwinm	r11,r11,0,20,15		/* Clear U0-U3 */

	/* find the TLB index that caused the fault.  It has to be here. */
	tlbsx	r10, 0, r10

	tlbwe	r11, r10, PPC44x_TLB_ATTRIB	/* Write ATTRIB */
	/* Done...restore registers and get out of here. */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	rfi			/* Force context change */

2:
	/*
	 * The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	data_access
	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
	EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE)
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE)
	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x1010, FixedIntervalTimer, UnknownException, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
	/* TODO: Add watchdog support */
	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, UnknownException)
	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
	mtspr	SPRN_SPRG4W, r12
	mtspr	SPRN_SPRG5W, r13
	mfcr	r11
	mtspr	SPRN_SPRG7W, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */
	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	andis.	r11, r10, 0x8000
	beq	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */
	mtspr	SPRN_MMUCR,r12
	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR		/* Get MMUCR */
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */
	mtspr	SPRN_MMUCR,r12

4:
	rlwinm	r12, r10, 13, 19, 29	/* Compute pgdir/pmd offset */
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 23, 20, 28	/* Compute pte address */
	lwz	r11, 4(r12)		/* Get pte entry */
	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
	beq	2f			/* Bail if not present */

	ori	r11, r11, _PAGE_ACCESSED
	stw	r11, 4(r12)

	/* Jump to common tlb load */
	b	finish_tlb_load

2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	data_access
	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
	mtspr	SPRN_SPRG4W, r12
	mtspr	SPRN_SPRG5W, r13
	mfcr	r11
	mtspr	SPRN_SPRG7W, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */
	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	andis.	r11, r10, 0x8000
	beq	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */
	mtspr	SPRN_MMUCR,r12
	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR		/* Get MMUCR */
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */
	mtspr	SPRN_MMUCR,r12

4:
	rlwinm	r12, r10, 13, 19, 29	/* Compute pgdir/pmd offset */
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 23, 20, 28	/* Compute pte address */
	lwz	r11, 4(r12)		/* Get pte entry */
	andi.	r13, r11, _PAGE_PRESENT	/* Is the page present? */
	beq	2f			/* Bail if not present */

	ori	r11, r11, _PAGE_ACCESSED
	stw	r11, 4(r12)

	/* Jump to common TLB load point */
	b	finish_tlb_load

2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	InstructionStorage
	/* Debug Interrupt */
	DEBUG_EXCEPTION
	/*
	 * Data TLB exceptions will bail out to this point
	 * if they can't resolve the lightweight TLB fault.
	 */
data_access:
	NORMAL_EXCEPTION_PROLOG
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
/* Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - EA of fault
 *	r11 - available to use
 *	r12 - Pointer to the 64-bit PTE
 *	r13 - available to use
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load:
	/*
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * If shared is set, we cause a zero PID->TID load.
	 * Many of these bits are software only.  Bits we don't set
	 * here we (properly should) assume have the appropriate value.
	 */
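	/*
	 * Note: on the 440 MMU a TLB entry tagged with TID=0 matches any
	 * PID, which is what the "zero PID->TID load" mentioned above
	 * achieves for shared pages.
	 */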
	/* Load the next available TLB index */
	lis	r13, tlb_44x_index@ha
	lwz	r13, tlb_44x_index@l(r13)
	/* Load the TLB high watermark */
	lis	r11, tlb_44x_hwater@ha
	lwz	r11, tlb_44x_hwater@l(r11)

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1
	cmpw	0, r13, r11		/* reserve entries */
	ble	7f
	li	r13,0
7:
	/* Store the next available TLB index */
	lis	r11, tlb_44x_index@ha
	stw	r13, tlb_44x_index@l(r11)
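	/*
	 * In C terms (assuming tlb_44x_hwater is the last TLB slot
	 * available for demand-loaded entries, below the pinned ones):
	 *
	 *	if (++tlb_44x_index > tlb_44x_hwater)
	 *		tlb_44x_index = 0;
	 */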
	lwz	r11, 0(r12)			/* Get MS word of PTE */
	lwz	r12, 4(r12)			/* Get LS word of PTE */
	rlwimi	r11, r12, 0, 0, 19		/* Insert RPN */
	tlbwe	r11, r13, PPC44x_TLB_XLAT	/* Write XLAT */
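	/*
	 * The XLAT word written above combines the RPN from the PTE's
	 * least-significant word with the extended physical address bits
	 * (ERPN) kept in its most-significant word; this is what allows
	 * >32-bit physical addresses (see the 8KB pgdir note below).
	 */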
	/*
	 * Create PAGEID. This is the faulting address,
	 * page size, and valid flag.
	 */
	li	r11, PPC44x_TLB_VALID | PPC44x_TLB_4K
	rlwimi	r10, r11, 0, 20, 31		/* Insert valid and page size */
	tlbwe	r10, r13, PPC44x_TLB_PAGEID	/* Write PAGEID */
	li	r10, PPC44x_TLB_SR@l		/* Set SR */
	rlwimi	r10, r12, 0, 30, 30		/* Set SW = _PAGE_RW */
	rlwimi	r10, r12, 29, 29, 29		/* SX = _PAGE_HWEXEC */
	rlwimi	r10, r12, 29, 28, 28		/* UR = _PAGE_USER */
	rlwimi	r11, r12, 31, 26, 26		/* (_PAGE_USER>>1)->r11 */
	and	r11, r12, r11			/* HWEXEC & USER */
	rlwimi	r10, r11, 0, 26, 26		/* UX = HWEXEC & USER */

	rlwimi	r12, r10, 0, 26, 31		/* Insert static perms */
	rlwinm	r12, r12, 0, 20, 15		/* Clear U0-U3 */
	tlbwe	r12, r13, PPC44x_TLB_ATTRIB	/* Write ATTRIB */
	/* Done...restore registers and get out of here. */
	mfspr	r11, SPRN_SPRG7R
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	rfi					/* Force context change */

/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The 44x core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)
	blr

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * The 44x core does not have an FPU.
 */
_GLOBAL(giveup_fpu)
	blr

/*
 * extern void abort(void)
 *
 * At present, this routine just applies a system reset.
 */
_GLOBAL(abort)
	mfspr	r13,SPRN_DBCR0
	oris	r13,r13,DBCR0_RST_SYSTEM@h
	mtspr	SPRN_DBCR0,r13
_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr
/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	12
_GLOBAL(empty_zero_page)
	.space	4096

/*
 * To support >32-bit physical addresses, we use an 8KB pgdir.
 */
_GLOBAL(swapper_pg_dir)
	.space	8192
/* Reserved 4k for the critical exception stack & 4k for the machine
 * check stack per CPU for kernel mode exceptions */
exception_stack_bottom:
	.space	BOOKE_EXCEPTION_STACK_SIZE
_GLOBAL(exception_stack_top)
/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
 */

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8