 * Kernel execution entry point code.
 *
 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *	Initial PowerPC version.
 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *	Low-level exception handlers, MMU support, and rewrite.
 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *	PowerPC 8xx modifications.
 * Copyright (c) 1998-1999 TiVo, Inc.
 *	PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *	PowerPC 403GCX/405GP modifications.
 * Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 *	PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		frank_rowand@mvista.com or source@mvista.com
 *		debbie_chu@mvista.com
 * Copyright 2002-2004 MontaVista Software, Inc.
 *	PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 * Copyright 2004 Freescale Semiconductor, Inc
 *	PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/init.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include "head_booke.h"
/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 */
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
/*
 * Save parameters we are passed
 */
	li	r25,0			/* phys kernel start (low) */
	li	r24,0			/* CPU number */
	li	r23,0			/* phys kernel start (high) */
/* We try not to make any assumptions about how the boot loader
 * set up or used the TLBs.  We invalidate all mappings from the
 * boot loader and load a single entry in TLB1[0] to map the
 * first 64M of kernel memory.  Any boot info passed from the
 * bootloader needs to live in this first 64M.
 *
 * Requirement on bootloader:
 *  - The page we're executing in needs to reside in TLB1 and
 *    have IPROT=1.  If not, an invalidate broadcast could
 *    evict the entry we're currently executing in.
 *
 *  r3 = Index of TLB1 we're executing in
 *  r4 = Current MSR[IS]
 *  r5 = Index of TLB1 temp mapping
 *
 * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
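/*
 * Note on the MAS register usage in steps 1-8 below: MAS0[TLBSEL]
 * selects the TLB array and MAS0[ESEL] selects the entry within it,
 * so "lis rN,0x1000" followed by "rlwimi rN,<entry>,16,4,15" builds a
 * MAS0 value addressing entry <entry> of TLB1.  tlbre then reads that
 * entry into MAS1..MAS3 (plus MAS7 for the upper RPN bits on cores
 * with 36-bit physical addressing), and tlbwe writes the MAS registers
 * back out as a TLB entry.
 */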
/* 1. Find the index of the entry we're executing in */
	bl	invstr				/* Find our address */
invstr:	mflr	r6				/* Make it accessible */
	rlwinm	r4,r7,27,31,31			/* extract MSR[IS] */
	tlbsx	0,r6				/* search MSR[IS], SPID=PID0 */
	andis.	r7,r7,MAS1_VALID@h
	rlwinm	r7,r7,21,28,31			/* extract MMUCFG[NPIDS] */
	bne	match_TLB			/* skip if NPIDS != 3 */
	tlbsx	0,r6				/* search MSR[IS], SPID=PID1 */
	andis.	r7,r7,MAS1_VALID@h
	tlbsx	0,r6				/* Fall through, we had to match */
	rlwinm	r3,r7,16,20,31			/* Extract MAS0(Entry) */
	mfspr	r7,SPRN_MAS1			/* Ensure IPROT is set */
	oris	r7,r7,MAS1_IPROT@h
/* 2. Invalidate all entries except the entry we're executing in */
	mfspr	r9,SPRN_TLB1CFG
	li	r6,0				/* Set Entry counter to 0 */
1:	lis	r7,0x1000			/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r6,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r6) */
	rlwinm	r7,r7,0,2,31			/* Clear MAS1 Valid and IPROT */
	beq	skpinv				/* Don't update the current execution TLB */
skpinv:	addi	r6,r6,1				/* Increment */
	cmpw	r6,r9				/* Are we done? */
	bne	1b				/* If not, repeat */

/* Invalidate TLB0 */
/* Invalidate TLB1 */
/* 3. Setup a temp mapping and jump to it */
	andi.	r5, r3, 0x1			/* Find an entry not used and is non-zero */
	lis	r7,0x1000			/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r3) */

/* grab and fixup the RPN */
	mfspr	r6,SPRN_MAS1			/* extract MAS1[SIZE] */
	rlwinm	r6,r6,25,27,31
	slw	r6,r8,r6			/* convert to mask */
	bl	1f				/* Find our address */
#ifdef CONFIG_PHYS_64BIT
	ori	r8,r25,(MAS3_SX|MAS3_SW|MAS3_SR)

/* Just modify the entry ID and EPN for the temp mapping */
	lis	r7,0x1000			/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r5) */
	xori	r6,r4,1				/* Setup TMP mapping in the other address space */
	oris	r6,r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_4K))@l
	li	r7,0				/* temp EPN = 0 */
	slwi	r6,r6,5				/* setup new context with other address space */
	bl	1f				/* Find our address */
/* 4. Clear out PIDs & Search info */
	rlwinm	r7,r7,21,28,31			/* extract MMUCFG[NPIDS] */
	bne	2f				/* skip if NPIDS != 3 */

/* 5. Invalidate mapping we started in */
	lis	r7,0x1000			/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r3,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r3) */
	rlwinm	r6,r6,0,2,0			/* clear IPROT */

/* Invalidate TLB1 */
/* The mapping only needs to be cache-coherent on SMP */
#define M_IF_SMP	MAS2_M

/* 6. Setup KERNELBASE mapping in TLB1[0] */
	lis	r6,0x1000			/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
	lis	r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@h
	ori	r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@l
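/*
 * MAS2 holds the effective page number together with the WIMGE attribute
 * bits.  MAS2_VAL() builds that value from PAGE_OFFSET, the 64M page size
 * and the attribute flags; M_IF_SMP adds the M (memory coherence required)
 * bit only on SMP builds, as the comment above the #define notes.
 */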
/* 7. Jump to KERNELBASE mapping */
	lis	r6,(KERNELBASE & ~0xfff)@h
	ori	r6,r6,(KERNELBASE & ~0xfff)@l
	ori	r7,r7,MSR_KERNEL@l
	bl	1f				/* Find our address */
	rfi					/* start execution out of TLB1[0] entry */
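/*
 * The rfi above is what actually moves execution onto the new KERNELBASE
 * mapping: r6 holds the KERNELBASE-based target and r7 the kernel MSR
 * value, and once these land in SRR0/SRR1 the rfi switches the program
 * counter and MSR in a single operation, so the fetch address and the
 * address space change together rather than via a plain branch.
 */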
/* 8. Clear out the temp mapping */
2:	lis	r7,0x1000			/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r5) */
	rlwinm	r8,r8,0,2,0			/* clear IPROT */

/* Invalidate TLB1 */
/* Establish the interrupt vector offsets */
	SET_IVOR(0, CriticalInput);
	SET_IVOR(1, MachineCheck);
	SET_IVOR(2, DataStorage);
	SET_IVOR(3, InstructionStorage);
	SET_IVOR(4, ExternalInput);
	SET_IVOR(5, Alignment);
	SET_IVOR(6, Program);
	SET_IVOR(7, FloatingPointUnavailable);
	SET_IVOR(8, SystemCall);
	SET_IVOR(9, AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError);
	SET_IVOR(14, InstructionTLBError);
	SET_IVOR(15, DebugCrit);

/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
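/*
 * On Book E parts the vector for interrupt n is formed by combining IVPR,
 * which supplies the upper 16 bits of the base address, with IVORn, which
 * supplies the 16-byte-aligned offset of the handler.  The SET_IVOR()
 * lines above program each handler label's offset into the corresponding
 * IVOR, and the lis of interrupt_base@h sets up the common base that goes
 * into IVPR.
 */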
/* Setup the defaults for TLB entries */
	li	r2,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l
	oris	r2,r2,MAS4_TLBSELD(1)@h

	oris	r2,r2,HID0_DOZE@h

#if !defined(CONFIG_BDI_SWITCH)
	/*
	 * The Abatron BDI JTAG debugger does not tolerate others
	 * mucking with the debug registers.
	 */

	/* clear any residual debug events */

	/* Check to see if we're the second processor, and jump
	 * to the secondary_start code if so
	 */
	bne	__secondary_start
	/*
	 * This is where the main kernel code starts.
	 */
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD		/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

#ifdef CONFIG_RELOCATABLE
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)
#ifdef CONFIG_PHYS_64BIT

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */

	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	ori	r3,r3,MSR_KERNEL@l
	rfi			/* change context and jump to start_kernel */
/* Macros to hide the PTE size differences
 * FIND_PTE -- walks the page tables given EA & pgdir pointer
 *   r11 -- PGDIR pointer
 *   label 2: is the bailout case
 * if we find the pte (fall through):
 *   r11 is low pte word
 *   r12 is pointer to the pte
 */
#ifdef CONFIG_PTE_64BIT
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	beq	2f;			/* Bail if no table */		\
	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	lwz	r11, 4(r12);		/* Get pte entry */

	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
	lwz	r11, 0(r11);		/* Get L1 entry */			\
	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */	\
	beq	2f;			/* Bail if no table */			\
	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */		\
	lwz	r11, 0(r12);		/* Get Linux PTE */
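/*
 * Roughly, FIND_PTE is the assembler version of a two-level walk along
 * these lines (a sketch only; the names are the generic kernel macros,
 * not symbols used by this file):
 *
 *	pgd = pgdir[ea >> PGDIR_SHIFT];			// L1 lookup
 *	if (pgd == 0)
 *		goto bail;				// label 2: in the users
 *	pte = ((pte_t *)(pgd & PAGE_MASK))
 *		[(ea >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
 *
 * with r10 = ea, r11 ending up as the (low word of the) PTE and r12 as
 * its address.  The 64-bit PTE variant loads the low word from offset 4
 * because PTEs are stored big-endian as {hi, lo} pairs.
 */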
/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors.  In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */
/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)

/* Machine Check Interrupt */
	/* no RFMCI, MCSRRs on E200 */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)

/* Data Storage Interrupt */
	START_EXCEPTION(DataStorage)
	NORMAL_EXCEPTION_PROLOG
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x0300, CacheLockingException)

/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

/* External Input Interrupt */
	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

/* Alignment Interrupt */

/* Program Interrupt */

/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
	/* E200 treats 'normal' floating point instructions as FP Unavail exception */
	EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
	EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)

/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
	CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1

	/* Get the PGD for the current thread */
	mfspr	r11,SPRN_SPRG_THREAD
	/* Mask of required permission bits.  Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY.  We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here.  We should measure
	 * whether the whole thing is worth it in the first place
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place.
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
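	/*
	 * In short: r13 is built up as the set of PTE bits this access
	 * requires (valid + accessed, plus the write-permission bit when
	 * ESR[ST] says the fault was a store).  "andc. r13,r13,r11" then
	 * clears every required bit that the PTE actually grants; a
	 * non-zero result means at least one required bit is missing, and
	 * we branch to the bailout path so the generic fault handler can
	 * sort it out.
	 */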
#ifdef CONFIG_PTE_64BIT
	oris	r13,r13,_PAGE_ACCESSED@h
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,11,29,29

	andc.	r13,r13,r11		/* Check permission */

#ifdef CONFIG_PTE_64BIT
	subf	r10,r11,r12		/* create false data dep */
	lwzx	r13,r11,r10		/* Get upper pte bits */
	lwz	r13,0(r12)		/* Get upper pte bits */

	bne	2f			/* Bail if permission/valid mismatch */

	/* Jump to common tlb load */

	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
/* Instruction TLB Error Interrupt */
/*
 * Nearly the same as above, except we get our
 * information from different registers and bailout
 * to a different point.
 */
	START_EXCEPTION(InstructionTLBError)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1

	/* Get the PGD for the current thread */
	mfspr	r11,SPRN_SPRG_THREAD
	/* Make up the required permissions */
#ifdef CONFIG_PTE_64BIT
	li	r13,_PAGE_PRESENT | _PAGE_EXEC
	oris	r13,r13,_PAGE_ACCESSED@h
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	andc.	r13,r13,r11		/* Check permission */

#ifdef CONFIG_PTE_64BIT
	subf	r10,r11,r12		/* create false data dep */
	lwzx	r13,r11,r10		/* Get upper pte bits */
	lwz	r13,0(r12)		/* Get upper pte bits */

	bne	2f			/* Bail if permission mismatch */

	/* Jump to common TLB load point */

	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
/* SPE Unavailable */
	START_EXCEPTION(SPEUnavailable)
	NORMAL_EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x2010, KernelSPE)
	EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */

/* SPE Floating Point Data */
	EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);

/* SPE Floating Point Round */
	EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE)
	EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */

/* Performance Monitor */
	EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)

	EXCEPTION(0x2070, Doorbell, doorbell_exception, EXC_XFER_STD)

	CRITICAL_EXCEPTION(0x2080, CriticalDoorbell, unknown_exception)

/* Debug Interrupt */
	DEBUG_DEBUG_EXCEPTION
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - available to use
 *	r11 - TLB (info from Linux PTE)
 *	r12 - available to use
 *	r13 - upper bits of PTE (if PTE_64BIT) or available to use
 *	CR5 - results of addr >= PAGE_OFFSET
 *	MAS0, MAS1 - loaded with proper value when we get here
 *	MAS2, MAS3 - will need additional info from Linux PTE
 *	Upon exit, we reload everything and RFI.
 *
 * We set execute, because we don't have the granularity to
 * properly set this at the page level (Linux problem).
 * Many of these bits are software only.  Bits we don't set
 * here we (properly should) assume have the appropriate value.
 */
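/*
 * What follows converts the Linux PTE into MAS register contents: the
 * WIMGE attribute bits are copied into MAS2 alongside the EPN already
 * there, the permission and dirty bits are translated into the MAS3
 * SR/SW/SX/UR/UW/UX fields, and the RPN (split across MAS3 and MAS7 on
 * 36-bit physical configurations) is filled in before the entry is
 * written with tlbwe.
 */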
#ifdef CONFIG_PTE_64BIT
	rlwimi	r12, r11, 32-19, 27, 31	/* extract WIMGE from pte */
	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */

#ifdef CONFIG_PTE_64BIT
	rlwinm	r12, r11, 32-2, 26, 31	/* Move in perm bits */
	andi.	r10, r11, _PAGE_DIRTY
	li	r10, MAS3_SW | MAS3_UW
1:	rlwimi	r12, r13, 20, 0, 11	/* grab RPN[32:43] */
	rlwimi	r12, r11, 20, 12, 19	/* grab RPN[44:51] */

	BEGIN_MMU_FTR_SECTION
	srwi	r10, r13, 12		/* grab RPN[12:31] */
	END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)

	li	r10, (_PAGE_EXEC | _PAGE_PRESENT)
	rlwimi	r10, r11, 31, 29, 29	/* extract _PAGE_DIRTY into SW */
	andi.	r10, r11, _PAGE_USER	/* Test for _PAGE_USER */
	rlwimi	r11, r12, 0, 20, 31	/* Extract RPN from PTE and merge with perms */
/* Round robin TLB1 entries assignment */

	/* Extract TLB1CFG(NENTRY) */
	mfspr	r11, SPRN_TLB1CFG
	andi.	r11, r11, 0xfff

	/* Extract MAS0(NV) */
	andi.	r13, r12, 0xfff

	/* check if we need to wrap */

	/* wrap back to first free tlbcam entry */
	lis	r13, tlbcam_index@ha
	lwz	r13, tlbcam_index@l(r13)
	rlwimi	r12, r13, 0, 20, 31
#endif /* CONFIG_E200 */
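/*
 * The CONFIG_E200 block above implements the software replacement policy:
 * the next-victim field kept in the low bits of MAS0 is incremented on
 * every reload, compared against TLB1CFG[NENTRY], and wrapped back to
 * tlbcam_index (the first entry not pinned for the kernel's CAM mappings)
 * once it runs off the end, so reloads cycle round-robin through the
 * non-pinned entries.
 */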
	/* Done...restore registers and get out of here. */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi					/* Force context change */
/* Note that the SPE support is closely modeled after the AltiVec
 * support.  Changes to one are likely to be applicable to the
 * other!
 *
 * Disable SPE for the task which had SPE previously,
 * and save its SPE registers in its thread_struct.
 * Enables SPE for use in the kernel on return.
 * On SMP we know the SPE units are free, since we give it up every
 * switch.
 */
	mtmsr	r5			/* enable use of SPE now */

/*
 * For SMP, we don't do lazy SPE switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_spe in switch_to.
 */
	lis	r3,last_task_used_spe@ha
	lwz	r4,last_task_used_spe@l(r3)
	addi	r4,r4,THREAD		/* want THREAD of last_task_used_spe */
	SAVE_32EVRS(0,r10,r4)
	evxor	evr10, evr10, evr10	/* clear out evr10 */
	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
	evstddx	evr10, r4, r5		/* save off accumulator */
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	andc	r4,r4,r10		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#endif /* !CONFIG_SMP */

	/* enable use of SPE after return */
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	stw	r4,THREAD_USED_SPE(r5)
	REST_32EVRS(0,r10,r5)
	stw	r4,last_task_used_spe@l(r3)
#endif /* !CONFIG_SMP */

	/* restore registers and return */
2:	REST_4GPRS(3, r11)
/*
 * SPE unavailable trap from kernel - print a message, but let
 * the task use SPE in the kernel until it returns to user mode.
 */
	stw	r3,_MSR(r1)	/* enable use of SPE after return */
	mr	r4,r2		/* current */
87:	.string	"SPE used in kernel (task=%p, pc=%x) \n"
#endif /* CONFIG_SPE */
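/*
 * The __setup_*_ivors helpers below fill in the core-specific vectors
 * (SPE unavailable and SPE floating-point data/round on e200 and
 * e500v1/v2, performance monitor and doorbells on e500mc) that the
 * generic SET_IVOR block near the top of this file does not cover:
 * each one loads the low half of a handler label and writes it into
 * the corresponding IVOR SPR before returning.
 */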
/* Adjust or setup IVORs for e200 */
_GLOBAL(__setup_e200_ivors)
	li	r3,SPEUnavailable@l
	li	r3,SPEFloatingPointData@l
	li	r3,SPEFloatingPointRound@l

/* Adjust or setup IVORs for e500v1/v2 */
_GLOBAL(__setup_e500_ivors)
	li	r3,SPEUnavailable@l
	li	r3,SPEFloatingPointData@l
	li	r3,SPEFloatingPointRound@l
	li	r3,PerformanceMonitor@l

/* Adjust or setup IVORs for e500mc */
_GLOBAL(__setup_e500mc_ivors)
	li	r3,PerformanceMonitor@l
	li	r3,CriticalDoorbell@l

/*
 * extern void loadcam_entry(unsigned int index)
 *
 * Load TLBCAM[index] entry into the L2 CAM MMU
 */
_GLOBAL(loadcam_entry)
	mulli	r5,r3,TLBCAM_SIZE
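	/*
	 * r3 is the caller's index into the array of saved CAM entries, so
	 * multiplying by TLBCAM_SIZE gives the byte offset of the saved MAS
	 * values for that entry; those are then copied into MAS0-MAS3 (and
	 * MAS7 where present) and committed to the TLB with tlbwe.
	 */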
/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The e500 core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)

/*
 * extern void giveup_spe(struct task_struct *prev)
 */
	mtmsr	r5			/* enable use of SPE now */
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	SAVE_32EVRS(0, r4, r3)
	evxor	evr6, evr6, evr6	/* clear out evr6 */
	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
	evstddx	evr6, r4, r3		/* save off accumulator */
	mfspr	r6,SPRN_SPEFSCR
	stw	r6,THREAD_SPEFSCR(r3)	/* save spefscr register value */
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	andc	r4,r4,r3		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r4,last_task_used_spe@ha
	stw	r5,last_task_used_spe@l(r4)
#endif /* !CONFIG_SMP */
#endif /* CONFIG_SPE */

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * Not all FSL Book-E cores have an FPU
 */
#ifndef CONFIG_PPC_FPU

/*
 * extern void abort(void)
 *
 * At present, this routine just applies a system reset.
 */
	mtspr	SPRN_DBCR0,r13		/* disable all debug events */
	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
	mfspr	r13,SPRN_DBCR0
	lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
	mtspr	SPRN_DBCR0,r13
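	/*
	 * Writing DBCR0 with DBCR0_IDM | DBCR0_RST_CHIP, with MSR[DE]
	 * enabled by the ori above, asks the debug facility to drive a
	 * chip-level reset, so this routine is not expected to return.
	 */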
_GLOBAL(set_context)
#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	isync			/* Force context change */

_GLOBAL(flush_dcache_L1)
	mfspr	r3,SPRN_L1CFG0

	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 * log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */

	/* save off HID0 and set DCFA */
	ori	r9,r8,HID0_DCFA@l

1:	lwz	r3,0(r4)	/* Load... */

1:	dcbf	0,r4		/* ...and flush. */
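/*
 * Roughly, this flushes the L1 data cache by displacement rather than by
 * iterating over sets and ways: with HID0[DCFA] (data cache flush assist)
 * set, the load loop sweeps a region sized from L1CFG0 through the cache,
 * which forces any previously dirty line to be evicted and written back,
 * and the dcbf loop then cleans out the lines the sweep itself brought in.
 */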
/* When we get here, r24 needs to hold the CPU # */
	.globl	__secondary_start
	lis	r3,__secondary_hold_acknowledge@h
	ori	r3,r3,__secondary_hold_acknowledge@l
	mr	r4,r24		/* Why? */

	lis	r3,tlbcam_index@ha
	lwz	r3,tlbcam_index@l(r3)
	li	r26,0		/* r26 safe? */

	/* Load each CAM entry */

	/* get current_thread_info and current */
	lis	r1,secondary_ti@ha
	lwz	r1,secondary_ti@l(r1)
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4

	/* Setup the defaults for TLB entries */
	li	r4,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l

	/* Jump to start_secondary */
	ori	r4,r4,MSR_KERNEL@l
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l

	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:

/*
 * We put a few things here that have to be page-aligned.  This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.globl	empty_zero_page
	.globl	swapper_pg_dir
	.space	PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */