 * Kernel execution entry point code.
 *
 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *	Initial PowerPC version.
 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *	Low-level exception handlers, MMU support, and rewrite.
 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *	PowerPC 8xx modifications.
 * Copyright (c) 1998-1999 TiVo, Inc.
 *	PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *	PowerPC 403GCX/405GP modifications.
 * Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 * Author: MontaVista Software, Inc.
 *	frank_rowand@mvista.com or source@mvista.com
 *	debbie_chu@mvista.com
 * Copyright 2002-2004 MontaVista Software, Inc.
 *	PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 * Copyright 2004 Freescale Semiconductor, Inc.
 *	PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
#include <linux/init.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include "head_booke.h"
/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 */
/*
 * Reserve a word at a fixed location to store the address
 * of abatron_pteptrs.
 */
	/* Translate device tree address to physical, save in r30/r31 */
	mfmsr	r16
	mfspr	r17,SPRN_PID
	rlwinm	r17,r17,16,0x3fff0000	/* turn PID into MAS6[SPID] */
	rlwimi	r17,r16,28,0x00000001	/* turn MSR[DS] into MAS6[SAS] */
	mtspr	SPRN_MAS6,r17

	tlbsx	0,r3			/* must succeed */
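	/* tlbsx searches the TLB for the EA in r3 under the MAS6
	 * SPID/SAS context set up above; on a hit it loads the matching
	 * entry into the MAS registers, from which the page size and
	 * physical base are read back below. */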
	mfspr	r16,SPRN_MAS1		/* Check page size of mapping */
	mfspr	r20,SPRN_MAS3
	rlwinm	r17,r16,25,0x1f		/* r17 = log2(page size) */
	li	r18,1024
	slw	r18,r18,r17		/* r18 = page size */
	addi	r18,r18,-1
	and	r19,r3,r18		/* r19 = page offset */
	andc	r31,r20,r18		/* r31 = page base */
	or	r31,r31,r19		/* r31 = devtree phys addr */
	mfspr	r30,SPRN_MAS7

	li	r25,0			/* phys kernel start (low) */
	li	r24,0			/* CPU number */
	li	r23,0			/* phys kernel start (high) */
	/* We try not to make any assumptions about how the boot loader
	 * set up or used the TLBs.  We invalidate all mappings from the
	 * boot loader and load a single entry in TLB1[0] to map the
	 * first 64M of kernel memory.  Any boot info passed from the
	 * bootloader needs to live in this first 64M.
	 *
	 * Requirement on bootloader:
	 *  - The page we're executing in needs to reside in TLB1 and
	 *    have IPROT=1.  If not, an invalidate broadcast could
	 *    evict the entry we're currently executing in.
	 *
	 *  r3 = Index of TLB1 entry we're executing in
	 *  r4 = Current MSR[IS]
	 *  r5 = Index of TLB1 temp mapping
	 *
	 * Later in mapin_ram we will correctly map lowmem and resize TLB1[0].
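	 * (With the usual 32-bit PAGE_OFFSET of 0xc0000000, TLB1[0]
	 * initially maps 0xc0000000..0xc3ffffff onto the first 64M of
	 * physical kernel memory.)
	 */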
_ENTRY(__early_start)

#define ENTRY_MAPPING_BOOT_SETUP
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_BOOT_SETUP
	/* Establish the interrupt vector offsets */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheck);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError);
	SET_IVOR(14, InstructionTLBError);
	SET_IVOR(15, DebugCrit);
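	/* Each SET_IVOR stores the low 16-bit offset of the named handler
	 * into the corresponding IVORn SPR; the hardware combines that
	 * offset with the IVPR base established just below to form each
	 * vector's address. */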
	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4
	/* Setup the defaults for TLB entries */
	li	r2,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l
	oris	r2,r2,MAS4_TLBSELD(1)@h

	oris	r2,r2,HID0_DOZE@h
#if !defined(CONFIG_BDI_SWITCH)
	/* The Abatron BDI JTAG debugger does not tolerate others
	 * mucking with the debug registers.
	 */
	/* clear any residual debug events */
	/* Check to see if we're the second processor, and jump
	 * to the secondary_start code if so.
	 */
	lis	r24, boot_cpuid@h
	ori	r24, r24, boot_cpuid@l
	lwz	r24, 0(r24)
	cmpwi	r24, -1
	bne	__secondary_start
	/*
	 * This is where the main kernel code starts.
	 */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD		/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	CURRENT_THREAD_INFO(r22, r1)
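	/* CURRENT_THREAD_INFO just masks the stack pointer down to the
	 * THREAD_SIZE boundary: struct thread_info lives at the bottom
	 * of the kernel stack. */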
#ifdef CONFIG_DYNAMIC_MEMSTART
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)
#ifdef CONFIG_PHYS_64BIT
	/*
	 * Decide what sort of machine this is and initialize the MMU.
	 */
	mr	r3,r30
	mr	r4,r31
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */
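	/* rfi resumes at SRR0 with the MSR taken from SRR1, so the
	 * instruction both installs the kernel MSR context and jumps
	 * into start_kernel. */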
/* Macros to hide the PTE size differences
 *
 * FIND_PTE -- walks the page tables given EA & pgdir pointer
 *   r11 -- PGDIR pointer
 *   label 2: is the bailout case
 *
 * If we find the pte (fall through):
 *   r11 is the low pte word
 *   r12 is a pointer to the pte
 *   r10 is the pshift from the PGD, if we're a hugepage
 */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_HUGETLB_PAGE
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	blt	1000f;			/* Normal non-huge page */	\
	beq	2f;			/* Bail if no table */		\
	oris	r11, r11, PD_HUGE@h;	/* Put back address bit */	\
	andi.	r10, r11, HUGEPD_SHIFT_MASK@l; /* extract size field */	\
	xor	r12, r10, r11;		/* drop size bits from pointer */ \
	b	1001f;							\
1000:	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	li	r10, 0;			/* clear r10 */			\
1001:	lwz	r11, 4(r12);		/* Get pte entry */
#else
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	beq	2f;			/* Bail if no table */		\
	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	lwz	r11, 4(r12);		/* Get pte entry */
#endif /* HUGEPAGE */
#else /* !PTE_64BIT */
#define FIND_PTE	\
	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
	lwz	r11, 0(r11);		/* Get L1 entry */			\
	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */	\
	beq	2f;			/* Bail if no table */			\
	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */		\
	lwz	r11, 0(r12);		/* Get Linux PTE */
#endif /* CONFIG_PTE_64BIT */
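/* Example (32-bit PTEs): for EA 0xc0001000 the first rlwimi forms
 * pgdir + (EA >> 22) * 4 (L1 index 0x300) and the second forms
 * pte_base + ((EA >> 12) & 0x3ff) * 4 (L2 index 1); the rotate-and-insert
 * pairs are just shift-and-scale address arithmetic.
 */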
/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors.  In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
#ifdef CONFIG_E200
	/* no RFMCI, MCSRRs on E200 */
	CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
			   machine_check_exception)
#else
	MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#endif
	/* Data Storage Interrupt */
	START_EXCEPTION(DataStorage)
	NORMAL_EXCEPTION_PROLOG(DATA_STORAGE)
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h
	bne	1f
	EXC_XFER_LITE(0x0300, handle_page_fault)
1:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x0300, CacheLockingException)
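	/* ESR[ILK]/ESR[DLK] flag faults raised by cache-line locking
	 * (e.g. dcbtls/icbtls on e500), which are handed to
	 * CacheLockingException instead of the normal page fault path. */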
	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION
	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
#ifdef CONFIG_E200
	/* E200 treats 'normal' floating point instructions as FP Unavail exception */
	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
		  program_check_exception, EXC_XFER_EE)
#else
	EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
		  unknown_exception, EXC_XFER_EE)
#endif
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG(SYSCALL)
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \
		  unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x3100, FIT, FixedIntervalTimer, \
		  unknown_exception, EXC_XFER_EE)
	/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, unknown_exception)
#endif
	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError)
	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
	mfspr	r10, SPRN_SPRG_THREAD
	stw	r11, THREAD_NORMSAVE(0)(r10)
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
	mfspr	r11, SPRN_SRR1
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
	stw	r12, THREAD_NORMSAVE(1)(r10)
	stw	r13, THREAD_NORMSAVE(2)(r10)
	mfcr	r13
	stw	r13, THREAD_NORMSAVE(3)(r10)
	DO_KVM	BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
	mfspr	r10, SPRN_DEAR		/* Get faulting address */
	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	5, r10, r11
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12
	b	4f
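	/* (The rlwinm above keeps bits 16..1, wrapping past bit 0, i.e.
	 * it clears the MAS1[TID] field while preserving MAS1[V] and
	 * MAS1[IPROT]: kernel mappings are installed with TID=0 so they
	 * match under any process ID.) */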
	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)
4:
	/* Mask of required permission bits.  Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY.  We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here.  We should measure
	 * whether the whole thing is worth it in the first place,
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place.
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here?
	 */
#ifdef CONFIG_PTE_64BIT
	li	r13,_PAGE_PRESENT
	oris	r13,r13,_PAGE_ACCESSED@h
#else
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
#endif
	rlwimi	r13,r12,11,29,29

	FIND_PTE
	andc.	r13,r13,r11		/* Check permission */
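	/* The rlwimi of r12 above rotates the saved ESR left by 11 bits,
	 * landing ESR[ST] (set on store faults) in bit 29, the _PAGE_RW
	 * position referred to in the comment block above. */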
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r13,r11,r12		/* create false data dep */
	lwzx	r13,r11,r13		/* Get upper pte bits */
#else
	lwz	r13,0(r12)		/* Get upper pte bits */
#endif
#endif
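	/* The subf/lwzx pair makes the address of the upper-word load
	 * depend on the value of the lower word just read (r12 - r11 +
	 * r11), so the two halves of the PTE are observed in order
	 * without a barrier against concurrent PTE updates. */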
	bne	2f			/* Bail if permission/valid mismatch */

	/* Jump to common tlb load */
	b	finish_tlb_load
2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r10, SPRN_SPRG_THREAD
	lwz	r11, THREAD_NORMSAVE(3)(r10)
	mtcr	r11
	lwz	r13, THREAD_NORMSAVE(2)(r10)
	lwz	r12, THREAD_NORMSAVE(1)(r10)
	lwz	r11, THREAD_NORMSAVE(0)(r10)
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	DataStorage
	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bail out
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError)
	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
	mfspr	r10, SPRN_SPRG_THREAD
	stw	r11, THREAD_NORMSAVE(0)(r10)
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
	mfspr	r11, SPRN_SRR1
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
	stw	r12, THREAD_NORMSAVE(1)(r10)
	stw	r13, THREAD_NORMSAVE(2)(r10)
	mfcr	r13
	stw	r13, THREAD_NORMSAVE(3)(r10)
	DO_KVM	BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
	mfspr	r10, SPRN_SRR0		/* Get faulting address */
	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	5, r10, r11
	blt	5, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MAS1		/* Set TID to 0 */
	rlwinm	r12,r12,0,16,1
	mtspr	SPRN_MAS1,r12
	/* Make up the required permissions for kernel code */
#ifdef CONFIG_PTE_64BIT
	li	r13,_PAGE_PRESENT | _PAGE_BAP_SX
	oris	r13,r13,_PAGE_ACCESSED@h
#else
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#endif
	b	4f
	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)
	/* Make up the required permissions for user code */
#ifdef CONFIG_PTE_64BIT
	li	r13,_PAGE_PRESENT | _PAGE_BAP_UX
	oris	r13,r13,_PAGE_ACCESSED@h
#else
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#endif

4:
	FIND_PTE
	andc.	r13,r13,r11		/* Check permission */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r13,r11,r12		/* create false data dep */
	lwzx	r13,r11,r13		/* Get upper pte bits */
#else
	lwz	r13,0(r12)		/* Get upper pte bits */
#endif
#endif
	bne	2f			/* Bail if permission mismatch */

	/* Jump to common TLB load point */
	b	finish_tlb_load
2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r10, SPRN_SPRG_THREAD
	lwz	r11, THREAD_NORMSAVE(3)(r10)
	mtcr	r11
	lwz	r13, THREAD_NORMSAVE(2)(r10)
	lwz	r12, THREAD_NORMSAVE(1)(r10)
	lwz	r11, THREAD_NORMSAVE(0)(r10)
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage
#ifdef CONFIG_SPE
	/* SPE Unavailable */
	START_EXCEPTION(SPEUnavailable)
	NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
	beq	1f
	bl	load_up_spe
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
	EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
		  unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
	/* SPE Floating Point Data */
#ifdef CONFIG_SPE
	EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData, \
		  SPEFloatingPointException, EXC_XFER_EE)

	/* SPE Floating Point Round */
	EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
		  SPEFloatingPointRoundException, EXC_XFER_EE)
#else
	EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, \
		  unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
		  unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
	/* Performance Monitor */
	EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
		  performance_monitor_exception, EXC_XFER_STD)

	EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception, EXC_XFER_STD)

	CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \
			   CriticalDoorbell, unknown_exception)
	/* Debug Interrupt */
	DEBUG_DEBUG_EXCEPTION

	GUEST_DOORBELL_EXCEPTION

	CRITICAL_EXCEPTION(0, GUEST_DBELL_CRIT, CriticalGuestDoorbell, \
			   unknown_exception)

	/* Hypercall */
	EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_EE)

	/* Embedded Hypervisor Privilege */
	EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_EE)
/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - tsize encoding (if HUGETLB_PAGE) or available to use
 *	r11 - TLB (info from Linux PTE)
 *	r12 - available to use
 *	r13 - upper bits of PTE (if PTE_64BIT) or available to use
 *	CR5 - results of addr >= PAGE_OFFSET
 *	MAS0, MAS1 - loaded with proper value when we get here
 *	MAS2, MAS3 - will need additional info from Linux PTE
 * Upon exit, we reload everything and RFI.
 */
finish_tlb_load:
#ifdef CONFIG_HUGETLB_PAGE
	cmpwi	6, r10, 0		/* check for huge page */
	beq	6, finish_tlb_load_cont	/* !huge */

	/* Alas, we need more scratch registers for hugepages */
	mfspr	r12, SPRN_SPRG_THREAD
	stw	r14, THREAD_NORMSAVE(4)(r12)
	stw	r15, THREAD_NORMSAVE(5)(r12)
	stw	r16, THREAD_NORMSAVE(6)(r12)
	stw	r17, THREAD_NORMSAVE(7)(r12)
	/* Get the next_tlbcam_idx percpu var */
#ifdef CONFIG_SMP
	lwz	r12, THREAD_INFO-THREAD(r12)
	lwz	r15, TI_CPU(r12)
	lis	r14, __per_cpu_offset@h
	ori	r14, r14, __per_cpu_offset@l
	rlwinm	r15, r15, 2, 0, 29	/* CPU number * 4 */
	lwzx	r16, r14, r15		/* r16 = per-CPU offset */
#else
	li	r16, 0
#endif
	lis	r17, next_tlbcam_idx@h
	ori	r17, r17, next_tlbcam_idx@l
	add	r17, r17, r16		/* r17 = ptr to next_tlbcam_idx */
	lwz	r15, 0(r17)		/* r15 = next_tlbcam_idx */

	lis	r14, MAS0_TLBSEL(1)@h	/* select TLB1 (TLBCAM) */
	rlwimi	r14, r15, 16, 4, 15	/* next_tlbcam_idx entry */
	mtspr	SPRN_MAS0, r14
	/* Extract TLB1CFG(NENTRY) */
	mfspr	r16, SPRN_TLB1CFG
	andi.	r16, r16, 0xfff

	/* Update next_tlbcam_idx, wrapping when necessary */
	addi	r15, r15, 1
	cmpw	r15, r16
	blt	100f
	lis	r14, tlbcam_index@h
	ori	r14, r14, tlbcam_index@l
	lwz	r15, 0(r14)
100:	stw	r15, 0(r17)
	/*
	 * Calc MAS1_TSIZE from r10 (which has pshift encoded)
	 * tlb_enc = (pshift - 10).
	 */
	subi	r15, r10, 10
	mfspr	r16, SPRN_MAS1
	rlwimi	r16, r15, 7, 20, 24
	mtspr	SPRN_MAS1, r16
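	/* e.g. a 4M huge page has pshift = 22, so tlb_enc = 12; the
	 * rlwimi above deposits it in the 5-bit MAS1[TSIZE] field. */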
	/* copy the pshift for use later */
	mr	r14, r10

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * We set execute, because we don't have the granularity to
 * properly set this at the page level (Linux problem).
 * Many of these bits are software only.  Bits we don't set
 * here are assumed (as they properly should be) to already
 * hold the appropriate value.
 */
finish_tlb_load_cont:
#ifdef CONFIG_PTE_64BIT
	rlwinm	r12, r11, 32-2, 26, 31	/* Move in perm bits */
	andi.	r10, r11, _PAGE_DIRTY
	bne	1f
	li	r10, MAS3_SW | MAS3_UW
	andc	r12, r12, r10
1:	rlwimi	r12, r13, 20, 0, 11	/* grab RPN[32:43] */
	rlwimi	r12, r11, 20, 12, 19	/* grab RPN[44:51] */
2:	mtspr	SPRN_MAS3, r12
BEGIN_MMU_FTR_SECTION
	srwi	r10, r13, 12		/* grab RPN[12:31] */
	mtspr	SPRN_MAS7, r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
#else
	li	r10, (_PAGE_EXEC | _PAGE_PRESENT)
	mr	r13, r11
	rlwimi	r10, r11, 31, 29, 29	/* extract _PAGE_DIRTY into SW */
	and	r12, r11, r10
	andi.	r10, r11, _PAGE_USER	/* Test for _PAGE_USER */
	slwi	r10, r12, 1
	or	r10, r10, r12
	iseleq	r12, r12, r10
	rlwimi	r13, r12, 0, 20, 31	/* Get RPN from PTE, merge w/ perms */
	mtspr	SPRN_MAS3, r13
#endif
	mfspr	r12, SPRN_MAS2
#ifdef CONFIG_PTE_64BIT
	rlwimi	r12, r11, 32-19, 27, 31	/* extract WIMGE from pte */
#else
	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
#endif
#ifdef CONFIG_HUGETLB_PAGE
	beq	6, 3f			/* don't mask if page isn't huge */
	li	r13, 1
	slw	r13, r13, r14
	subi	r13, r13, 1
	rlwinm	r13, r13, 0, 0, 19	/* bottom bits used for WIMGE/etc */
	andc	r12, r12, r13		/* mask off ea bits within the page */
#endif
3:	mtspr	SPRN_MAS2, r12
#ifdef CONFIG_E200
	/* Round robin TLB1 entries assignment */
	mfspr	r12, SPRN_MAS0

	/* Extract TLB1CFG(NENTRY) */
	mfspr	r11, SPRN_TLB1CFG
	andi.	r11, r11, 0xfff

	/* Extract MAS0(NV) */
	andi.	r13, r12, 0xfff
	addi	r13, r13, 1
	cmpw	0, r13, r11
	addi	r12, r12, 1

	/* check if we need to wrap */
	blt	7f

	/* wrap back to first free tlbcam entry */
	lis	r13, tlbcam_index@ha
	lwz	r13, tlbcam_index@l(r13)
	rlwimi	r12, r13, 0, 20, 31	/* last 12 bits of MAS0 */
7:	mtspr	SPRN_MAS0, r12
#endif /* CONFIG_E200 */
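	/* MAS0[NV] (next victim) drives the round robin: each TLB1 load
	 * bumps it by one and wraps back to tlbcam_index, so the
	 * permanent kernel CAM entries below that index are never
	 * victimized. */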
	tlbwe

	/* Done...restore registers and get out of here.  */
	mfspr	r10, SPRN_SPRG_THREAD
#ifdef CONFIG_HUGETLB_PAGE
	beq	6, 8f			/* skip restore for 4k page faults */
	lwz	r14, THREAD_NORMSAVE(4)(r10)
	lwz	r15, THREAD_NORMSAVE(5)(r10)
	lwz	r16, THREAD_NORMSAVE(6)(r10)
	lwz	r17, THREAD_NORMSAVE(7)(r10)
#endif
8:	lwz	r11, THREAD_NORMSAVE(3)(r10)
	mtcr	r11
	lwz	r13, THREAD_NORMSAVE(2)(r10)
	lwz	r12, THREAD_NORMSAVE(1)(r10)
	lwz	r11, THREAD_NORMSAVE(0)(r10)
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi					/* Force context change */
#ifdef CONFIG_SPE
/* Note that the SPE support is closely modeled after the AltiVec
 * support.  Changes to one are likely to be applicable to the
 * other!
 */
_GLOBAL(load_up_spe)
/*
 * Disable SPE for the task which had SPE previously,
 * and save its SPE registers in its thread_struct.
 * Enables SPE for use in the kernel on return.
 * On SMP we know the SPE units are free, since we give it up every
 * switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
/*
 * For SMP, we don't do lazy SPE switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_spe in switch_to.
 */
#ifndef CONFIG_SMP
	lis	r3,last_task_used_spe@ha
	lwz	r4,last_task_used_spe@l(r3)
	cmpi	0,r4,0
	beq	1f
	addi	r4,r4,THREAD	/* want THREAD of last_task_used_spe */
	SAVE_32EVRS(0,r10,r4,THREAD_EVR0)
	evxor	evr10, evr10, evr10	/* clear out evr10 */
	evmwumiaa evr10, evr10, evr10	/* evr10 <- ACC = 0 * 0 + ACC */
	li	r5,THREAD_ACC
	evstddx	evr10, r4, r5		/* save off accumulator */
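	/* The 64-bit SPE accumulator has no direct move-from instruction;
	 * the multiply-accumulate above computes 0 * 0 + ACC into evr10,
	 * which can then be stored with evstddx. */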
	lwz	r5,PT_REGS(r4)
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_SPE@h
	andc	r4,r4,r10	/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* !CONFIG_SMP */
	/* enable use of SPE after return */
	oris	r9,r9,MSR_SPE@h
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	li	r4,1
	li	r10,THREAD_ACC
	stw	r4,THREAD_USED_SPE(r5)
	evlddx	evr4,r10,r5
	evmra	evr4,evr4
	REST_32EVRS(0,r10,r5,THREAD_EVR0)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD
	stw	r4,last_task_used_spe@l(r3)
#endif /* !CONFIG_SMP */
	b	fast_exception_return
/*
 * SPE unavailable trap from kernel - print a message, but let
 * the task use SPE in the kernel until it returns to user mode.
 */
KernelSPE:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_SPE@h
	stw	r3,_MSR(r1)	/* enable use of SPE after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"SPE used in kernel (task=%p, pc=%x)\n"
	.align	4,0

#endif /* CONFIG_SPE */
/* Adjust or setup IVORs for e200 */
_GLOBAL(__setup_e200_ivors)
	li	r3,SPEUnavailable@l
	mtspr	SPRN_IVOR32,r3
	li	r3,SPEFloatingPointData@l
	mtspr	SPRN_IVOR33,r3
	li	r3,SPEFloatingPointRound@l
	mtspr	SPRN_IVOR34,r3
	sync
	blr
/* Adjust or setup IVORs for e500v1/v2 */
_GLOBAL(__setup_e500_ivors)
	li	r3,SPEUnavailable@l
	mtspr	SPRN_IVOR32,r3
	li	r3,SPEFloatingPointData@l
	mtspr	SPRN_IVOR33,r3
	li	r3,SPEFloatingPointRound@l
	mtspr	SPRN_IVOR34,r3
	li	r3,PerformanceMonitor@l
	mtspr	SPRN_IVOR35,r3
	sync
	blr
/* Adjust or setup IVORs for e500mc */
_GLOBAL(__setup_e500mc_ivors)
	li	r3,PerformanceMonitor@l
	mtspr	SPRN_IVOR35,r3
	li	r3,Doorbell@l
	mtspr	SPRN_IVOR36,r3
	li	r3,CriticalDoorbell@l
	mtspr	SPRN_IVOR37,r3
	sync
	blr
	/*
	 * We only want to touch IVOR38-41 if we're running on hardware
	 * that supports category E.HV.  The architectural way to determine
	 * this is MMUCFG[LPIDSIZE].
	 */
	mfspr	r3, SPRN_MMUCFG
	andis.	r3, r3, MMUCFG_LPIDSIZE@h
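	/* A nonzero MMUCFG[LPIDSIZE] means the core implements category
	 * E.HV, so the guest doorbell and hypervisor IVORs (38-41) are
	 * present. */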
	li	r3,GuestDoorbell@l
	mtspr	SPRN_IVOR38,r3
	li	r3,CriticalGuestDoorbell@l
	mtspr	SPRN_IVOR39,r3
	lwz	r3, CPU_SPEC_FEATURES(r5)
	rlwinm	r3, r3, 0, ~CPU_FTR_EMB_HV	/* clear the E.HV feature bit */
	stw	r3, CPU_SPEC_FEATURES(r5)
/*
 * extern void giveup_spe(struct task_struct *prev)
 */
#ifdef CONFIG_SPE
_GLOBAL(giveup_spe)
	mfmsr	r5
	oris	r5,r5,MSR_SPE@h
	mtmsr	r5			/* enable use of SPE now */
	isync
	cmpi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpi	0,r5,0
	SAVE_32EVRS(0, r4, r3, THREAD_EVR0)
	evxor	evr6, evr6, evr6	/* clear out evr6 */
	evmwumiaa evr6, evr6, evr6	/* evr6 <- ACC = 0 * 0 + ACC */
	li	r4,THREAD_ACC
	evstddx	evr6, r4, r3		/* save off accumulator */
	beq	1f
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_SPE@h
	andc	r4,r4,r3		/* disable SPE for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_spe@ha
	stw	r5,last_task_used_spe@l(r4)
#endif /* !CONFIG_SMP */
	blr
#endif /* CONFIG_SPE */
/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * Not all FSL Book-E cores have an FPU.
 */
#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
	blr
#endif
/*
 * extern void abort(void)
 *
 * At present, this routine just applies a system reset.
 */
_GLOBAL(abort)
	li	r13,0
	mtspr	SPRN_DBCR0,r13		/* disable all debug events */
	isync
	mfmsr	r13
	ori	r13,r13,MSR_DE@l	/* Enable Debug Events */
	mtmsr	r13
	isync
	mfspr	r13,SPRN_DBCR0
	lis	r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
	mtspr	SPRN_DBCR0,r13
	isync
#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr
_GLOBAL(flush_dcache_L1)
	mfspr	r3,SPRN_L1CFG0

	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 * log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
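	/* Presumably 13 because flushing by loads must defeat the
	 * pseudo-LRU replacement policy: touching more unique lines per
	 * set than the cache has ways guarantees the old contents are
	 * displaced. */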
	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l

1:	lwz	r3,0(r4)	/* Load... */

1:	dcbf	0,r4		/* ...and flush. */
/* When we get here, r24 needs to hold the CPU # */
	.globl __secondary_start
__secondary_start:
	lis	r3,__secondary_hold_acknowledge@h
	ori	r3,r3,__secondary_hold_acknowledge@l
	stw	r24,0(r3)		/* tell the master we're up */

	mr	r4,r24		/* Why? */
	bl	call_setup_cpu

	lis	r3,tlbcam_index@ha
	lwz	r3,tlbcam_index@l(r3)
	mtctr	r3
	li	r26,0		/* r26 safe? */
	/* Load each CAM entry */
1:	mr	r3,r26
	bl	loadcam_entry
	addi	r26,r26,1
	bdnz	1b

	/* get current_thread_info and current */
	lis	r1,secondary_ti@ha
	lwz	r1,secondary_ti@l(r1)
	lwz	r2,TI_TASK(r1)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)
	/* ptr to current thread */
	addi	r4,r2,THREAD	/* address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4

	/* Setup the defaults for TLB entries */
	li	r4,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l
	mtspr	SPRN_MAS4,r4
	/* Jump to start_secondary */
	lis	r4,MSR_KERNEL@h
	ori	r4,r4,MSR_KERNEL@l
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi
	.globl __secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1
/*
 * We put a few things here that have to be page-aligned.  This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page tables.
 */
abatron_pteptrs:
	.space	8