/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * PPC44x port. Copyright (C) 2011, IBM Corporation
 * Author: Suzuki Poulose <suzuki@in.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>
/*
 * We store the saved ksp_limit in the unused part
 * of the STACK_FRAME_OVERHEAD
 */
_GLOBAL(call_do_softirq)
	lwz	r10,THREAD+KSP_LIMIT(r2)
	addi	r11,r3,THREAD_INFO_GAP
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	stw	r11,THREAD+KSP_LIMIT(r2)
	stw	r10,THREAD+KSP_LIMIT(r2)
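/*
 * Rough C shape of the stack switch above (a sketch; the elided body
 * saves the old stack pointer and ksp_limit, pivots onto the softirq
 * stack, calls __do_softirq, then restores both):
 *
 *	saved_sp = sp; saved_limit = current->thread.ksp_limit;
 *	sp = irqstack + THREAD_SIZE - STACK_FRAME_OVERHEAD;
 *	current->thread.ksp_limit = irqstack + THREAD_INFO_GAP;
 *	__do_softirq();
 *	sp = saved_sp; current->thread.ksp_limit = saved_limit;
 */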
/*
 * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
 */
_GLOBAL(call_do_irq)
	lwz	r10,THREAD+KSP_LIMIT(r2)
	addi	r11,r4,THREAD_INFO_GAP
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
	stw	r11,THREAD+KSP_LIMIT(r2)
	stw	r10,THREAD+KSP_LIMIT(r2)
/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
1:	beqlr	cr1		/* all done if high part of A is 0 */
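/*
 * In C terms (a sketch; each 64-bit operand arrives as a pair of
 * 32-bit registers, so the asm forms the cross products by hand):
 *
 *	u64 mulhdu(u64 a, u64 b)
 *	{
 *		return (u64)(((unsigned __int128)a * b) >> 64);
 *	}
 */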
/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	addi	r8,r8,__got2_end@l
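/*
 * Conceptually (a sketch; the offset to add is supplied by the
 * caller):
 *
 *	for (unsigned long *p = __got2_start; p < __got2_end; p++)
 *		*p += offset;
 */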
/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r5,CPU_SPEC_SETUP(r4)
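/*
 * Roughly (a sketch of the tail that follows): fetch
 * cur_cpu_spec->cpu_setup and, if it is non-NULL, call it as
 * cpu_setup(data_offset, cur_cpu_spec).
 */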
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/*
 * This gets called by via-pmu.c to switch the PLL selection on the
 * 750fx CPU. This function should really be moved somewhere else, as
 * should most of the cpufreq code in via-pmu.
 */
_GLOBAL(low_choose_750fx_pll)
	/* If switching to PLL1, disable HID0:BTIC */
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1		/* Read the current HID1 value */
	rlwinm	r5,r3,16,15,15		/* Build a HID1:PS bit from parameter */
	rlwinm	r4,r4,0,16,14		/* Clear out HID1:PS from value read */
					/* (could rlwimi have done both?) */

	/* Store new HID1 image */
	CURRENT_THREAD_INFO(r6, r1)
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)
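/*
 * In C terms (a sketch): the saved image is
 *	(HID1 & ~HID1_PS) | (pll ? HID1_PS : 0)
 * where HID1:PS is IBM bit 15, i.e. mask 0x00010000; rlwinm r5,r3,16,15,15
 * rotates the 0/1 parameter up into exactly that bit.
 */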
	/* If switching to PLL0, enable HID0:BTIC */

_GLOBAL(low_choose_7447a_dfs)

	/* Calc new HID1 value */
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
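/*
 * Note: insrwi r4,r3,1,9 drops the parameter's low bit into IBM bit 9
 * of HID1 (mask 0x00400000), i.e. in C terms (a sketch):
 *	hid1 = (hid1 & ~0x00400000) | ((dfs & 1) << 22);
 */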
#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */

/*
 * Complement mask on the MSR, then "or" some values on:
 *	_nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
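/*
 * Equivalent C (a sketch):
 *	mtmsr((mfmsr() & ~nmask) | value_to_or);
 */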
/*
 * Do an IO access in real mode
 */

/*
 * Do an IO access in real mode
 */
#endif /* CONFIG_40x */
/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
#else
	rlwinm	r3,r3,16,16,31
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
#endif /* CONFIG_8xx/4xx */
/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
_KPROBE(flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbst's to get to ram */
	addi	r6,r6,L1_CACHE_BYTES
	/*
	 * Flash invalidate on 44x because we are passed kmapped
	 * addresses and this doesn't work for userspace pages due to
	 * the virtually tagged icache.
	 */
	sync				/* additional sync needed on g4 */
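/*
 * The walk above, in rough C (a sketch; start is aligned down to a
 * cache-line boundary first):
 *
 *	for (p = start & ~(L1_CACHE_BYTES - 1); p < stop; p += L1_CACHE_BYTES)
 *		dcbst(p);	(push dirty D-cache lines to memory)
 *	sync();
 *	for (p = start & ~(L1_CACHE_BYTES - 1); p < stop; p += L1_CACHE_BYTES)
 *		icbi(p);	(toss the stale I-cache lines)
 *	sync();
 *	isync();	(make the invalidation visible to instruction fetch)
 */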
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbst's to get to ram */
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbf's to get to ram */
/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbi's to complete */
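/*
 * Note: clean_dcache_range, flush_dcache_range and
 * invalidate_dcache_range all walk the range one cache line at a
 * time; only the per-line op differs: dcbst writes a dirty line back
 * and keeps it, dcbf writes it back and invalidates it, dcbi discards
 * it without writing back.
 */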
/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	/*
	 * We don't flush the icache on 44x. Those have a virtual icache
	 * and we don't have access to the virtual address here (it's
	 * not the page vaddr but where it's mapped in user space). The
	 * flushing of the icache on these is handled elsewhere, when
	 * a change in the address space occurs, before returning to
	 * user space.
	 */
BEGIN_MMU_FTR_SECTION
	blr
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
#endif /* CONFIG_44x */
	addi	r6,r6,L1_CACHE_BYTES
/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr					/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26			/* clear DR */
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	addi	r6,r6,L1_CACHE_BYTES
	mtmsr	r10				/* restore DR */
#endif /* CONFIG_BOOKE */
/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
 */
_GLOBAL(clear_pages)
	li	r0,PAGE_SIZE/L1_CACHE_BYTES
	addi	r3,r3,L1_CACHE_BYTES
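/*
 * In rough C terms (a sketch): dcbz establishes each line of the
 * (1 << order) pages in the D-cache as zeroes without first reading
 * it from memory:
 *
 *	for (p = page; p < page + (PAGE_SIZE << order); p += L1_CACHE_BYTES)
 *		dcbz(p);
 */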
/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires the destination to be
 * cacheable.
 */
#define COPY_16_BYTES \

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	addi	r11,r11,L1_CACHE_BYTES
#else /* MAX_COPY_PREFETCH == 1 */
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
#if L1_CACHE_BYTES >= 32
#if L1_CACHE_BYTES >= 64
#if L1_CACHE_BYTES >= 128
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
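/*
 * Design note (a sketch of the idea): each destination line is dcbz'd
 * just before it is filled, so the stores never trigger a useless
 * read of the destination from memory, while dcbt keeps up to
 * MAX_COPY_PREFETCH source lines prefetched ahead of the copy.
 */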
/*
 *	void atomic_clear_mask(atomic_t mask, atomic_t *addr);
 *	void atomic_set_mask(atomic_t mask, atomic_t *addr);
 */
_GLOBAL(atomic_clear_mask)

_GLOBAL(atomic_set_mask)
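/*
 * Both bodies are the classic lwarx/stwcx. retry loop (a sketch, with
 * load_reserve/store_conditional standing in for lwarx/stwcx.):
 *
 *	do {
 *		old = load_reserve(addr);
 *	} while (!store_conditional(addr, old & ~mask));   (set: old | mask)
 */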
/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 *
 * R3/R4 has 64 bit value
 * R5    has shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
_GLOBAL(__ashrdi3)
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2

_GLOBAL(__ashldi3)
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2

_GLOBAL(__lshrdi3)
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
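/*
 * Why counts of 0..63 work without branches (a sketch, using the
 * logical right shift): slw/srw take a 6-bit shift amount and return
 * 0 when it is 32..63, so with count c:
 *
 *	lo = (lo >> c)			(0 once c > 31)
 *	   | (hi << (32 - c))		(nonzero only for c = 1..31)
 *	   | (hi >> ((c + 32) mod 64))	(== hi >> (c - 32), only for c >= 32)
 *	hi = hi >> c;			(0 once c > 31)
 *
 * At most one of the two cross terms survives for any count, so the
 * ORs assemble the correct result. The arithmetic variant needs the
 * extra t3 masking step because sraw fills with sign bits, not
 * zeroes, for amounts of 32..63.
 */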
/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
_GLOBAL(__ucmpdi2)
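/*
 * In C terms: return (a < b) ? 0 : (a == b) ? 1 : 2. The high words
 * are compared first; the low words only decide on high-word equality.
 */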
	rlwimi	r9,r4,24,16,23		/* 64-bit byte swap: merge rotated middle bytes */
	rlwimi	r10,r3,24,16,23
_GLOBAL(start_secondary_resume)
	CURRENT_THREAD_INFO(r1, r1)
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r3,0
	stw	r3,0(r1)		/* Zero the stack frame pointer */
#endif /* CONFIG_SMP */
/*
 * This routine is just here to keep GCC happy - sigh...
 */

/*
 * Must be relocatable PIC code callable as a C function.
 */
	.globl	relocate_new_kernel
relocate_new_kernel:
	/* r4 = reboot_code_buffer */
	/* r5 = start_address */

#ifdef CONFIG_FSL_BOOKE

#define ENTRY_MAPPING_KEXEC_SETUP
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_KEXEC_SETUP
#elif defined(CONFIG_44x)

	/* Save our parameters */

#ifdef CONFIG_PPC_47x
	/* Check for 47x cores */
	cmplwi	cr0,r3,PVR_476@h
	cmplwi	cr0,r3,PVR_476_ISS@h
#endif /* CONFIG_PPC_47x */
/*
 * Code for setting up 1:1 mapping for PPC440x for KEXEC
 *
 * We cannot switch off the MMU on PPC44x.
 * So we:
 * 1) Invalidate all the mappings except the one we are running from.
 * 2) Create a tmp mapping for our code in the other address space (TS) and
 *    jump to it. Invalidate the entry we started in.
 * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in the original TS.
 * 4) Jump to the 1:1 mapping in the original TS.
 * 5) Invalidate the tmp mapping.
 *
 * - Based on the kexec support code for FSL BookE
 */
	/*
	 * Load the PID with kernel PID (0).
	 * Also load our MSR_IS and TID to MMUCR for TLB search.
	 */
	oris	r3,r3,PPC44x_MMUCR_STS@h
	/*
	 * Invalidate all the TLB entries except the current entry
	 * where we are running from
	 */
	bl	0f				/* Find our address */
0:	mflr	r5				/* Make it accessible */
	tlbsx	r23,0,r5			/* Find entry we are in */
	li	r4,0				/* Start at TLB entry 0 */
	li	r3,0				/* Set PAGEID inval value */
1:	cmpw	r23,r4				/* Is this our entry? */
	beq	skip				/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
skip:
	addi	r4,r4,1				/* Increment */
	cmpwi	r4,64				/* Are we done? */
	bne	1b				/* If not, repeat */
	/* Create a temp mapping and jump to it */
	andi.	r6, r23, 1		/* Find the index to use */
	addi	r24, r6, 1		/* r24 will contain 1 or 2 */

	mfmsr	r9			/* get the MSR */
	rlwinm	r5, r9, 27, 31, 31	/* Extract the MSR[IS] */
	xori	r7, r5, 1		/* Use the other address space */

	/* Read the current mapping entries */
	tlbre	r3, r23, PPC44x_TLB_PAGEID
	tlbre	r4, r23, PPC44x_TLB_XLAT
	tlbre	r5, r23, PPC44x_TLB_ATTRIB

	/* Save our current XLAT entry */
	mr	r25, r4

	/* Extract the TLB PageSize */
	li	r10, 1			/* r10 will hold PageSize */
	rlwinm	r11, r3, 0, 24, 27	/* bits 24-27 */

	/* XXX: As of now we use 256M, 4K pages */
	cmpwi	r11, PPC44x_TLB_256M
	rotlwi	r10, r10, 28		/* r10 = 256M */
	cmpwi	r11, PPC44x_TLB_4K
	rotlwi	r10, r10, 12		/* r10 = 4K */
	rotlwi	r10, r10, 10		/* r10 = 1K */
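/*
 * Note: r10 starts at 1 and is rotated left by log2(size), so it ends
 * up holding the mapping size in bytes: 1 << 28 = 256M, 1 << 12 = 4K,
 * 1 << 10 = 1K.
 */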
	/*
	 * Write out the tmp 1:1 mapping for this code in the other
	 * address space.
	 * Fixup: EPN = RPN, TS = other address space
	 */
	insrwi	r3, r7, 1, 23			/* Bit 23 is TS for PAGEID field */

	/* Write out the tmp mapping entries */
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	tlbwe	r4, r24, PPC44x_TLB_XLAT
	tlbwe	r5, r24, PPC44x_TLB_ATTRIB

	subi	r11, r10, 1			/* PageOffset Mask = PageSize - 1 */
	not	r10, r11			/* Mask for PageNum */

	/* Switch to other address space in MSR */
	insrwi	r9, r7, 1, 26			/* Set MSR[IS] = r7 */

	addi	r8, r8, (2f-1b)			/* Find the target offset */

	/* Jump to the tmp mapping */

	/* Invalidate the entry we were executing from */
	li	r3, 0
	tlbwe	r3, r23, PPC44x_TLB_PAGEID

	/* attribute fields. rwx for SUPERVISOR mode */
	ori	r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
	/* Create 1:1 mapping in 256M pages */
	xori	r7, r7, 1			/* Revert back to original TS */

	li	r8, 0				/* PageNumber */
	li	r6, 3				/* TLB Index, start at 3 */

	rotlwi	r3, r8, 28			/* Create EPN (bits 0-3) */
	mr	r4, r3				/* RPN = EPN */
	ori	r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M)	/* SIZE = 256M, Valid */
	insrwi	r3, r7, 1, 23			/* Set TS from r7 */

	tlbwe	r3, r6, PPC44x_TLB_PAGEID	/* PageID field : EPN, V, SIZE */
	tlbwe	r4, r6, PPC44x_TLB_XLAT		/* Address translation : RPN */
	tlbwe	r5, r6, PPC44x_TLB_ATTRIB	/* Attributes */

	addi	r8, r8, 1			/* Increment PN */
	addi	r6, r6, 1			/* Increment TLB Index */
	cmpwi	r8, 8				/* Are we done? */

	/* Jump to the new 1:1 mapping */
	insrwi	r9, r7, 1, 26			/* Set MSR[IS] = r7 */

	and	r8, r8, r11			/* Get our offset within page */

	and	r5, r25, r10			/* Get our target PageNum */
	or	r8, r8, r5			/* Target jump address */

	/* Invalidate the tmp entry we used */
	li	r3, 0
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
#ifdef CONFIG_PPC_47x

	/* 1:1 mapping for 47x */

	/*
	 * Load the kernel PID (0) to PID and also to MMUCR[TID].
	 * Also set MSR[IS] -> MMUCR[STS].
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3			/* Set PID */
	mfmsr	r4				/* Get MSR */
	andi.	r4, r4, MSR_IS@l		/* TS=1? */
	beq	1f				/* If not, leave STS=0 */
	oris	r3, r3, PPC47x_MMUCR_STS@h	/* Set STS=1 */
1:	mtspr	SPRN_MMUCR, r3			/* Put MMUCR */

	/* Find the entry we are running from */
	tlbre	r24, r23, 0			/* TLB Word 0 */
	tlbre	r25, r23, 1			/* TLB Word 1 */
	tlbre	r26, r23, 2			/* TLB Word 2 */
	/*
	 * Invalidate all the TLB entries by writing to 256 RPNs (r4)
	 * of 4k page size in all 4 ways (0-3 in r3).
	 * This invalidates the entire UTLB, including the entry we are
	 * running from. However, the shadow TLB entries let execution
	 * continue until we flush them (rfi/isync).
	 */
	addis	r3, 0, 0x8000		/* specify the way */
	addi	r4, 0, 0		/* TLB Word0 = (EPN=0, VALID = 0) */

	/* Align the loop to speed things up. From head_44x.S */
	addis	r3, r3, 0x2000		/* Increment the way */
	addis	r4, r4, 0x100		/* Increment the EPN */
	/* Create the entries in the other address space */
	mfmsr	r5
	rlwinm	r7, r5, 27, 31, 31	/* Get the TS (bit 26) from MSR */
	xori	r7, r7, 1		/* r7 = !TS */

	insrwi	r24, r7, 1, 21		/* Change the TS in the saved TLB word 0 */

	/*
	 * Write out the TLB entries for the tmp mapping.
	 * Use way '0' so that we can easily invalidate it later.
	 */
	lis	r3, 0x8000		/* Way '0' */
	/* Update the MSR to the new TS */

	/*
	 * Now we are in the tmp address space.
	 * Create a 1:1 mapping for 0-2GiB in the original TS.
	 */
	li	r4, 0				/* TLB Word 0 */
	li	r5, 0				/* TLB Word 1 */
	ori	r6, r6, PPC47x_TLB2_S_RWX	/* TLB Word 2 */

	li	r8, 0				/* PageIndex */

	xori	r7, r7, 1			/* Revert back to original TS */

	rotlwi	r5, r8, 28			/* RPN = PageIndex * 256M */
						/* ERPN = 0 as we don't use memory above 2G */

	mr	r4, r5				/* EPN = RPN */
	ori	r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
	insrwi	r4, r7, 1, 21			/* Insert the TS into Word 0 */

	tlbwe	r4, r3, 0			/* Write out the entries */
	cmpwi	r8, 8				/* Have we completed? */
	/* Make sure we complete the TLB write */

	/*
	 * Prepare to jump to the 1:1 mapping.
	 * 1) Extract page size of the tmp mapping
	 *    DSIZ = TLB_Word0[22:27]
	 * 2) Calculate the physical address of the address
	 *    we are running from.
	 */
	rlwinm	r10, r24, 0, 22, 27		/* Extract DSIZ */

	cmpwi	r10, PPC47x_TLB0_4K
	li	r10, 0x1000			/* r10 = 4k */

	/* Defaults to 256M */

	addi	r4, r4, (2f-1b)			/* Virtual address of 2f */

	subi	r11, r10, 1			/* offsetmask = PageSize - 1 */
	not	r10, r11			/* PageMask = ~offsetmask */

	and	r5, r25, r10			/* Physical page */
	and	r6, r4, r11			/* Offset within the current page */

	or	r5, r5, r6			/* Physical address of 2f */

	/* Switch the TS in MSR to the original one */
	mfmsr	r8
	insrwi	r8, r7, 1, 26
	/* Invalidate the tmp mapping */
	lis	r3, 0x8000			/* Way '0' */
	clrrwi	r24, r24, 12			/* Clear the valid bit */

	/* Make sure we complete the TLB write and flush the shadow TLB */

	/* Restore the parameters */
	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */
	ori	r8, r8, MSR_RI|MSR_ME
	addi	r8, r4, 1f - relocate_new_kernel

	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8	/* for LR Save+Back Chain */
	li	r6, 0				/* checksum */

0:	/* top, read another word for the indirection page */

	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31		/* IND_DESTINATION (1<<0) */

	rlwinm	r8, r0, 0, 0, 19		/* clear kexec flags, page align */

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30		/* IND_INDIRECTION (1<<1) */

	rlwinm	r3, r0, 0, 0, 19		/* clear kexec flags, page align */

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29		/* IND_DONE (1<<2) */

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28		/* IND_SOURCE (1<<3) */

	rlwinm	r9, r0, 0, 0, 19		/* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4

	lwzu	r0, 4(r9)			/* do the copy */
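/*
 * Rough C equivalent of this state machine (a sketch; each entry word
 * carries an IND_* flag in its low bits and a page address above
 * them):
 *
 *	for (ptr = page_list; ; ptr++) {
 *		entry = *ptr;
 *		addr  = entry & PAGE_MASK;
 *		if (entry & IND_DESTINATION)
 *			dest = addr;
 *		else if (entry & IND_INDIRECTION)
 *			ptr = (unsigned long *)addr - 1;   (lwzu pre-increments)
 *		else if (entry & IND_DONE)
 *			break;
 *		else if (entry & IND_SOURCE) {
 *			copy_page(dest, addr);
 *			dest += PAGE_SIZE;
 *		}
 *	}
 */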
	/*
	 * To be certain of avoiding problems with self-modifying code
	 * execute a serializing instruction here.
	 */

	mfspr	r3, SPRN_PIR		/* current core we are running on */
	mr	r4, r5			/* load physical address of chunk called */

	/* jump to the entry point, usually the setup routine */
relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel