/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>
_GLOBAL(call_do_softirq)
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)

_GLOBAL(call_handle_irq)
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
1:	beqlr	cr1		/* all done if high part of A is 0 */
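/*
 * For reference, a rough C equivalent of the computation (illustrative
 * sketch only, not part of the build; mulhdu_sketch is a made-up name):
 *
 *	unsigned long long mulhdu_sketch(unsigned long long a,
 *					 unsigned long long b)
 *	{
 *		unsigned int ah = a >> 32, al = a, bh = b >> 32, bl = b;
 *		unsigned long long mid, mid2;
 *
 *		mid  = (unsigned long long)ah * bl
 *			+ (((unsigned long long)al * bl) >> 32);
 *		mid2 = (unsigned long long)al * bh + (unsigned int)mid;
 *		return (unsigned long long)ah * bh
 *			+ (mid >> 32) + (mid2 >> 32);
 *	}
 */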
/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	addi	r8,r8,__got2_end@l
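/*
 * Roughly what the loop does, in C (sketch only; the assembly also
 * applies the offset to the table's own address first, since it can
 * run before relocation):
 *
 *	extern unsigned long __got2_start[], __got2_end[];
 *
 *	void reloc_got2_sketch(unsigned long offset)
 *	{
 *		unsigned long *p;
 *
 *		for (p = __got2_start; p < __got2_end; p++)
 *			*p += offset;
 *	}
 */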
/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r5,CPU_SPEC_SETUP(r4)
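/*
 * In C terms this amounts to the following (sketch only; cpu_setup and
 * cur_cpu_spec are the real names from <asm/cputable.h>):
 *
 *	if (cur_cpu_spec->cpu_setup)
 *		cur_cpu_spec->cpu_setup(offset, cur_cpu_spec);
 *
 * where both the cur_cpu_spec pointer and the function address have
 * the data offset in r3 applied before the call.
 */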
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on the 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu).
 */
_GLOBAL(low_choose_750fx_pll)
	/* If switching to PLL1, disable HID0:BTIC */

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1	/* read the current HID1 value */
	rlwinm	r5,r3,16,15,15	/* build a HID1:PS bit from the parameter */
	rlwinm	r4,r4,0,16,14	/* clear HID1:PS out of the value read
				   (rlwimi could merge these two steps) */

	/* Store new HID1 image */
	rlwinm	r6,r1,0,0,(31-THREAD_SHIFT)
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
_GLOBAL(low_choose_7447a_dfs)
	/* Calc new HID1 value */
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
/*
 * Apply the complement of a mask to the MSR (clearing those bits),
 * then OR some values on:
 *	_nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* AND off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* OR on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
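/*
 * The same operation in C notation (sketch only):
 *
 *	mtmsr((mfmsr() & ~nmask) | value_to_or);
 */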
/*
 * Do an IO access in real mode
 */

/*
 * Do an IO access in real mode
 */

#endif /* CONFIG_40x */
/*
 * Flush the instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#elif defined(CONFIG_FSL_BOOKE)
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	rlwinm	r3,r3,16,16,31
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
#endif /* CONFIG_8xx/4xx */
/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
_KPROBE(__flush_icache_range)
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbst's to get to ram */
	addi	r6,r6,L1_CACHE_BYTES
	/* Flash invalidate on 44x because we are passed kmapped addresses and
	   this doesn't work for userspace pages due to the virtually tagged
	   icache. */
	sync				/* additional sync needed on g4 */
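/*
 * The loop structure above, restated as a C sketch (illustration only;
 * dcbst/icbi stand for the corresponding instructions):
 *
 *	start &= ~(L1_CACHE_BYTES - 1);
 *	nr = (stop - start + L1_CACHE_BYTES - 1) >> L1_CACHE_SHIFT;
 *	for (i = 0; i < nr; i++)
 *		dcbst(start + i * L1_CACHE_BYTES);  // write data back
 *	sync();                                     // dcbst's reach memory
 *	for (i = 0; i < nr; i++)
 *		icbi(start + i * L1_CACHE_BYTES);   // invalidate icache
 *	sync();
 *	isync();                                    // drop prefetched insns
 */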
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbst's to get to ram */
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbf's to get to ram */
/*
 * Like above, but invalidate the D-cache. This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	srwi.	r4,r4,L1_CACHE_SHIFT
	addi	r3,r3,L1_CACHE_BYTES
	sync				/* wait for dcbi's to get to ram */
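/*
 * The three range primitives above share one loop shape and differ only
 * in the cache operation used (sketch only; cache_op stands for the
 * respective instruction):
 *
 *	dcbst - clean:      write back, line stays valid
 *	dcbf  - flush:      write back, then invalidate
 *	dcbi  - invalidate: discard line without writing back
 *
 *	for (addr = start & ~(L1_CACHE_BYTES - 1); addr < stop;
 *	     addr += L1_CACHE_BYTES)
 *		cache_op(addr);
 *	sync();
 */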
/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT	/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES

	/* We don't flush the icache on 44x. Those have a virtual icache
	 * and we don't have access to the virtual address here (it's
	 * not the page vaddr but where it's mapped in user space). The
	 * flushing of the icache on these is handled elsewhere, when
	 * a change in the address space occurs, before returning to
	 * user space.
	 */
BEGIN_MMU_FTR_SECTION
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
#endif /* CONFIG_44x */
	addi	r6,r6,L1_CACHE_BYTES
/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address. We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r0,r10,0,28,26		/* clear DR */
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT	/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	addi	r6,r6,L1_CACHE_BYTES
	mtmsr	r10			/* restore DR */
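/*
 * The MSR juggling above, in C notation (sketch only): data address
 * translation is disabled around the cache loops so the physical
 * address can be used directly.
 *
 *	old = mfmsr();
 *	mtmsr(old & ~MSR_DR);	// rlwinm r0,r10,0,28,26 clears MSR_DR
 *	isync();
 *	... dcbst/icbi loops over the page ...
 *	mtmsr(old);		// restore translation
 *	isync();
 */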
#endif /* CONFIG_BOOKE */
/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced). This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
 */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES
	addi	r3,r3,L1_CACHE_BYTES
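/*
 * In outline (sketch only): dcbz zeroes an entire cache line without
 * reading it from memory first, so clearing 2^order pages this way
 * generates no read traffic.
 *
 *	nr = (PAGE_SIZE / L1_CACHE_BYTES) << order;
 *	for (i = 0; i < nr; i++)
 *		dcbz(page + i * L1_CACHE_BYTES);
 */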
/*
 * Copy a whole page. We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache). This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES \
#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	addi	r11,r11,L1_CACHE_BYTES
#else /* MAX_COPY_PREFETCH == 1 */
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
#if L1_CACHE_BYTES >= 32
#if L1_CACHE_BYTES >= 64
#if L1_CACHE_BYTES >= 128
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
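/*
 * Copy strategy in outline (sketch only): for each destination cache
 * line, dcbz allocates it without a read from memory, dcbt prefetches
 * the source MAX_COPY_PREFETCH lines ahead, and the data then moves
 * through registers 16 bytes at a time (COPY_16_BYTES).
 *
 *	for (line = 0; line < PAGE_SIZE / L1_CACHE_BYTES; line++) {
 *		dcbt(src + (line + MAX_COPY_PREFETCH) * L1_CACHE_BYTES);
 *		dcbz(dst + line * L1_CACHE_BYTES);
 *		// move the line, 16 bytes per iteration
 *	}
 */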
/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr)
 */
_GLOBAL(atomic_clear_mask)

_GLOBAL(atomic_set_mask)
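/*
 * Both helpers are the usual lwarx/stwcx. retry loop; in C-style
 * pseudo-form (sketch only; load_reserved/store_conditional are
 * stand-ins for lwarx and stwcx.):
 *
 *	do {
 *		old = load_reserved(addr);		// lwarx
 *		new = old & ~mask;			// or: old | mask
 *	} while (!store_conditional(addr, new));	// stwcx.
 */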
/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 *
 * R3/R4 hold the 64-bit value
 *
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
 */
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2

	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2

	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
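/*
 * The branch-free trick above, restated in C for the logical right
 * shift (sketch only; SRW/SLW mimic the PowerPC shifts, which take
 * their amount mod 64 and return 0 for amounts 32-63):
 *
 *	u32 SRW(u32 x, u32 n) { n &= 63; return n > 31 ? 0 : x >> n; }
 *	u32 SLW(u32 x, u32 n) { n &= 63; return n > 31 ? 0 : x << n; }
 *
 *	u64 lshrdi3_sketch(u32 msw, u32 lsw, u32 c)	// c in 0..63
 *	{
 *		u32 lo = SRW(lsw, c)		// LSW >> count
 *		       | SLW(msw, 32 - c)	// bits shifted down from MSW
 *		       | SRW(msw, c - 32);	// count >= 32 case
 *		u32 hi = SRW(msw, c);
 *		return ((u64)hi << 32) | lo;
 *	}
 */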
/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
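/*
 * C equivalent (sketch only):
 *
 *	int ucmpdi2_sketch(u64 a, u64 b)
 *	{
 *		return (a < b) ? 0 : (a == b) ? 1 : 2;
 *	}
 */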
/*
 * Create a kernel thread
 *	kernel_thread(fn, arg, flags)
 */
_GLOBAL(kernel_thread)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	bns+	1f		/* did system call indicate error? */
	neg	r3,r3		/* if so, make return code negative */
1:	cmpwi	0,r3,0		/* parent or child? */
	bne	2f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	li	r0,__NR_exit	/* exit if function returns */
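/*
 * Control flow in C outline (sketch only): this is effectively a
 * clone() system call with CLONE_VM | CLONE_UNTRACED ORed into the
 * caller's flags.
 *
 *	pid = clone(flags | CLONE_VM | CLONE_UNTRACED, 0);
 *	if (pid == 0) {			// child
 *		ret = fn(arg);
 *		sys_exit(ret);		// if fn returns
 *	}
 *	return pid;			// parent: pid or -errno
 */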
/*
 * This routine is just here to keep GCC happy - sigh...
 */

/*
 * Must be relocatable PIC code callable as a C function.
 */
	.globl relocate_new_kernel
	/* r4 = reboot_code_buffer */
	/* r5 = start_address */

#ifdef CONFIG_FSL_BOOKE
#define ENTRY_MAPPING_KEXEC_SETUP
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_KEXEC_SETUP

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */
	ori	r8, r8, MSR_RI|MSR_ME
	addi	r8, r4, 1f - relocate_new_kernel

	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */

	li	r6, 0 /* checksum */

0:	/* top, read another word from the indirection page */

	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31	/* IND_DESTINATION (1<<0) */
	rlwinm	r8, r0, 0, 0, 19	/* clear kexec flags, page align */

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30	/* IND_INDIRECTION (1<<1) */
	rlwinm	r3, r0, 0, 0, 19	/* clear kexec flags, page align */

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29	/* IND_DONE (1<<2) */

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28	/* IND_SOURCE (1<<3) */
	rlwinm	r9, r0, 0, 0, 19	/* clear kexec flags, page align */

	lwzu	r0, 4(r9)		/* do the copy */
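/*
 * The indirection-list walk above in C form (sketch only; the IND_*
 * flags are the real ones from <linux/kexec.h>):
 *
 *	for (entry = ind_page; ; entry++) {
 *		unsigned long e = *entry;
 *		unsigned long page = e & PAGE_MASK;
 *
 *		if (e & IND_DESTINATION)
 *			dest = page;				// r8
 *		else if (e & IND_INDIRECTION)
 *			entry = (unsigned long *)page - 1;	// r3;
 *							// -1 offsets entry++
 *		else if (e & IND_DONE)
 *			break;
 *		else if (e & IND_SOURCE) {			// r9
 *			// copy PAGE_SIZE bytes from page to dest
 *			dest += PAGE_SIZE;
 *		}
 *	}
 */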
	/* To be certain of avoiding problems with self-modifying code
	 * execute a serializing instruction here.
	 */
	mfspr	r3, SPRN_PIR	/* current core we are running on */
	mr	r4, r5		/* load physical address of chunk called */

	/* jump to the entry point, usually the setup routine */

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel