/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

	.text
/*
 * We store the saved ksp_limit in the unused part
 * of the STACK_FRAME_OVERHEAD.
 */
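/*
 * Pseudo-C sketch of the stack switch done by call_do_softirq and
 * call_do_irq below (the stwu stores the old r1 at the base of the
 * new frame, linking the new stack back to the old one):
 *
 *	saved = current->thread.ksp_limit;
 *	current->thread.ksp_limit = new_stack;
 *	...run __do_softirq()/__do_irq() on new_stack...
 *	current->thread.ksp_limit = saved;
 */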
_GLOBAL(call_do_softirq)
	lwz	r10,THREAD+KSP_LIMIT(r2)	/* save the old ksp_limit */
	stw	r3, THREAD+KSP_LIMIT(r2)	/* limit = softirq stack */
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)	/* link new frame */
	stw	r10,THREAD+KSP_LIMIT(r2)	/* restore the old ksp_limit */
/*
 * void call_do_irq(struct pt_regs *regs, void *sp);
 */
_GLOBAL(call_do_irq)
	lwz	r10,THREAD+KSP_LIMIT(r2)	/* save the old ksp_limit */
	stw	r4, THREAD+KSP_LIMIT(r2)	/* limit = IRQ stack */
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)	/* link new frame */
	stw	r10,THREAD+KSP_LIMIT(r2)	/* restore the old ksp_limit */
/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
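/*
 * Splitting A = 2^32*ah + al and B = 2^32*bh + bl gives
 *
 *	A*B = 2^64*(ah*bh) + 2^32*(ah*bl + al*bh) + al*bl
 *
 * so the high 64 bits are ah*bh plus the high words of the two
 * cross products plus any carries out of bit 63; the mulhwu/mullw
 * pairs in this routine compute those 32-bit partial products.
 */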
1:	beqlr	cr1		/* all done if high part of A is 0 */
/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
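/*
 * Rough C equivalent (a sketch; __got2_start/__got2_end are the
 * linker-provided bounds of the section):
 *
 *	void reloc_got2(unsigned long offset)
 *	{
 *		unsigned long *p;
 *
 *		for (p = __got2_start; p < __got2_end; p++)
 *			*p += offset;
 *	}
 */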
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r3 = data offset
 *   r4 = ptr to CPU spec (relocated)
 */
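/*
 * Rough C equivalent (a sketch only; before relocation both the
 * cur_cpu_spec pointer and the setup function address need the data
 * offset in r3 added to them):
 *
 *	struct cpu_spec *s;
 *
 *	s = *(struct cpu_spec **)((void *)&cur_cpu_spec + offset);
 *	s = (void *)s + offset;
 *	if (s->cpu_setup)
 *		((cpu_setup_t)((void *)s->cpu_setup + offset))(offset, s);
 */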
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)		/* load the cur_cpu_spec pointer */
	add	r4,r4,r3		/* relocate it by the data offset */
	lwz	r5,CPU_SPEC_SETUP(r4)
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_PPC_BOOK3S_32)
/* This gets called by via-pmu.c to switch the PLL selection
 * on the 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu).
 */
_GLOBAL(low_choose_750fx_pll)
	/* If switching to PLL1, disable HID0:BTIC */
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	rlwinm	r5,r3,16,15,15	/* build a HID1:PS bit from the parameter */
	rlwinm	r4,r4,0,16,14	/* clear HID1:PS; could rlwimi have been used? */
	/* Store new HID1 image */
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)
	/* If switching to PLL0, enable HID0:BTIC */
_GLOBAL(low_choose_7447a_dfs)
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */
#ifdef CONFIG_40x

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_readb)
	mfmsr	r7
	rlwinm	r0,r7,0,~MSR_DR		/* clear MSR:DR (data translation) */
_ASM_NOKPROBE_SYMBOL(real_readb)
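/*
 * Both real_readb above and real_writeb below use the same pattern,
 * roughly (pseudo-C sketch):
 *
 *	msr = mfmsr();
 *	mtmsr(msr & ~MSR_DR);	turn off data address translation
 *	...do the single byte access...
 *	mtmsr(msr);		restore the original MSR
 *
 * with sync/isync around the MSR writes to order the change of
 * translation against the access.
 */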
/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	rlwinm	r0,r7,0,~MSR_DR		/* clear MSR:DR (data translation) */
_ASM_NOKPROBE_SYMBOL(real_writeb)
#endif /* CONFIG_40x */
/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
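/*
 * Rough shape of the copy loop below (pseudo-C sketch):
 *
 *	for (each L1_CACHE_BYTES-sized line of the page) {
 *		dcbt(next source line);	prefetch ahead
 *		dcbz(dst line);		allocate and zero it in cache,
 *					so it is never read from memory
 *		copy L1_CACHE_BYTES bytes via repeated COPY_16_BYTES;
 *	}
 */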
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwz	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)
_GLOBAL(copy_page)
	rlwinm	r5, r3, 0, L1_CACHE_BYTES - 1	/* r5 = dst & (L1_CACHE_BYTES - 1) */
	addi	r3,r3,-4

0:	twnei	r5, 0	/* WARN if r3 is not cache aligned */
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH	/* second pass: copy the last lines */
	li	r11,4
	b	2b
EXPORT_SYMBOL(copy_page)
/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 *
 * R3/R4 holds the 64-bit value, R5 the shift count;
 * the result is returned in R3/R4.
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
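/*
 * Each routine decomposes the 64-bit shift into 32-bit shifts.
 * For the logical right shift by c (0 <= c <= 63), for instance:
 *
 *	LSW' = (LSW >> c) | (MSW << (32 - c)) | (MSW >> (c - 32))
 *	MSW' =  MSW >> c
 *
 * On PPC32, srw/slw take a 6-bit shift amount and produce 0 for
 * amounts in 32..63, so terms whose effective shift falls outside
 * 0..31 drop out by themselves and no branches are needed.
 */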
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32	# r6 = 32 - count
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
EXPORT_SYMBOL(__ashrdi3)
_GLOBAL(__ashldi3)
	subfic	r6,r5,32	# r6 = 32 - count
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr
EXPORT_SYMBOL(__ashldi3)
_GLOBAL(__lshrdi3)
	subfic	r6,r5,32	# r6 = 32 - count
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
EXPORT_SYMBOL(__lshrdi3)
/*
 * 64-bit comparison: __cmpdi2(s64 a, s64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
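/*
 * Equivalent C (a sketch; this matches the libgcc return convention,
 * and __ucmpdi2 below differs only in using unsigned operands):
 *
 *	int __cmpdi2(s64 a, s64 b)
 *	{
 *		return (a < b) ? 0 : (a == b) ? 1 : 2;
 *	}
 */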
EXPORT_SYMBOL(__cmpdi2)
/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
EXPORT_SYMBOL(__ucmpdi2)
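/*
 * 64-bit byte swap.  The value arrives in r3 (MSW) and r4 (LSW);
 * swapping the bytes within each word and then exchanging the two
 * words reverses all eight bytes.  In C terms (a sketch, using the
 * kernel's __swab32()):
 *
 *	u64 __bswapdi2(u64 x)
 *	{
 *		return ((u64)__swab32((u32)x) << 32) | __swab32(x >> 32);
 *	}
 */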
_GLOBAL(__bswapdi2)
	rotlwi	r9,r4,8		/* rotate left 8: three bytes land home */
	rotlwi	r10,r3,8
	rlwimi	r9,r4,24,0,7	/* fix up byte 0 */
	rlwimi	r10,r3,24,0,7
	rlwimi	r9,r4,24,16,23	/* fix up byte 2 */
	rlwimi	r10,r3,24,16,23
	mr	r3,r9		/* swapped LSW becomes the new MSW */
	mr	r4,r10		/* swapped MSW becomes the new LSW */
	blr
EXPORT_SYMBOL(__bswapdi2)
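/*
 * start_secondary_resume below resets r1 to the base of the current
 * THREAD_SIZE-aligned stack area, i.e. roughly (pseudo-C sketch,
 * with THREAD_SIZE == 1 << THREAD_SHIFT):
 *
 *	sp = (sp & ~(THREAD_SIZE - 1))
 *	     + THREAD_SIZE - STACK_FRAME_OVERHEAD;
 */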
#ifdef CONFIG_SMP
_GLOBAL(start_secondary_resume)
	/* Reset stack */
	rlwinm	r1, r1, 0, 0, 31 - THREAD_SHIFT	/* round down to stack base */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r3,0
	stw	r3,0(r1)	/* Zero the stack frame pointer */
	bl	start_secondary
	b	.
#endif /* CONFIG_SMP */
/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr