2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
15 #include <linux/config.h>
16 #include <linux/sys.h>
17 #include <asm/unistd.h>
18 #include <asm/errno.h>
19 #include <asm/processor.h>
25 LG_CACHE_LINE_SIZE = 5
28 LG_CACHE_LINE_SIZE = 4
29 #endif /* CONFIG_8xx */
34 * Returns (address we're running at) - (address we were linked at)
35 * for use before the text and data are mapped to KERNELBASE.
48 /* void __no_use_save_flags(unsigned long *flags) */
49 _GLOBAL(__no_use_save_flags)
/* NOTE(review): the body of __no_use_save_flags is missing from this
   listing; only the entry label is visible -- confirm against full file. */
54 /* void __no_use_restore_flags(unsigned long flags) */
55 _GLOBAL(__no_use_restore_flags)
/* Before restoring MSR state, check whether any interrupts were "lost"
   (deferred) while disabled; if so, replay them first. */
58 lis r4,ppc_n_lost_interrupts@ha
59 lwz r4,ppc_n_lost_interrupts@l(r4)
60 cmpi 0,r4,0 /* lost interrupts to process first? */
61 bne- do_lost_interrupts
/* NOTE(review): the entry labels are elided from this listing; these
   fragments look like the interrupt-disable ("cli") and
   interrupt-enable ("sti") helper bodies -- confirm against full file. */
/* Disable path: returns old EE bit in r3, clears MSR_EE. */
68 mfmsr r0 /* Get current interrupt state */
69 rlwinm r3,r0,16+1,32-1,31 /* Extract old value of 'EE' */
70 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
71 sync /* Some chip revs have problems here... */
72 mtmsr r0 /* Update machine state */
/* Enable path: replay any lost interrupts before setting MSR_EE. */
76 lis r4,ppc_n_lost_interrupts@ha
77 lwz r4,ppc_n_lost_interrupts@l(r4)
78 mfmsr r3 /* Get current state */
79 ori r3,r3,MSR_EE /* Turn on 'EE' bit */
80 cmpi 0,r4,0 /* lost interrupts to process first? */
81 bne- do_lost_interrupts
82 sync /* Some chip revs have problems here... */
83 mtmsr r3 /* Update machine state */
87 * We were about to enable interrupts but we have to simulate
88 * some interrupts that were lost by enable_irq first.
90 .globl do_lost_interrupts
/* NOTE(review): the replay loop of do_lost_interrupts is missing from
   this listing; only the lost-interrupt counter reload is visible. */
97 lis r4,ppc_n_lost_interrupts@ha
98 lwz r4,ppc_n_lost_interrupts@l(r4)
111 * complement mask on the msr then "or" some values on.
112 * _nmask_and_or_msr(nmask, value_to_or)
114 _GLOBAL(_nmask_and_or_msr)
115 mfmsr r0 /* Get current msr */
116 andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
117 or r0,r0,r4 /* Or on the bits in r4 (second parm) */
118 sync /* Some chip revs have problems here... */
119 mtmsr r0 /* Update machine state */
/* NOTE(review): the trailing blr is not visible in this listing. */
137 * Flush MMU TLB for a particular address
/* NOTE(review): the tlbie body belonging to the comment above is
   missing from this listing. */
149 * Flush instruction cache.
150 * This is a no-op on the 601.
152 _GLOBAL(flush_instruction_cache)
/* Presumably r3 holds the PVR here (the preceding mfspr is elided from
   this listing); shift the version field into the low halfword. */
154 rlwinm r3,r3,16,16,31
156 beqlr /* for 601, do nothing */
157 /* 603/604 processor - use invalidate-all bit in HID0 */
165 * Write any modified data cache blocks out to memory
166 * and invalidate the corresponding instruction cache blocks.
167 * This is a no-op on the 601.
169 * flush_icache_range(unsigned long start, unsigned long stop)
171 _GLOBAL(flush_icache_range)
/* Presumably r5 holds the PVR here (the preceding mfspr is elided
   from this listing) -- 601 has a unified cache, so nothing to do. */
173 rlwinm r5,r5,16,16,31
175 beqlr /* for 601, do nothing */
/* Align the start address down to a cache line and compute the line
   count; interior loop instructions are elided from this listing. */
176 li r5,CACHE_LINE_SIZE-1
180 srwi. r4,r4,LG_CACHE_LINE_SIZE
/* First pass (fragment): push dirty data lines out to memory. */
185 addi r3,r3,CACHE_LINE_SIZE
187 sync /* wait for dcbst's to get to ram */
/* Second pass (fragment): invalidate the matching icache lines. */
190 addi r6,r6,CACHE_LINE_SIZE
197 * Like above, but only do the D-cache.
199 * flush_dcache_range(unsigned long start, unsigned long stop)
201 _GLOBAL(flush_dcache_range)
/* Align start down to a cache line and compute the number of lines;
   the dcbst loop body is elided from this listing. */
202 li r5,CACHE_LINE_SIZE-1
206 srwi. r4,r4,LG_CACHE_LINE_SIZE
211 addi r3,r3,CACHE_LINE_SIZE
213 sync /* wait for dcbst's to get to ram */
217 * Flush a particular page from the DATA cache
218 * Note: this is necessary because the instruction cache does *not*
219 * snoop from the data cache.
220 * This is a no-op on the 601 which has a unified cache.
222 * void flush_page_to_ram(void *page)
224 _GLOBAL(flush_page_to_ram)
/* Presumably r5 holds the PVR here (preceding mfspr elided). */
226 rlwinm r5,r5,16,16,31
228 beqlr /* for 601, do nothing */
230 andc r3,r3,r4 /* Get page base address */
231 li r4,4096/CACHE_LINE_SIZE /* Number of lines in a page */
/* Loop over each line in the page, pushing it to memory; loop
   control instructions are elided from this listing. */
234 0: dcbst 0,r3 /* Write line to ram */
235 addi r3,r3,CACHE_LINE_SIZE
240 addi r6,r6,CACHE_LINE_SIZE
247 * Clear a page using the dcbz instruction, which doesn't cause any
248 * memory traffic (except to write out any cache lines which get
249 * displaced). This only works on cacheable memory.
/* NOTE(review): the entry label (presumably clear_page) and the dcbz
   itself are missing from this listing. */
252 li r0,4096/CACHE_LINE_SIZE
255 addi r3,r3,CACHE_LINE_SIZE
260 * Atomic [test&set] exchange
262 * unsigned long xchg_u32(void *ptr, unsigned long val)
263 * Changes the memory location '*ptr' to be val and returns
264 * the previous value stored there.
/* NOTE(review): the _GLOBAL(xchg_u32) label and trailing blr are not
   visible in this listing. */
267 mr r5,r3 /* Save pointer */
268 10: lwarx r3,0,r5 /* Fetch old value & reserve */
269 stwcx. r4,0,r5 /* Update with new value */
270 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
274 * Try to acquire a spinlock.
275 * Only does the stwcx. if the load returned 0 - the Programming
276 * Environments Manual suggests not doing unnecessary stwcx.'s
277 * since they may inhibit forward progress by other CPUs in getting
280 _GLOBAL(__spin_trylock)
/* NOTE(review): lines are elided here (e.g. the setup of r4 = lock
   address and r5 = new value).  On the visible path, success returns
   with r3 == 0 (guaranteed by the cmpwi); failure returns non-zero. */
282 eieio /* prevent reordering of stores */
284 lwarx r3,0,r4 /* fetch old value, establish reservation */
285 cmpwi 0,r3,0 /* is it 0? */
286 bnelr- /* return failure if not */
287 stwcx. r5,0,r4 /* try to update with new value */
288 bne- 1f /* if we failed */
289 eieio /* prevent reordering of stores */
291 1: li r3,1 /* return non-zero for failure */
295 * Atomic add/sub/inc/dec operations
297 * void atomic_add(int c, int *v)
298 * void atomic_sub(int c, int *v)
299 * void atomic_inc(int *v)
300 * void atomic_dec(int *v)
301 * int atomic_dec_and_test(int *v)
302 * int atomic_inc_return(int *v)
303 * int atomic_dec_return(int *v)
304 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
305 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
/* NOTE(review): the _GLOBAL(atomic_add) label is elided from this
   listing; the lwarx/stwcx. retry loop below is its body. */
308 10: lwarx r5,0,r4 /* Fetch old value & reserve */
309 add r5,r5,r3 /* Perform 'add' operation */
310 stwcx. r5,0,r4 /* Update with new value */
311 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
313 _GLOBAL(atomic_add_return)
314 10: lwarx r5,0,r4 /* Fetch old value & reserve */
315 add r5,r5,r3 /* Perform 'add' operation */
316 stwcx. r5,0,r4 /* Update with new value */
317 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
/* NOTE(review): the "mr r3,r5" returning the new value is not visible
   in this listing. */
/* NOTE(review): the _GLOBAL(atomic_sub) label is elided from this
   listing; the retry loop below is its body. */
321 10: lwarx r5,0,r4 /* Fetch old value & reserve */
322 sub r5,r5,r3 /* Perform 'subtract' operation */
323 stwcx. r5,0,r4 /* Update with new value */
324 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
/* NOTE(review): the _GLOBAL(atomic_inc) label is elided from this
   listing; the retry loop below is its body. */
327 10: lwarx r5,0,r3 /* Fetch old value & reserve */
328 addi r5,r5,1 /* Perform 'increment' operation */
329 stwcx. r5,0,r3 /* Update with new value */
330 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
332 _GLOBAL(atomic_inc_return)
333 10: lwarx r5,0,r3 /* Fetch old value & reserve */
334 addi r5,r5,1 /* Perform 'increment' operation */
335 stwcx. r5,0,r3 /* Update with new value */
336 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
337 mr r3,r5 /* Return new value */
/* NOTE(review): the _GLOBAL(atomic_dec) label is elided from this
   listing; the retry loop below is its body. */
340 10: lwarx r5,0,r3 /* Fetch old value & reserve */
341 subi r5,r5,1 /* Perform 'decrement' operation */
342 stwcx. r5,0,r3 /* Update with new value */
343 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
345 _GLOBAL(atomic_dec_return)
346 10: lwarx r5,0,r3 /* Fetch old value & reserve */
347 subi r5,r5,1 /* Perform 'decrement' operation */
348 stwcx. r5,0,r3 /* Update with new value */
349 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
350 mr r3,r5 /* Return new value */
352 _GLOBAL(atomic_dec_and_test)
353 10: lwarx r5,0,r3 /* Fetch old value & reserve */
354 subi r5,r5,1 /* Perform 'decrement' operation */
355 stwcx. r5,0,r3 /* Update with new value */
356 bne- 10b /* Retry if "reservation" (i.e. lock) lost */
357 cmpi 0,r5,0 /* Return 'true' IFF 0 */
/* NOTE(review): the bodies of the two mask routines below are missing
   from this listing; only the entry labels are visible. */
362 _GLOBAL(atomic_clear_mask)
368 _GLOBAL(atomic_set_mask)
376 * I/O string operations
378 * insb(port, buf, len)
379 * outsb(port, buf, len)
380 * insw(port, buf, len)
381 * outsw(port, buf, len)
382 * insl(port, buf, len)
383 * outsl(port, buf, len)
384 * insw_ns(port, buf, len)
385 * outsw_ns(port, buf, len)
386 * insl_ns(port, buf, len)
387 * outsl_ns(port, buf, len)
389 * The *_ns versions don't do byte-swapping.
484 * Extended precision shifts
486 * R3/R4 has 64 bit value
490 * ashrdi3: XXXYYY/ZZZAAA -> SSSXXX/YYYZZZ
491 * ashldi3: XXXYYY/ZZZAAA -> YYYZZZ/AAA000
492 * lshrdi3: XXXYYY/ZZZAAA -> 000XXX/YYYZZZ
/* NOTE(review): the _GLOBAL labels and the r5/r6 shift-count setup
   for the three routines below are elided from this listing. */
/* Arithmetic shift right: sign-extending double-word shift. */
497 slw r7,r3,r6 /* isolate YYY */
498 srw r4,r4,r5 /* isolate ZZZ */
499 or r4,r4,r7 /* YYYZZZ */
500 sraw r3,r3,r5 /* SSSXXX */
/* Shift left: bits move from the low word into the high word. */
506 srw r7,r4,r6 /* isolate ZZZ */
507 slw r4,r4,r5 /* AAA000 */
508 slw r3,r3,r5 /* YYY--- */
509 or r3,r3,r7 /* YYYZZZ */
/* Logical shift right: like ashrdi3 but zero-filling the high word. */
515 slw r7,r3,r6 /* isolate YYY */
516 srw r4,r4,r5 /* isolate ZZZ */
517 or r4,r4,r7 /* YYYZZZ */
518 srw r3,r3,r5 /* 000XXX */
/* _get_SP fragment: returns the current stack pointer. */
528 mr r3,r1 /* Close enough */
560 Copyright © 1997-1998 by PowerLogix R & D, Inc.
562 This program is free software; you can redistribute it and/or modify
563 it under the terms of the GNU General Public License as published by
564 the Free Software Foundation; either version 2 of the License, or
565 (at your option) any later version.
567 This program is distributed in the hope that it will be useful,
568 but WITHOUT ANY WARRANTY; without even the implied warranty of
569 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
570 GNU General Public License for more details.
572 You should have received a copy of the GNU General Public License
573 along with this program; if not, write to the Free Software
574 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
578 - First public release, contributed by PowerLogix.
580 Author: Terry Greeniaus (tgree@phys.ualberta.ca)
581 Please e-mail updates to this file to me, thanks!
585 When setting the L2CR register, you must do a few special
586 things. If you are enabling the cache, you must perform a
587 global invalidate. If you are disabling the cache, you must
588 flush the cache contents first. This routine takes care of
589 doing these things. When first enabling the cache, make sure
590 you pass in the L2CR you want, as well as passing in the
591 global invalidate bit set. A global invalidate will only be
592 performed if the L2I bit is set in applyThis. When enabling
593 the cache, you should also set the L2E bit in applyThis. If
594 you want to modify the L2CR contents after the cache has been
595 enabled, the recommended procedure is to first call
596 __setL2CR(0) to disable the cache and then call it again with
597 the new values for L2CR. Examples:
599 _setL2CR(0) - disables the cache
600 _setL2CR(0xB3A04000) - enables my G3 upgrade card:
601 - L2E set to turn on the cache
604 - L2RAM set to pipelined synchronous late-write
605 - L2I set to perform a global invalidation
607 - L2DF set because this upgrade card
610 A similar call should work for your card. You need to know
611 the correct setting for your card and then place them in the
612 fields I have outlined above. Other fields support optional
613 features, such as L2DO which caches only data, or L2TS which
614 causes cache pushes from the L1 cache to go to the L2 cache
615 instead of to main memory.
/* NOTE(review): this routine is heavily elided in this listing (the
   entry label, mfspr/mtspr accesses to L2CR and PVR, branch targets
   and the loop bodies are missing); comments below annotate only the
   visible fragments. */
619 /* Make sure this is a 750 chip */
/* Presumably r4 holds the PVR here; shift version into low half. */
621 rlwinm r4,r4,16,16,31
628 /* Get the current enable bit of the L2CR into r4 */
632 /* See if we want to perform a global inval this time. */
633 rlwinm r6,r3,0,10,10 /* r6 contains the new invalidate bit */
634 rlwinm. r5,r3,0,0,0 /* r5 contains the new enable bit */
635 rlwinm r3,r3,0,11,9 /* Turn off the invalidate bit */
636 rlwimi r3,r4,0,0,0 /* Keep the enable bit the same as it was. */
637 bne dontDisableCache /* Only disable the cache if L2CRApply
638 has the enable bit off */
641 /* Disable the cache. First, we turn off interrupts.
642 An interrupt while we are flushing the cache could bring
643 in data which may not get properly flushed. */
644 rlwinm r4,r7,0,17,15 /* Turn off EE bit */
650 Now, read the first 2MB of memory to put new data in the cache.
651 (Actually we only need the size of the L2 cache plus the size
652 of the L1 cache, but 2MB will cover everything just to be safe).
658 addi r4,r4,0x0020 /* Go to start of next cache line */
661 /* Now, flush the first 2MB of memory */
667 addi r4,r4,0x0020 /* Go to start of next cache line */
670 /* Turn off the L2CR enable bit. */
674 /* Set up the L2CR configuration bits */
679 /* Reenable interrupts if necessary. */
686 /* Perform a global invalidation */
692 /* Wait for the invalidation to complete */
/* Poll the L2I (invalidate-in-progress) bit until it clears. */
694 rlwinm. r4,r3,0,31,31
697 rlwinm r3,r3,0,11,9 /* Turn off the L2I bit */
703 /* See if we need to enable the cache */
707 /* Enable the cache */
714 /* Make sure this is a 750 chip */
716 rlwinm r3,r3,16,16,31
721 /* Return the L2CR contents */
725 /* --- End of PowerLogix code ---
725 /* --- End of PowerLogix code ---
740 * These are used in the alignment trap handler when emulating
741 * single-precision loads and stores.
742 * We restore and save the fpscr so the task gets the same result
743 * and exceptions as if the cpu had performed the load or store.
/* NOTE(review): the two emulation routines below are elided in this
   listing (entry labels, mtfsf, and the lfs/stfs are missing). */
747 lfd 0,-4(r5) /* load up fpscr value */
751 mffs 0 /* save new fpscr value */
757 lfd 0,-4(r5) /* load up fpscr value */
761 mffs 0 /* save new fpscr value */
765 .globl __clear_msr_me
/* Clear the MSR bits given in r3 (e.g. machine-check enable). */
767 mfmsr r0 /* Get current interrupt state */
770 andc r0,r0,r3 /* Clears bits set in r3 */
771 sync /* Some chip revs have problems here */
772 mtmsr r0 /* Update machine state */
776 * Create a kernel thread
777 * kernel_thread(fn, arg, flags)
779 _GLOBAL(kernel_thread)
780 mr r6,r3 /* function */
781 ori r3,r5,CLONE_VM /* flags */
/* NOTE(review): the clone syscall setup/invocation between here and
   the parent/child test is elided from this listing. */
784 cmpi 0,r3,0 /* parent or child? */
785 bnelr /* return if parent */
/* Child: detach from any user context, then jump to fn(arg). */
786 li r0,0 /* clear out p->thread.regs */
787 stw r0,THREAD+PT_REGS(r2) /* since we don't have user ctx */
788 mtlr r6 /* fn addr in lr */
789 mr r3,r4 /* load arg and call fn */
/* If fn returns, exit the thread via the _exit syscall. */
791 li r0,__NR_exit /* exit after child exits */
796 * This routine is just here to keep GCC happy - sigh...
/* SYSCALL(name): emit a small user-callable stub that invokes the
   syscall and stores a failure code into errno.  NOTE(review): most
   of the macro body is elided from this listing. */
801 #define SYSCALL(name) \
807 stw r3,errno@l(r4); \
811 #define __NR__exit __NR_exit
822 SYSCALL(delete_module)
827 /* Why isn't this a) automatic, b) written in 'C'? */
/* System call dispatch table: one .long (function pointer) per
   syscall number.  NOTE(review): many entries are elided from this
   listing; the numbering in the trailing comments is authoritative.
   The .space at the end pads the table out to NR_syscalls entries --
   the "183" there should equal the number of entries emitted above;
   verify against the full file. */
830 .globl sys_call_table
832 .long sys_ni_syscall /* 0 - old "setup()" system call */
837 .long sys_open /* 5 */
842 .long sys_unlink /* 10 */
847 .long sys_chmod /* 15 */
849 .long sys_ni_syscall /* old break syscall holder */
852 .long sys_getpid /* 20 */
857 .long sys_stime /* 25 */
862 .long sys_utime /* 30 */
863 .long sys_ni_syscall /* old stty syscall holder */
864 .long sys_ni_syscall /* old gtty syscall holder */
867 .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
872 .long sys_rmdir /* 40 */
876 .long sys_ni_syscall /* old prof syscall holder */
877 .long sys_brk /* 45 */
882 .long sys_getegid /* 50 */
884 .long sys_umount /* recycled never used phys() */
885 .long sys_ni_syscall /* old lock syscall holder */
887 .long sys_fcntl /* 55 */
888 .long sys_ni_syscall /* old mpx syscall holder */
890 .long sys_ni_syscall /* old ulimit syscall holder */
892 .long sys_umask /* 60 */
897 .long sys_getpgrp /* 65 */
902 .long sys_setreuid /* 70 */
906 .long sys_sethostname
907 .long sys_setrlimit /* 75 */
910 .long sys_gettimeofday
911 .long sys_settimeofday
912 .long sys_getgroups /* 80 */
917 .long sys_readlink /* 85 */
922 .long sys_mmap /* 90 */
927 .long sys_fchown /* 95 */
928 .long sys_getpriority
929 .long sys_setpriority
930 .long sys_ni_syscall /* old profil syscall holder */
932 .long sys_fstatfs /* 100 */
937 .long sys_getitimer /* 105 */
942 .long sys_iopl /* 110 */
944 .long sys_ni_syscall /* old 'idle' syscall */
947 .long sys_swapoff /* 115 */
952 .long sys_clone /* 120 */
953 .long sys_setdomainname
957 .long sys_mprotect /* 125 */
958 .long sys_sigprocmask
959 .long sys_create_module
960 .long sys_init_module
961 .long sys_delete_module
962 .long sys_get_kernel_syms /* 130 */
967 .long sys_sysfs /* 135 */
968 .long sys_personality
969 .long sys_ni_syscall /* for afs_syscall */
972 .long sys_llseek /* 140 */
977 .long sys_readv /* 145 */
982 .long sys_mlock /* 150 */
986 .long sys_sched_setparam
987 .long sys_sched_getparam /* 155 */
988 .long sys_sched_setscheduler
989 .long sys_sched_getscheduler
990 .long sys_sched_yield
991 .long sys_sched_get_priority_max
992 .long sys_sched_get_priority_min /* 160 */
993 .long sys_sched_rr_get_interval
997 .long sys_getresuid /* 165 */
998 .long sys_query_module
1001 .long sys_nfsservctl
1003 .long sys_ni_syscall
1006 .long sys_getresgid /* 170 */
1008 .long sys_rt_sigreturn
1009 .long sys_rt_sigaction
1010 .long sys_rt_sigprocmask
1011 .long sys_rt_sigpending /* 175 */
1012 .long sys_rt_sigtimedwait
1013 .long sys_rt_sigqueueinfo
1014 .long sys_rt_sigsuspend
1016 .long sys_pwrite /* 180 */
1021 .long sys_sigaltstack /* 185 */
1023 .long sys_ni_syscall /* streams1 */
1024 .long sys_ni_syscall /* streams2 */
1026 .space (NR_syscalls-183)*4