2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
4 * kernel entry points (interruptions, system call wrappers)
5 * Copyright (C) 1999,2000 Philipp Rumpf
6 * Copyright (C) 1999 SuSE GmbH Nuernberg
7 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #include <asm/asm-offsets.h>
27 /* we have the following possibilities to act on an interruption:
28 * - handle in assembly and use shadowed registers only
29 * - save registers to kernel stack and handle in assembly or C */
33 #include <asm/assembly.h> /* for LDREG/STREG defines */
34 #include <asm/pgtable.h>
35 #include <asm/signal.h>
36 #include <asm/unistd.h>
37 #include <asm/thread_info.h>
53 .import pa_dbit_lock,data
55 /* space_to_prot macro creates a prot id from a space id */
57 #if (SPACEID_SHIFT) == 0
58 .macro space_to_prot spc prot
59 depd,z \spc,62,31,\prot
62 .macro space_to_prot spc prot
63 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
67 /* Switch to virtual mapping, trashing only %r1 */
70 rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation" */
74 or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
77 load32 KERNEL_PSW, %r1
79 rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
82 mtctl %r0, %cr17 /* Clear IIASQ tail */
83 mtctl %r0, %cr17 /* Clear IIASQ head */
86 mtctl %r1, %cr18 /* Set IIAOQ tail */
88 mtctl %r1, %cr18 /* Set IIAOQ head */
95 * The "get_stack" macros are responsible for determining the
100 * Already using a kernel stack, so call the
101 * get_stack_use_r30 macro to push a pt_regs structure
102 * on the stack, and store registers there.
104 * Need to set up a kernel stack, so call the
105 * get_stack_use_cr30 macro to set up a pointer
106 * to the pt_regs structure contained within the
107 * task pointer pointed to by cr30. Set the stack
108 * pointer to point to the end of the task structure.
112 * Already using a kernel stack, check to see if r30
113 * is already pointing to the per processor interrupt
114 * stack. If it is, call the get_stack_use_r30 macro
115 * to push a pt_regs structure on the stack, and store
116 * registers there. Otherwise, call get_stack_use_cr31
117 * to get a pointer to the base of the interrupt stack
118 * and push a pt_regs structure on that stack.
120 * Need to set up a kernel stack, so call the
121 * get_stack_use_cr30 macro to set up a pointer
122 * to the pt_regs structure contained within the
123 * task pointer pointed to by cr30. Set the stack
124 * pointer to point to the end of the task structure.
125 * N.B: We don't use the interrupt stack for the
126 * first interrupt from userland, because signals/
127 * resched's are processed when returning to userland,
128 * and we can sleep in those cases.
130 * Note that we use shadowed registers for temps until
131 * we can save %r26 and %r29. %r26 is used to preserve
132 * %r8 (a shadowed register) which temporarily contained
133 * either the fault type ("code") or the eirr. We need
134 * to use a non-shadowed register to carry the value over
135 * the rfir in virt_map. We use %r26 since this value winds
136 * up being passed as the argument to either do_cpu_irq_mask
137 * or handle_interruption. %r29 is used to hold a pointer
138 * to the register save area, and once again, it needs to
139 * be a non-shadowed register so that it survives the rfir.
141 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
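/*
 * Illustrative C-like sketch of the stack selection described above
 * (helper names are assumptions, not symbols from this file; the macros
 * below do the real work directly on registers):
 *
 *	struct pt_regs *regs;
 *	if (already_on_kernel_stack()) {		// get_stack_use_r30 case
 *		regs = (struct pt_regs *)r30;		// push pt_regs at current sp
 *		r30 += PT_SZ_ALGN;
 *	} else {					// get_stack_use_cr30 case
 *		struct task_struct *tsk = task_from_cr30();
 *		regs = (void *)tsk + TASK_REGS;		// pt_regs held in the task
 *		r30  = kernel_stack_top(tsk);		// end of the task structure
 *	}
 *	// %r30, %r29, %r26 and friends are then stored into *regs
 */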
144 .macro get_stack_use_cr30
146 /* we save the registers in the task struct */
150 LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
152 ldo TASK_REGS(%r9),%r9
153 STREG %r30, PT_GR30(%r9)
154 STREG %r29,PT_GR29(%r9)
155 STREG %r26,PT_GR26(%r9)
158 ldo THREAD_SZ_ALGN(%r1), %r30
161 .macro get_stack_use_r30
163 /* we put a struct pt_regs on the stack and save the registers there */
166 STREG %r30,PT_GR30(%r9)
167 ldo PT_SZ_ALGN(%r30),%r30
168 STREG %r29,PT_GR29(%r9)
169 STREG %r26,PT_GR26(%r9)
174 LDREG PT_GR1(%r29), %r1
175 LDREG PT_GR30(%r29),%r30
176 LDREG PT_GR29(%r29),%r29
179 /* default interruption handler
180 * (calls traps.c:handle_interruption) */
187 /* Interrupt interruption handler
188 * (calls irq.c:do_cpu_irq_mask) */
195 .import os_hpmc, code
199 nop /* must be a NOP, will be patched later */
200 load32 PA(os_hpmc), %r3
203 .word 0 /* checksum (will be patched) */
204 .word PA(os_hpmc) /* address of handler */
205 .word 0 /* length of handler */
209 * Performance Note: Instructions will be moved up into
210 * this part of the code later on, once we are sure
211 * that the tlb miss handlers are close to final form.
214 /* Register definitions for tlb miss handler macros */
216 va = r8 /* virtual address for which the trap occurred */
217 spc = r24 /* space for which the trap occurred */
222 * itlb miss interruption handler (parisc 1.1 - 32 bit)
236 * itlb miss interruption handler (parisc 2.0)
253 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
255 * Note: naitlb misses will be treated
256 * as an ordinary itlb miss for now.
257 * However, note that naitlb misses
258 * have the faulting address in the
262 .macro naitlb_11 code
267 /* FIXME: If the user causes a naitlb miss, the priv level bits may not be in
268 * the lower bits of va, where the itlb miss handler expects them
276 * naitlb miss interruption handler (parisc 2.0)
278 * Note: naitlb misses will be treated
279 * as an ordinary itlb miss for now.
280 * However, note that naitlb misses
281 * have the faulting address in the
285 .macro naitlb_20 code
294 /* FIXME: If the user causes a naitlb miss, the priv level bits may not be in
295 * the lower bits of va, where the itlb miss handler expects them
303 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
317 * dtlb miss interruption handler (parisc 2.0)
334 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
336 .macro nadtlb_11 code
346 /* nadtlb miss interruption handler (parisc 2.0) */
348 .macro nadtlb_20 code
363 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
377 * dirty bit trap interruption handler (parisc 2.0)
393 /* The following are simple 32 vs 64 bit instruction
394 * abstractions for the macros */
395 .macro EXTR reg1,start,length,reg2
397 extrd,u \reg1,32+\start,\length,\reg2
399 extrw,u \reg1,\start,\length,\reg2
403 .macro DEP reg1,start,length,reg2
405 depd \reg1,32+\start,\length,\reg2
407 depw \reg1,\start,\length,\reg2
411 .macro DEPI val,start,length,reg
413 depdi \val,32+\start,\length,\reg
415 depwi \val,\start,\length,\reg
419 /* In LP64, the space contains part of the upper 32 bits of the
420 * faulting address. We have to extract this and place it in the va,
421 * zeroing the corresponding bits in the space register */
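/*
 * Roughly, in C (illustrative only; the exact bit positions are those
 * used by the instructions in the macro below):
 *
 *	mask = (1UL << SPACEID_SHIFT) - 1;
 *	tmp  = spc & mask;				// low bits of the space id
 *	spc &= ~mask;					// zero them in the space register
 *	va   = (va & ~(mask << 32)) | (tmp << 32);	// fold them into the va
 */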
422 .macro space_adjust spc,va,tmp
424 extrd,u \spc,63,SPACEID_SHIFT,\tmp
425 depd %r0,63,SPACEID_SHIFT,\spc
426 depd \tmp,31,SPACEID_SHIFT,\va
430 .import swapper_pg_dir,code
432 /* Get the pgd. For faults on space zero (kernel space), this
433 * is simply swapper_pg_dir. For user space faults, the
434 * pgd is stored in %cr25 */
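/*
 * In rough C terms (illustrative; mfctl() here just denotes reading a
 * control register):
 *
 *	pgd_t *pgd = (spc == 0) ? swapper_pg_dir	// kernel space fault
 *				: (pgd_t *)mfctl(25);	// current user pgd (%cr25)
 */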
435 .macro get_pgd spc,reg
436 ldil L%PA(swapper_pg_dir),\reg
437 ldo R%PA(swapper_pg_dir)(\reg),\reg
438 or,COND(=) %r0,\spc,%r0
443 space_check(spc,tmp,fault)
445 spc - The space we saw the fault with.
446 tmp - The place to store the current space.
447 fault - Function to call on failure.
449 Only allow faults on different spaces from the
450 currently active one if we're the kernel
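/*
 * Equivalent check in illustrative C (mfsp(7) denotes reading %sr7,
 * the currently active space):
 *
 *	tmp = mfsp(7);
 *	if (tmp != 0 && tmp != spc)	// space 0 (kernel) may fault anywhere
 *		goto fault;
 */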
453 .macro space_check spc,tmp,fault
455 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
456 * as kernel, so defeat the space
459 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
460 cmpb,COND(<>),n \tmp,\spc,\fault
463 /* Look up a PTE in a 2-Level scheme (faulting at each
464 * level if the entry isn't present)
466 * NOTE: we use ldw even for LP64, since the short pointers
467 * can address up to 1TB
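/*
 * The walk below, approximately in C (illustrative; PxD_PRESENT and
 * PxD_FLAGS are stand-in masks, and the fault branches behave exactly
 * as in the assembly):
 *
 *	unsigned long pmd, pte, *pte_tbl;
 *	pmd = pmd_tbl[(va >> PMD_SHIFT) & (PTRS_PER_PMD - 1)];	// PGDIR on 2-level
 *	if (!(pmd & PxD_PRESENT)) goto fault;
 *	pte_tbl = (unsigned long *)((pmd & ~PxD_FLAGS) << PxD_VALUE_SHIFT);
 *	pte = pte_tbl[(va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
 *	if (!(pte & _PAGE_PRESENT)) goto fault;
 */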
469 .macro L2_ptep pmd,pte,index,va,fault
471 EXTR \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
473 EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
475 DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
477 ldw,s \index(\pmd),\pmd
478 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
479 DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
482 shld %r9,PxD_VALUE_SHIFT,\pmd
484 shlw %r9,PxD_VALUE_SHIFT,\pmd
486 EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
487 DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
488 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
489 LDREG %r0(\pmd),\pte /* pmd is now pte */
490 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
493 /* Look up PTE in a 3-Level scheme.
495 * Here we implement a Hybrid L2/L3 scheme: we allocate the
496 * first pmd adjacent to the pgd. This means that we can
497 * subtract a constant offset to get to it. The pmd and pgd
498 * sizes are arranged so that a single pmd covers 4GB (giving
499 * a full LP64 process access to 8TB) so our lookups are
500 * effectively L2 for the first 4GB of the kernel (i.e. for
501 * all ILP32 processes and all the kernel for machines with
502 * under 4GB of memory) */
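/*
 * In outline (illustrative C; ASM_PGD_PMD_OFFSET is the constant that
 * locates the pmd allocated adjacent to the pgd, as described above):
 *
 *	if (PT_NLEVELS == 3 && (va >> PGDIR_SHIFT) != 0) {
 *		pgd = pgd_tbl[va >> PGDIR_SHIFT];	// genuine third level
 *		if (!(pgd & PxD_PRESENT)) goto fault;
 *		pmd_base = (pgd & ~PxD_FLAGS) << PxD_VALUE_SHIFT;
 *	} else {
 *		pmd_base = (unsigned long)pgd_tbl + ASM_PGD_PMD_OFFSET;
 *	}
 *	// then continue with the L2_ptep walk on pmd_base
 */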
503 .macro L3_ptep pgd,pte,index,va,fault
504 #if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
505 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
507 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
508 ldw,s \index(\pgd),\pgd
509 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
510 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
511 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
512 shld \pgd,PxD_VALUE_SHIFT,\index
513 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
515 extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
516 ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
518 L2_ptep \pgd,\pte,\index,\va,\fault
521 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
522 * don't needlessly dirty the cache line if it was already set */
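/*
 * i.e. roughly, in C:
 *
 *	if (!(*ptep & _PAGE_ACCESSED))
 *		*ptep |= _PAGE_ACCESSED;	// store only when the bit was clear
 */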
523 .macro update_ptep ptep,pte,tmp,tmp1
524 ldi _PAGE_ACCESSED,\tmp1
526 and,COND(<>) \tmp1,\pte,%r0
530 /* Set the dirty bit (and accessed bit). No need to be
531 * clever, this is only used from the dirty fault */
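/*
 * i.e. roughly, in C:
 *
 *	*ptep |= _PAGE_ACCESSED | _PAGE_DIRTY;	// unconditional store is fine here
 */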
532 .macro update_dirty ptep,pte,tmp
533 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
538 /* Convert the pte and prot to tlb insertion values. How
539 * this happens is quite subtle, read below */
540 .macro make_insert_tlb spc,pte,prot
541 space_to_prot \spc \prot /* create prot id from space */
542 /* The following is the real subtlety. This is depositing
543 * T <-> _PAGE_REFTRAP
545 * B <-> _PAGE_DMB (memory break)
547 * Then incredible subtlety: The access rights are
548 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
549 * See 3-14 of the parisc 2.0 manual
551 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
552 * trigger an access rights trap in user space if the user
553 * tries to read an unreadable page */
556 /* PAGE_USER indicates the page can be read with user privileges,
557 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
558 * contains _PAGE_READ) */
559 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
561 /* If we're a gateway page, drop PL2 back to zero for promotion
562 * to kernel privilege (so we can execute the page as kernel).
563 * Any privilege promotion page always denies read and write */
564 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
565 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
567 /* Enforce uncacheable pages.
568 * This should ONLY be used for MMIO on PA 2.0 machines.
569 * Memory/DMA is cache coherent on all PA2.0 machines we support
570 * (that means T-class is NOT supported) and the memory controllers
571 * on most of those machines only handle cache transactions.
573 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
576 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
577 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58),64-PAGE_SHIFT,\pte
578 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,63-58,\pte
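/*
 * The conversion above, summarised as illustrative pseudocode (the
 * helper names are not real symbols; the exact field positions are
 * those used by the deposit/extract instructions above):
 *
 *	prot  = space_to_prot(spc);			// protection id
 *	prot |= tdb_bits_from(pte);			// T/D/B flags
 *	if (pte & _PAGE_USER)     set_user_pl(prot);	// PL1 top bit = _PAGE_READ
 *	if (pte & _PAGE_GATEWAY)  clear_pl2(prot);	// promotion page: PL2 = 0
 *	if (pte & _PAGE_NO_CACHE) set_uncached(prot);	// MMIO on PA 2.0 only
 *	pte = phys_page_for_insert(pte) | _PAGE_SIZE_ENCODING_DEFAULT; // prot bits dropped
 */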
581 /* Identical macro to make_insert_tlb above, except it
582 * makes the tlb entry for the differently formatted pa11
583 * insertion instructions */
584 .macro make_insert_tlb_11 spc,pte,prot
585 zdep \spc,30,15,\prot
587 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
589 extru,= \pte,_PAGE_USER_BIT,1,%r0
590 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
591 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
592 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
594 /* Get rid of prot bits and convert to page addr for iitlba */
596 depi _PAGE_SIZE_ENCODING_DEFAULT,31,ASM_PFN_PTE_SHIFT,\pte
597 extru \pte,24,25,\pte
600 /* This is for ILP32 PA2.0 only. The TLB insertion needs
601 * to extend into I/O space if the address is 0xfXXXXXXX
602 * so we extend the f's into the top word of the pte in
604 .macro f_extend pte,tmp
605 extrd,s \pte,42,4,\tmp
607 extrd,s \pte,63,25,\pte
610 /* The alias region is an 8MB aligned, 16MB region used to clear and
611 * copy user pages at addresses congruent with the user
614 * To use the alias page, you set %r26 up with the "to" TLB
615 * entry (identifying the physical page) and %r23 up with
616 * the "from" tlb entry (or nothing if only a "to" entry---for
617 * clear_user_page_asm) */
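/*
 * The checks the macro performs, roughly (illustrative C; the mask and
 * helper names are stand-ins):
 *
 *	if (spc != 0) goto fault;			// alias region is kernel-only
 *	if ((va & tmpalias_mask) != TMPALIAS_MAP_START) goto fault;
 *	prot = _PAGE_DIRTY | _PAGE_WRITE | _PAGE_READ;	// shifted into position
 *	pte  = faulting_on_from_half(va) ? r23 : r26;	// "from" page : "to" page
 */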
618 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault
619 cmpib,COND(<>),n 0,\spc,\fault
620 ldil L%(TMPALIAS_MAP_START),\tmp
621 #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
622 /* on LP64, ldi will sign extend into the upper 32 bits,
623 * which is behaviour we don't want */
628 cmpb,COND(<>),n \tmp,\tmp1,\fault
629 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
630 depd,z \prot,8,7,\prot
632 * OK, it is in the temp alias region, check whether "from" or "to".
633 * Check "subtle" note in pacache.S re: r23/r26.
636 extrd,u,*= \va,41,1,%r0
638 extrw,u,= \va,9,1,%r0
640 or,COND(tr) %r23,%r0,\pte
646 * Align fault_vector_20 on 4K boundary so that both
647 * fault_vector_11 and fault_vector_20 are on the
648 * same page. This is only necessary as long as we
649 * write protect the kernel text, which we may stop
650 * doing once we use large page translations to cover
651 * the static part of the kernel address space.
654 .export fault_vector_20
661 /* First vector is invalid (0) */
662 .ascii "cows can fly"
704 .export fault_vector_11
709 /* First vector is invalid (0) */
710 .ascii "cows can fly"
752 .import handle_interruption,code
753 .import do_cpu_irq_mask,code
756 * r26 = function to be called
757 * r25 = argument to pass in
758 * r24 = flags for do_fork()
760 * Kernel threads don't ever return, so they don't need
761 * a true register context. We just save away the arguments
762 * for copy_thread/ret_from_kernel_thread to properly set up the child.
765 #define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */
766 #define CLONE_UNTRACED 0x00800000
768 .export __kernel_thread, code
771 STREG %r2, -RP_OFFSET(%r30)
774 ldo PT_SZ_ALGN(%r30),%r30
776 /* Yo, function pointers in wide mode are little structs... -PB */
778 STREG %r2, PT_GR27(%r1) /* Store child's %dp */
781 STREG %r22, PT_GR22(%r1) /* save r22 (arg5) */
782 copy %r0, %r22 /* user_tid */
784 STREG %r26, PT_GR26(%r1) /* Store function & argument for child */
785 STREG %r25, PT_GR25(%r1)
786 ldil L%CLONE_UNTRACED, %r26
787 ldo CLONE_VM(%r26), %r26 /* Force CLONE_VM since only init_mm */
788 or %r26, %r24, %r26 /* will have kernel mappings. */
789 ldi 1, %r25 /* stack_start, signals kernel thread */
790 stw %r0, -52(%r30) /* user_tid */
792 ldo -16(%r30),%r29 /* Reference param save area */
795 copy %r1, %r24 /* pt_regs */
797 /* Parent Returns here */
799 LDREG -PT_SZ_ALGN-RP_OFFSET(%r30), %r2
800 ldo -PT_SZ_ALGN(%r30), %r30
807 * copy_thread moved args from temp save area set up above
808 * into task save area.
811 .export ret_from_kernel_thread
812 ret_from_kernel_thread:
814 /* Call schedule_tail first though */
815 BL schedule_tail, %r2
818 LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
819 LDREG TASK_PT_GR25(%r1), %r26
821 LDREG TASK_PT_GR27(%r1), %r27
822 LDREG TASK_PT_GR22(%r1), %r22
824 LDREG TASK_PT_GR26(%r1), %r1
829 ldo -16(%r30),%r29 /* Reference param save area */
830 loadgp /* Thread could have been in a module */
840 .import sys_execve, code
841 .export __execve, code
845 ldo PT_SZ_ALGN(%r30), %r30
846 STREG %r26, PT_GR26(%r16)
847 STREG %r25, PT_GR25(%r16)
848 STREG %r24, PT_GR24(%r16)
850 ldo -16(%r30),%r29 /* Reference param save area */
855 cmpib,=,n 0,%r28,intr_return /* forward */
857 /* yes, this will trap and die. */
866 * struct task_struct *_switch_to(struct task_struct *prev,
867 * struct task_struct *next)
869 * switch kernel stacks and return prev */
870 .export _switch_to, code
872 STREG %r2, -RP_OFFSET(%r30)
877 load32 _switch_to_ret, %r2
879 STREG %r2, TASK_PT_KPC(%r26)
880 LDREG TASK_PT_KPC(%r25), %r2
882 STREG %r30, TASK_PT_KSP(%r26)
883 LDREG TASK_PT_KSP(%r25), %r30
884 LDREG TASK_THREAD_INFO(%r25), %r25
889 mtctl %r0, %cr0 /* Needed for single stepping */
893 LDREG -RP_OFFSET(%r30), %r2
898 * Common rfi return path for interruptions, kernel execve, and
899 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
900 * return via this path if the signal was received when the process
901 * was running; if the process was blocked on a syscall then the
902 * normal syscall_exit path is used. All syscalls for traced
903 * processes exit via intr_restore.
905 * XXX If any syscalls that change a process's space id ever exit
906 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
913 .export syscall_exit_rfi
916 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
917 ldo TASK_REGS(%r16),%r16
918 /* Force iaoq to userspace, as the user has had access to our current
919 * context via sigcontext. Also filter the PSW for the same reason.
921 LDREG PT_IAOQ0(%r16),%r19
923 STREG %r19,PT_IAOQ0(%r16)
924 LDREG PT_IAOQ1(%r16),%r19
926 STREG %r19,PT_IAOQ1(%r16)
927 LDREG PT_PSW(%r16),%r19
928 load32 USER_PSW_MASK,%r1
930 load32 USER_PSW_HI_MASK,%r20
933 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
935 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
936 STREG %r19,PT_PSW(%r16)
939 * If we aren't being traced, we never saved space registers
940 * (we don't store them in the sigcontext), so set them
941 * to "proper" values now (otherwise we'll wind up restoring
942 * whatever was last stored in the task structure, which might
943 * be inconsistent if an interrupt occurred while on the gateway
944 * page). Note that we may be "trashing" values the user put in
945 * them, but we don't support the user changing them.
948 STREG %r0,PT_SR2(%r16)
950 STREG %r19,PT_SR0(%r16)
951 STREG %r19,PT_SR1(%r16)
952 STREG %r19,PT_SR3(%r16)
953 STREG %r19,PT_SR4(%r16)
954 STREG %r19,PT_SR5(%r16)
955 STREG %r19,PT_SR6(%r16)
956 STREG %r19,PT_SR7(%r16)
959 /* NOTE: Need to enable interrupts in case we schedule. */
962 /* Check for software interrupts */
964 .import irq_stat,data
969 ldw TI_CPU(%r1),%r1 /* get cpu # - int */
970 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
971 ** irq_stat[] is defined using ____cacheline_aligned.
978 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
979 #endif /* CONFIG_SMP */
983 /* check for reschedule */
985 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
986 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
991 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_SIGPENDING */
992 bb,<,n %r19, 31-TIF_SIGPENDING, intr_do_signal /* forward */
996 ldo PT_FR31(%r29),%r1
1000 /* inverse of virt_map */
1002 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
1005 /* Restore space id's and special cr's from PT_REGS
1006 * structure pointed to by r29
1010 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
1011 * It also restores r1 and r30.
1025 #ifndef CONFIG_PREEMPT
1026 # define intr_do_preempt intr_restore
1027 #endif /* !CONFIG_PREEMPT */
1029 .import schedule,code
1031 /* Only call schedule on return to userspace. If we're returning
1032 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
1033 * we jump back to intr_restore.
1035 LDREG PT_IASQ0(%r16), %r20
1036 CMPIB= 0, %r20, intr_do_preempt
1038 LDREG PT_IASQ1(%r16), %r20
1039 CMPIB= 0, %r20, intr_do_preempt
1043 ldo -16(%r30),%r29 /* Reference param save area */
1046 ldil L%intr_check_sig, %r2
1047 #ifndef CONFIG_64BIT
1050 load32 schedule, %r20
1053 ldo R%intr_check_sig(%r2), %r2
1055 /* preempt the current task on returning to kernel
1056 * mode from an interrupt, iff need_resched is set,
1057 * and preempt_count is 0. Otherwise, we continue on
1058 * our merry way back to the current running task.
1060 #ifdef CONFIG_PREEMPT
1061 .import preempt_schedule_irq,code
1063 rsm PSW_SM_I, %r0 /* disable interrupts */
1065 /* current_thread_info()->preempt_count */
1067 LDREG TI_PRE_COUNT(%r1), %r19
1068 CMPIB<> 0, %r19, intr_restore /* if preempt_count > 0 */
1069 nop /* prev insn branched backwards */
1071 /* check if we interrupted a critical path */
1072 LDREG PT_PSW(%r16), %r20
1073 bb,<,n %r20, 31 - PSW_SM_I, intr_restore
1076 BL preempt_schedule_irq, %r2
1079 b intr_restore /* ssm PSW_SM_I done by intr_restore */
1080 #endif /* CONFIG_PREEMPT */
1082 .import do_signal,code
1085 This check is critical to having LWS
1086 working. The IASQ is zero on the gateway
1087 page and we cannot deliver any signals until
1088 we get off the gateway page.
1090 Only do signals if we are returning to user space
1092 LDREG PT_IASQ0(%r16), %r20
1093 CMPIB= 0,%r20,intr_restore /* backward */
1095 LDREG PT_IASQ1(%r16), %r20
1096 CMPIB= 0,%r20,intr_restore /* backward */
1099 copy %r0, %r24 /* unsigned long in_syscall */
1100 copy %r16, %r25 /* struct pt_regs *regs */
1102 ldo -16(%r30),%r29 /* Reference param save area */
1106 copy %r0, %r26 /* sigset_t *oldset = NULL */
1112 * External interrupts.
1121 #if 0 /* Interrupt Stack support not working yet! */
1124 /* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
1142 ldo PT_FR0(%r29), %r24
1147 copy %r29, %r26 /* arg0 is pt_regs */
1148 copy %r29, %r16 /* save pt_regs */
1150 ldil L%intr_return, %r2
1153 ldo -16(%r30),%r29 /* Reference param save area */
1157 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
1160 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1162 .export intr_save, code /* for os_hpmc */
1178 /* If this trap is an itlb miss, skip saving/adjusting isr/ior */
1181 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1183 * 2) Once we start executing code above 4 Gb, we need
1184 * to adjust iasq/iaoq here in the same way we
1185 * adjust isr/ior below.
1188 CMPIB=,n 6,%r26,skip_save_ior
1191 mfctl %cr20, %r16 /* isr */
1192 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1193 mfctl %cr21, %r17 /* ior */
1198 * If the interrupted code was running with W bit off (32 bit),
1199 * clear the b bits (bits 0 & 1) in the ior.
1200 * save_specials left ipsw value in r8 for us to test.
1202 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1206 * FIXME: This code has hardwired assumptions about the split
1207 * between space bits and offset bits. This will change
1208 * when we allow alternate page sizes.
1211 /* adjust isr/ior. */
1212 extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */
1213 depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */
1214 depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */
1216 STREG %r16, PT_ISR(%r29)
1217 STREG %r17, PT_IOR(%r29)
1224 ldo PT_FR0(%r29), %r25
1229 copy %r29, %r25 /* arg1 is pt_regs */
1231 ldo -16(%r30),%r29 /* Reference param save area */
1234 ldil L%intr_check_sig, %r2
1235 copy %r25, %r16 /* save pt_regs */
1237 b handle_interruption
1238 ldo R%intr_check_sig(%r2), %r2
1242 * Note for all tlb miss handlers:
1244 * cr24 contains a pointer to the kernel address space
1247 * cr25 contains a pointer to the current user address
1248 * space page directory.
1250 * sr3 will contain the space id of the user address space
1251 * of the current running thread while that thread is
1252 * running in the kernel.
1256 * register number allocations. Note that these are all
1257 * in the shadowed registers
1260 t0 = r1 /* temporary register 0 */
1261 va = r8 /* virtual address for which the trap occurred */
1262 t1 = r9 /* temporary register 1 */
1263 pte = r16 /* pte/phys page # */
1264 prot = r17 /* prot bits */
1265 spc = r24 /* space for which the trap occurred */
1266 ptp = r25 /* page directory/page table pointer */
1271 space_adjust spc,va,t0
1273 space_check spc,t0,dtlb_fault
1275 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1277 update_ptep ptp,pte,t0,t1
1279 make_insert_tlb spc,pte,prot
1286 dtlb_check_alias_20w:
1287 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1295 space_adjust spc,va,t0
1297 space_check spc,t0,nadtlb_fault
1299 L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w
1301 update_ptep ptp,pte,t0,t1
1303 make_insert_tlb spc,pte,prot
1310 nadtlb_check_flush_20w:
1311 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1313 /* Insert a "flush only" translation */
1318 /* Get rid of prot bits and convert to page addr for idtlbt */
1321 extrd,u pte,56,52,pte
1332 space_check spc,t0,dtlb_fault
1334 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1336 update_ptep ptp,pte,t0,t1
1338 make_insert_tlb_11 spc,pte,prot
1340 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1343 idtlba pte,(%sr1,va)
1344 idtlbp prot,(%sr1,va)
1346 mtsp t0, %sr1 /* Restore sr1 */
1351 dtlb_check_alias_11:
1353 /* Check to see if fault is in the temporary alias region */
1355 cmpib,<>,n 0,spc,dtlb_fault /* forward */
1356 ldil L%(TMPALIAS_MAP_START),t0
1359 cmpb,<>,n t0,t1,dtlb_fault /* forward */
1360 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
1361 depw,z prot,8,7,prot
1364 * OK, it is in the temp alias region, check whether "from" or "to".
1365 * Check "subtle" note in pacache.S re: r23/r26.
1369 or,tr %r23,%r0,pte /* If "from" use "from" page */
1370 or %r26,%r0,pte /* else "to", use "to" page */
1381 space_check spc,t0,nadtlb_fault
1383 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11
1385 update_ptep ptp,pte,t0,t1
1387 make_insert_tlb_11 spc,pte,prot
1390 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1393 idtlba pte,(%sr1,va)
1394 idtlbp prot,(%sr1,va)
1396 mtsp t0, %sr1 /* Restore sr1 */
1401 nadtlb_check_flush_11:
1402 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1404 /* Insert a "flush only" translation */
1409 /* Get rid of prot bits and convert to page addr for idtlba */
1414 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1417 idtlba pte,(%sr1,va)
1418 idtlbp prot,(%sr1,va)
1420 mtsp t0, %sr1 /* Restore sr1 */
1426 space_adjust spc,va,t0
1428 space_check spc,t0,dtlb_fault
1430 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1432 update_ptep ptp,pte,t0,t1
1434 make_insert_tlb spc,pte,prot
1443 dtlb_check_alias_20:
1444 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1454 space_check spc,t0,nadtlb_fault
1456 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20
1458 update_ptep ptp,pte,t0,t1
1460 make_insert_tlb spc,pte,prot
1469 nadtlb_check_flush_20:
1470 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1472 /* Insert a "flush only" translation */
1477 /* Get rid of prot bits and convert to page addr for idtlbt */
1480 extrd,u pte,56,32,pte
1490 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1491 * probei instructions. We don't want to fault for these
1492 * instructions (not only does it not make sense, it can cause
1493 * deadlocks, since some flushes are done with the mmap
1494 * semaphore held). If the translation doesn't exist, we can't
1495 * insert a translation, so have to emulate the side effects
1496 * of the instruction. Since we don't insert a translation
1497 * we can get a lot of faults during a flush loop, so it makes
1498 * sense to try to do it here with minimum overhead. We only
1499 * emulate fdc,fic,pdc,probew,prober instructions whose base
1500 * and index registers are not shadowed. We defer everything
1501 * else to the "slow" path.
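/*
 * Decision flow of the code below, as illustrative pseudocode (the real
 * code decodes the raw iir bits and bails out to nadtlb_fault whenever a
 * shadowed base/index/target register is involved):
 *
 *	iir = mfctl(19);			// the faulting instruction
 *	if (is_fdc_fdce_pdc_fic(iir)) {		// cache-flush family
 *		if (m_bit(iir))			// base-modify form:
 *			base += index;		//   emulate the base update
 *		nullify_and_rfi();		// skip the flush itself
 *	} else if (is_probe(iir)) {		// probe/probei
 *		target = 0;			// report "no access"
 *		nullify_and_rfi();
 *	} else
 *		goto nadtlb_fault;		// slow path in C
 */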
1504 mfctl %cr19,%r9 /* Get iir */
1506 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1507 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
1509 /* Checks for fdc,fdce,pdc,"fic,4f" only */
1512 cmpb,<>,n %r16,%r17,nadtlb_probe_check
1513 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
1514 BL get_register,%r25
1515 extrw,u %r9,15,5,%r8 /* Get index register # */
1516 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1518 BL get_register,%r25
1519 extrw,u %r9,10,5,%r8 /* Get base register # */
1520 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1521 BL set_register,%r25
1522 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
1527 or %r8,%r9,%r8 /* Set PSW_N */
1534 When there is no translation for the probe address then we
1535 must nullify the insn and return zero in the target register.
1536 This will indicate to the calling code that it does not have
1537 write/read privileges to this address.
1539 This should technically work for prober and probew in PA 1.1,
1540 and also probe,r and probe,w in PA 2.0
1542 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1543 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1549 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1550 BL get_register,%r25 /* Find the target register */
1551 extrw,u %r9,31,5,%r8 /* Get target register */
1552 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1553 BL set_register,%r25
1554 copy %r0,%r1 /* Write zero to target register */
1555 b nadtlb_nullify /* Nullify return insn */
1563 * The itlb miss is a little different, since we allow users to fault
1564 * on the gateway page, which is in the kernel address space.
1567 space_adjust spc,va,t0
1569 space_check spc,t0,itlb_fault
1571 L3_ptep ptp,pte,t0,va,itlb_fault
1573 update_ptep ptp,pte,t0,t1
1575 make_insert_tlb spc,pte,prot
1587 space_check spc,t0,itlb_fault
1589 L2_ptep ptp,pte,t0,va,itlb_fault
1591 update_ptep ptp,pte,t0,t1
1593 make_insert_tlb_11 spc,pte,prot
1595 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1598 iitlba pte,(%sr1,va)
1599 iitlbp prot,(%sr1,va)
1601 mtsp t0, %sr1 /* Restore sr1 */
1609 space_check spc,t0,itlb_fault
1611 L2_ptep ptp,pte,t0,va,itlb_fault
1613 update_ptep ptp,pte,t0,t1
1615 make_insert_tlb spc,pte,prot
1629 space_adjust spc,va,t0
1631 space_check spc,t0,dbit_fault
1633 L3_ptep ptp,pte,t0,va,dbit_fault
1636 CMPIB=,n 0,spc,dbit_nolock_20w
1637 load32 PA(pa_dbit_lock),t0
1641 cmpib,= 0,t1,dbit_spin_20w
1646 update_dirty ptp,pte,t1
1648 make_insert_tlb spc,pte,prot
1652 CMPIB=,n 0,spc,dbit_nounlock_20w
1667 space_check spc,t0,dbit_fault
1669 L2_ptep ptp,pte,t0,va,dbit_fault
1672 CMPIB=,n 0,spc,dbit_nolock_11
1673 load32 PA(pa_dbit_lock),t0
1677 cmpib,= 0,t1,dbit_spin_11
1682 update_dirty ptp,pte,t1
1684 make_insert_tlb_11 spc,pte,prot
1686 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1689 idtlba pte,(%sr1,va)
1690 idtlbp prot,(%sr1,va)
1692 mtsp t1, %sr1 /* Restore sr1 */
1694 CMPIB=,n 0,spc,dbit_nounlock_11
1707 space_check spc,t0,dbit_fault
1709 L2_ptep ptp,pte,t0,va,dbit_fault
1712 CMPIB=,n 0,spc,dbit_nolock_20
1713 load32 PA(pa_dbit_lock),t0
1717 cmpib,= 0,t1,dbit_spin_20
1722 update_dirty ptp,pte,t1
1724 make_insert_tlb spc,pte,prot
1731 CMPIB=,n 0,spc,dbit_nounlock_20
1742 .import handle_interruption,code
1746 ldi 31,%r8 /* Use an unused code */
1764 /* Register saving semantics for system calls:
1766 %r1 clobbered by system call macro in userspace
1767 %r2 saved in PT_REGS by gateway page
1768 %r3 - %r18 preserved by C code (saved by signal code)
1769 %r19 - %r20 saved in PT_REGS by gateway page
1770 %r21 - %r22 non-standard syscall args
1771 stored in kernel stack by gateway page
1772 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1773 %r27 - %r30 saved in PT_REGS by gateway page
1774 %r31 syscall return pointer
1777 /* Floating point registers (FIXME: what do we do with these?)
1779 %fr0 - %fr3 status/exception, not preserved
1780 %fr4 - %fr7 arguments
1781 %fr8 - %fr11 not preserved by C code
1782 %fr12 - %fr21 preserved by C code
1783 %fr22 - %fr31 not preserved by C code
1786 .macro reg_save regs
1787 STREG %r3, PT_GR3(\regs)
1788 STREG %r4, PT_GR4(\regs)
1789 STREG %r5, PT_GR5(\regs)
1790 STREG %r6, PT_GR6(\regs)
1791 STREG %r7, PT_GR7(\regs)
1792 STREG %r8, PT_GR8(\regs)
1793 STREG %r9, PT_GR9(\regs)
1794 STREG %r10,PT_GR10(\regs)
1795 STREG %r11,PT_GR11(\regs)
1796 STREG %r12,PT_GR12(\regs)
1797 STREG %r13,PT_GR13(\regs)
1798 STREG %r14,PT_GR14(\regs)
1799 STREG %r15,PT_GR15(\regs)
1800 STREG %r16,PT_GR16(\regs)
1801 STREG %r17,PT_GR17(\regs)
1802 STREG %r18,PT_GR18(\regs)
1805 .macro reg_restore regs
1806 LDREG PT_GR3(\regs), %r3
1807 LDREG PT_GR4(\regs), %r4
1808 LDREG PT_GR5(\regs), %r5
1809 LDREG PT_GR6(\regs), %r6
1810 LDREG PT_GR7(\regs), %r7
1811 LDREG PT_GR8(\regs), %r8
1812 LDREG PT_GR9(\regs), %r9
1813 LDREG PT_GR10(\regs),%r10
1814 LDREG PT_GR11(\regs),%r11
1815 LDREG PT_GR12(\regs),%r12
1816 LDREG PT_GR13(\regs),%r13
1817 LDREG PT_GR14(\regs),%r14
1818 LDREG PT_GR15(\regs),%r15
1819 LDREG PT_GR16(\regs),%r16
1820 LDREG PT_GR17(\regs),%r17
1821 LDREG PT_GR18(\regs),%r18
1824 .export sys_fork_wrapper
1825 .export child_return
1827 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1828 ldo TASK_REGS(%r1),%r1
1831 STREG %r3, PT_CR27(%r1)
1833 STREG %r2,-RP_OFFSET(%r30)
1834 ldo FRAME_SIZE(%r30),%r30
1836 ldo -16(%r30),%r29 /* Reference param save area */
1839 /* These are call-clobbered registers and therefore
1840 also syscall-clobbered (we hope). */
1841 STREG %r2,PT_GR19(%r1) /* save for child */
1842 STREG %r30,PT_GR21(%r1)
1844 LDREG PT_GR30(%r1),%r25
1849 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1851 ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */
1852 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1853 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1855 LDREG PT_CR27(%r1), %r3
1859 /* strace expects syscall # to be preserved in r20 */
1862 STREG %r20,PT_GR20(%r1)
1864 /* Set the return value for the child */
1866 BL schedule_tail, %r2
1869 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
1870 LDREG TASK_PT_GR19(%r1),%r2
1875 .export sys_clone_wrapper
1877 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1878 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1881 STREG %r3, PT_CR27(%r1)
1883 STREG %r2,-RP_OFFSET(%r30)
1884 ldo FRAME_SIZE(%r30),%r30
1886 ldo -16(%r30),%r29 /* Reference param save area */
1889 /* WARNING - Clobbers r19 and r21, userspace must save these! */
1890 STREG %r2,PT_GR19(%r1) /* save for child */
1891 STREG %r30,PT_GR21(%r1)
1896 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1898 .export sys_vfork_wrapper
1900 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1901 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1904 STREG %r3, PT_CR27(%r1)
1906 STREG %r2,-RP_OFFSET(%r30)
1907 ldo FRAME_SIZE(%r30),%r30
1909 ldo -16(%r30),%r29 /* Reference param save area */
1912 STREG %r2,PT_GR19(%r1) /* save for child */
1913 STREG %r30,PT_GR21(%r1)
1919 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1922 .macro execve_wrapper execve
1923 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1924 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1927 * Do we need to save/restore r3-r18 here?
1928 * I don't think so; why would the new thread need the old
1929 * thread's registers?
1932 /* %arg0 - %arg3 are already saved for us. */
1934 STREG %r2,-RP_OFFSET(%r30)
1935 ldo FRAME_SIZE(%r30),%r30
1937 ldo -16(%r30),%r29 /* Reference param save area */
1942 ldo -FRAME_SIZE(%r30),%r30
1943 LDREG -RP_OFFSET(%r30),%r2
1945 /* If exec succeeded we need to load the args */
1948 cmpb,>>= %r28,%r1,error_\execve
1956 .export sys_execve_wrapper
1960 execve_wrapper sys_execve
1963 .export sys32_execve_wrapper
1964 .import sys32_execve
1966 sys32_execve_wrapper:
1967 execve_wrapper sys32_execve
1970 .export sys_rt_sigreturn_wrapper
1971 sys_rt_sigreturn_wrapper:
1972 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1973 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1974 /* Don't save regs, we are going to restore them from sigcontext. */
1975 STREG %r2, -RP_OFFSET(%r30)
1977 ldo FRAME_SIZE(%r30), %r30
1978 BL sys_rt_sigreturn,%r2
1979 ldo -16(%r30),%r29 /* Reference param save area */
1981 BL sys_rt_sigreturn,%r2
1982 ldo FRAME_SIZE(%r30), %r30
1985 ldo -FRAME_SIZE(%r30), %r30
1986 LDREG -RP_OFFSET(%r30), %r2
1988 /* FIXME: I think we need to restore a few more things here. */
1989 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1990 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1993 /* If the signal was received while the process was blocked on a
1994 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1995 * take us to syscall_exit_rfi and on to intr_return.
1998 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
2000 .export sys_sigaltstack_wrapper
2001 sys_sigaltstack_wrapper:
2002 /* Get the user stack pointer */
2003 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2004 ldo TASK_REGS(%r1),%r24 /* get pt regs */
2005 LDREG TASK_PT_GR30(%r24),%r24
2006 STREG %r2, -RP_OFFSET(%r30)
2008 ldo FRAME_SIZE(%r30), %r30
2009 b,l do_sigaltstack,%r2
2010 ldo -16(%r30),%r29 /* Reference param save area */
2012 bl do_sigaltstack,%r2
2013 ldo FRAME_SIZE(%r30), %r30
2016 ldo -FRAME_SIZE(%r30), %r30
2017 LDREG -RP_OFFSET(%r30), %r2
2022 .export sys32_sigaltstack_wrapper
2023 sys32_sigaltstack_wrapper:
2024 /* Get the user stack pointer */
2025 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
2026 LDREG TASK_PT_GR30(%r24),%r24
2027 STREG %r2, -RP_OFFSET(%r30)
2028 ldo FRAME_SIZE(%r30), %r30
2029 b,l do_sigaltstack32,%r2
2030 ldo -16(%r30),%r29 /* Reference param save area */
2032 ldo -FRAME_SIZE(%r30), %r30
2033 LDREG -RP_OFFSET(%r30), %r2
2038 .export sys_rt_sigsuspend_wrapper
2039 sys_rt_sigsuspend_wrapper:
2040 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
2041 ldo TASK_REGS(%r1),%r24
2044 STREG %r2, -RP_OFFSET(%r30)
2046 ldo FRAME_SIZE(%r30), %r30
2047 b,l sys_rt_sigsuspend,%r2
2048 ldo -16(%r30),%r29 /* Reference param save area */
2050 bl sys_rt_sigsuspend,%r2
2051 ldo FRAME_SIZE(%r30), %r30
2054 ldo -FRAME_SIZE(%r30), %r30
2055 LDREG -RP_OFFSET(%r30), %r2
2057 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
2058 ldo TASK_REGS(%r1),%r1
2064 .export syscall_exit
2067 /* NOTE: HP-UX syscalls also come through here
2068 * after hpux_syscall_exit fixes up return
2071 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
2072 * via syscall_exit_rfi if the signal was received while the process
2076 /* save return value now */
2079 LDREG TI_TASK(%r1),%r1
2080 STREG %r28,TASK_PT_GR28(%r1)
2084 /* <linux/personality.h> cannot be easily included */
2085 #define PER_HPUX 0x10
2086 LDREG TASK_PERSONALITY(%r1),%r19
2088 /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
2089 ldo -PER_HPUX(%r19), %r19
2092 /* Save other hpux returns if personality is PER_HPUX */
2093 STREG %r22,TASK_PT_GR22(%r1)
2094 STREG %r29,TASK_PT_GR29(%r1)
2097 #endif /* CONFIG_HPUX */
2099 /* Seems to me that dp could be wrong here, if the syscall involved
2100 * calling a module, and nothing got round to restoring dp on return.
2106 /* Check for software interrupts */
2108 .import irq_stat,data
2110 load32 irq_stat,%r19
2113 /* sched.h: int processor */
2114 /* %r26 is used as scratch register to index into irq_stat[] */
2115 ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
2117 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
2123 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
2124 #endif /* CONFIG_SMP */
2126 syscall_check_resched:
2128 /* check for reschedule */
2130 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
2131 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
2134 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* get ti flags */
2135 bb,<,n %r19, 31-TIF_SIGPENDING, syscall_do_signal /* forward */
2138 /* Are we being ptraced? */
2139 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2141 LDREG TASK_PTRACE(%r1), %r19
2142 bb,< %r19,31,syscall_restore_rfi
2145 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
2148 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
2151 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
2152 LDREG TASK_PT_GR19(%r1),%r19
2153 LDREG TASK_PT_GR20(%r1),%r20
2154 LDREG TASK_PT_GR21(%r1),%r21
2155 LDREG TASK_PT_GR22(%r1),%r22
2156 LDREG TASK_PT_GR23(%r1),%r23
2157 LDREG TASK_PT_GR24(%r1),%r24
2158 LDREG TASK_PT_GR25(%r1),%r25
2159 LDREG TASK_PT_GR26(%r1),%r26
2160 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
2161 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
2162 LDREG TASK_PT_GR29(%r1),%r29
2163 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
2165 /* NOTE: We use rsm/ssm pair to make this operation atomic */
2167 LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */
2168 mfsp %sr3,%r1 /* Get user's space id */
2169 mtsp %r1,%sr7 /* Restore sr7 */
2172 /* Set sr2 to zero for userspace syscalls to work. */
2174 mtsp %r1,%sr4 /* Restore sr4 */
2175 mtsp %r1,%sr5 /* Restore sr5 */
2176 mtsp %r1,%sr6 /* Restore sr6 */
2178 depi 3,31,2,%r31 /* ensure return to user mode. */
2181 /* decide whether to reset the wide mode bit
2183 * For a syscall, the W bit is stored in the lowest bit
2184 * of sp. Extract it and reset W if it is zero */
2185 extrd,u,*<> %r30,63,1,%r1
2187 /* now reset the lowest bit of sp if it was set */
2190 be,n 0(%sr3,%r31) /* return to user space */
2192 /* We have to return via an RFI, so that PSW T and R bits can be set
2194 * This sets up pt_regs so we can return via intr_restore, which is not
2195 * the most efficient way of doing things, but it works.
2197 syscall_restore_rfi:
2198 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
2199 mtctl %r2,%cr0 /* for immediate trap */
2200 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
2201 ldi 0x0b,%r20 /* Create new PSW */
2202 depi -1,13,1,%r20 /* C, Q, D, and I bits */
2204 /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
2205 * set in include/linux/ptrace.h and converted to PA bitmap
2206 * numbers in asm-offsets.c */
2208 /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
2209 extru,= %r19,PA_SINGLESTEP_BIT,1,%r0
2210 depi -1,27,1,%r20 /* R bit */
2212 /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
2213 extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
2214 depi -1,7,1,%r20 /* T bit */
2216 STREG %r20,TASK_PT_PSW(%r1)
2218 /* Always store space registers, since sr3 can be changed (e.g. fork) */
2221 STREG %r25,TASK_PT_SR3(%r1)
2222 STREG %r25,TASK_PT_SR4(%r1)
2223 STREG %r25,TASK_PT_SR5(%r1)
2224 STREG %r25,TASK_PT_SR6(%r1)
2225 STREG %r25,TASK_PT_SR7(%r1)
2226 STREG %r25,TASK_PT_IASQ0(%r1)
2227 STREG %r25,TASK_PT_IASQ1(%r1)
2230 /* Now if old D bit is clear, it means we didn't save all registers
2231 * on syscall entry, so do that now. This only happens on TRACEME
2232 * calls, or if someone attached to us while we were on a syscall.
2233 * We could make this more efficient by not saving r3-r18, but
2234 * then we wouldn't be able to use the common intr_restore path.
2235 * It is only for traced processes anyway, so performance is not
2238 bb,< %r2,30,pt_regs_ok /* Branch if D set */
2239 ldo TASK_REGS(%r1),%r25
2240 reg_save %r25 /* Save r3 to r18 */
2242 /* Save the current sr */
2244 STREG %r2,TASK_PT_SR0(%r1)
2246 /* Save the scratch sr */
2248 STREG %r2,TASK_PT_SR1(%r1)
2250 /* sr2 should be set to zero for userspace syscalls */
2251 STREG %r0,TASK_PT_SR2(%r1)
2254 LDREG TASK_PT_GR31(%r1),%r2
2255 depi 3,31,2,%r2 /* ensure return to user mode. */
2256 STREG %r2,TASK_PT_IAOQ0(%r1)
2258 STREG %r2,TASK_PT_IAOQ1(%r1)
2263 .import schedule,code
2267 ldo -16(%r30),%r29 /* Reference param save area */
2271 b syscall_check_bh /* if resched, we start over again */
2274 .import do_signal,code
2276 /* Save callee-save registers (for sigcontext).
2277 FIXME: After this point the process structure should be
2278 consistent with all the relevant state of the process
2279 before the syscall. We need to verify this. */
2280 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2281 ldo TASK_REGS(%r1), %r25 /* struct pt_regs *regs */
2284 ldi 1, %r24 /* unsigned long in_syscall */
2287 ldo -16(%r30),%r29 /* Reference param save area */
2290 copy %r0, %r26 /* sigset_t *oldset = NULL */
2292 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2293 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
2296 b,n syscall_check_sig
2299 * get_register is used by the non access tlb miss handlers to
2300 * copy the value of the general register specified in r8 into
2301 * r1. This routine can't be used for shadowed registers, since
2302 * the rfir will restore the original value. So, for the shadowed
2303 * registers we put a -1 into r1 to indicate that the register
2304 * should not be used (the register being copied could also have
2305 * a -1 in it, but that is OK, it just means that we will have
2306 * to use the slow path instead).
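/*
 * Conceptually (illustrative C; the routine below is really a branch
 * table indexed by the register number passed in %r8, returning in %r1):
 *
 *	long get_register(int regno)
 *	{
 *		if (is_shadowed(regno))		// r1, r8, r9, r16, r17, r24, r25
 *			return -1;		// caller must take the slow path
 *		return gr[regno];		// plain copy of the general register
 *	}
 */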
2312 bv %r0(%r25) /* r0 */
2314 bv %r0(%r25) /* r1 - shadowed */
2316 bv %r0(%r25) /* r2 */
2318 bv %r0(%r25) /* r3 */
2320 bv %r0(%r25) /* r4 */
2322 bv %r0(%r25) /* r5 */
2324 bv %r0(%r25) /* r6 */
2326 bv %r0(%r25) /* r7 */
2328 bv %r0(%r25) /* r8 - shadowed */
2330 bv %r0(%r25) /* r9 - shadowed */
2332 bv %r0(%r25) /* r10 */
2334 bv %r0(%r25) /* r11 */
2336 bv %r0(%r25) /* r12 */
2338 bv %r0(%r25) /* r13 */
2340 bv %r0(%r25) /* r14 */
2342 bv %r0(%r25) /* r15 */
2344 bv %r0(%r25) /* r16 - shadowed */
2346 bv %r0(%r25) /* r17 - shadowed */
2348 bv %r0(%r25) /* r18 */
2350 bv %r0(%r25) /* r19 */
2352 bv %r0(%r25) /* r20 */
2354 bv %r0(%r25) /* r21 */
2356 bv %r0(%r25) /* r22 */
2358 bv %r0(%r25) /* r23 */
2360 bv %r0(%r25) /* r24 - shadowed */
2362 bv %r0(%r25) /* r25 - shadowed */
2364 bv %r0(%r25) /* r26 */
2366 bv %r0(%r25) /* r27 */
2368 bv %r0(%r25) /* r28 */
2370 bv %r0(%r25) /* r29 */
2372 bv %r0(%r25) /* r30 */
2374 bv %r0(%r25) /* r31 */
2378 * set_register is used by the non access tlb miss handlers to
2379 * copy the value of r1 into the general register specified in
2386 bv %r0(%r25) /* r0 (silly, but it is a place holder) */
2388 bv %r0(%r25) /* r1 */
2390 bv %r0(%r25) /* r2 */
2392 bv %r0(%r25) /* r3 */
2394 bv %r0(%r25) /* r4 */
2396 bv %r0(%r25) /* r5 */
2398 bv %r0(%r25) /* r6 */
2400 bv %r0(%r25) /* r7 */
2402 bv %r0(%r25) /* r8 */
2404 bv %r0(%r25) /* r9 */
2406 bv %r0(%r25) /* r10 */
2408 bv %r0(%r25) /* r11 */
2410 bv %r0(%r25) /* r12 */
2412 bv %r0(%r25) /* r13 */
2414 bv %r0(%r25) /* r14 */
2416 bv %r0(%r25) /* r15 */
2418 bv %r0(%r25) /* r16 */
2420 bv %r0(%r25) /* r17 */
2422 bv %r0(%r25) /* r18 */
2424 bv %r0(%r25) /* r19 */
2426 bv %r0(%r25) /* r20 */
2428 bv %r0(%r25) /* r21 */
2430 bv %r0(%r25) /* r22 */
2432 bv %r0(%r25) /* r23 */
2434 bv %r0(%r25) /* r24 */
2436 bv %r0(%r25) /* r25 */
2438 bv %r0(%r25) /* r26 */
2440 bv %r0(%r25) /* r27 */
2442 bv %r0(%r25) /* r28 */
2444 bv %r0(%r25) /* r29 */
2446 bv %r0(%r25) /* r30 */
2448 bv %r0(%r25) /* r31 */