2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
4 * kernel entry points (interruptions, system call wrappers)
5 * Copyright (C) 1999,2000 Philipp Rumpf
6 * Copyright (C) 1999 SuSE GmbH Nuernberg
7 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #include <asm/asm-offsets.h>
27 /* we have the following possibilities to act on an interruption:
28 * - handle in assembly and use shadowed registers only
29 * - save registers to kernel stack and handle in assembly or C */
33 #include <asm/cache.h> /* for L1_CACHE_SHIFT */
34 #include <asm/assembly.h> /* for LDREG/STREG defines */
35 #include <asm/pgtable.h>
36 #include <asm/signal.h>
37 #include <asm/unistd.h>
38 #include <asm/thread_info.h>
40 #include <linux/linkage.h>
48 .import pa_dbit_lock,data
50 /* space_to_prot macro creates a prot id from a space id */
52 #if (SPACEID_SHIFT) == 0
53 .macro space_to_prot spc prot
54 depd,z \spc,62,31,\prot
57 .macro space_to_prot spc prot
58 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
62 /* Switch to virtual mapping, trashing only %r1 */
65 rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */
70 load32 KERNEL_PSW, %r1
72 rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
73 mtctl %r0, %cr17 /* Clear IIASQ tail */
74 mtctl %r0, %cr17 /* Clear IIASQ head */
77 mtctl %r1, %cr18 /* Set IIAOQ tail */
79 mtctl %r1, %cr18 /* Set IIAOQ head */
86 * The "get_stack" macros are responsible for determining the
90 * Already using a kernel stack, so call the
91 * get_stack_use_r30 macro to push a pt_regs structure
92 * on the stack, and store registers there.
94 * Need to set up a kernel stack, so call the
95 * get_stack_use_cr30 macro to set up a pointer
96 * to the pt_regs structure contained within the
97 * task pointer pointed to by cr30. Set the stack
98 * pointer to point to the end of the task structure.
100 * Note that we use shadowed registers for temps until
101 * we can save %r26 and %r29. %r26 is used to preserve
102 * %r8 (a shadowed register) which temporarily contained
103 * either the fault type ("code") or the eirr. We need
104 * to use a non-shadowed register to carry the value over
105 * the rfir in virt_map. We use %r26 since this value winds
106 * up being passed as the argument to either do_cpu_irq_mask
107 * or handle_interruption. %r29 is used to hold a pointer
108 * to the register save area, and once again, it needs to
109 * be a non-shadowed register so that it survives the rfir.
111 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
114 .macro get_stack_use_cr30
116 /* we save the registers in the task struct */
120 ldo THREAD_SZ_ALGN(%r1), %r30
124 LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
126 ldo TASK_REGS(%r9),%r9
127 STREG %r17,PT_GR30(%r9)
128 STREG %r29,PT_GR29(%r9)
129 STREG %r26,PT_GR26(%r9)
130 STREG %r16,PT_SR7(%r9)
134 .macro get_stack_use_r30
136 /* we put a struct pt_regs on the stack and save the registers there */
140 ldo PT_SZ_ALGN(%r30),%r30
141 STREG %r1,PT_GR30(%r9)
142 STREG %r29,PT_GR29(%r9)
143 STREG %r26,PT_GR26(%r9)
144 STREG %r16,PT_SR7(%r9)
149 LDREG PT_GR1(%r29), %r1
150 LDREG PT_GR30(%r29),%r30
151 LDREG PT_GR29(%r29),%r29
154 /* default interruption handler
155 * (calls traps.c:handle_interruption) */
162 /* Interrupt interruption handler
163 * (calls irq.c:do_cpu_irq_mask) */
170 .import os_hpmc, code
174 nop /* must be a NOP, will be patched later */
175 load32 PA(os_hpmc), %r3
178 .word 0 /* checksum (will be patched) */
179 .word PA(os_hpmc) /* address of handler */
180 .word 0 /* length of handler */
184 * Performance Note: Instructions will be moved up into
185 * this part of the code later on, once we are sure
186 * that the tlb miss handlers are close to final form.
189 /* Register definitions for tlb miss handler macros */
191 va = r8 /* virtual address for which the trap occurred */
192 spc = r24 /* space for which the trap occurred */
197 * itlb miss interruption handler (parisc 1.1 - 32 bit)
211 * itlb miss interruption handler (parisc 2.0)
228 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
231 .macro naitlb_11 code
242 * naitlb miss interruption handler (parisc 2.0)
245 .macro naitlb_20 code
260 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
274 * dtlb miss interruption handler (parisc 2.0)
291 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
293 .macro nadtlb_11 code
303 /* nadtlb miss interruption handler (parisc 2.0) */
305 .macro nadtlb_20 code
320 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
334 * dirty bit trap interruption handler (parisc 2.0)
350 /* In LP64, the space contains part of the upper 32 bits of the
351 * fault. We have to extract this and place it in the va,
352 * zeroing the corresponding bits in the space register */
353 .macro space_adjust spc,va,tmp
355 extrd,u \spc,63,SPACEID_SHIFT,\tmp
356 depd %r0,63,SPACEID_SHIFT,\spc
357 depd \tmp,31,SPACEID_SHIFT,\va
361 .import swapper_pg_dir,code
363 /* Get the pgd. For faults on space zero (kernel space), this
364 * is simply swapper_pg_dir. For user space faults, the
365 * pgd is stored in %cr25 */
366 .macro get_pgd spc,reg
367 ldil L%PA(swapper_pg_dir),\reg
368 ldo R%PA(swapper_pg_dir)(\reg),\reg
369 or,COND(=) %r0,\spc,%r0
374 space_check(spc,tmp,fault)
376 spc - The space we saw the fault with.
377 tmp - The place to store the current space.
378 fault - Function to call on failure.
380 Only allow faults on different spaces from the
381 currently active one if we're the kernel
384 .macro space_check spc,tmp,fault
386 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
387 * as kernel, so defeat the space
390 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
391 cmpb,COND(<>),n \tmp,\spc,\fault
394 /* Look up a PTE in a 2-Level scheme (faulting at each
395 * level if the entry isn't present
397 * NOTE: we use ldw even for LP64, since the short pointers
398 * can address up to 1TB
400 .macro L2_ptep pmd,pte,index,va,fault
402 extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
404 # if defined(CONFIG_64BIT)
405 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
407 # if PAGE_SIZE > 4096
408 extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
410 extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
414 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
416 ldw,s \index(\pmd),\pmd
417 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
418 dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
420 SHLREG %r9,PxD_VALUE_SHIFT,\pmd
421 extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
422 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
423 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
424 LDREG %r0(\pmd),\pte /* pmd is now pte */
425 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
428 /* Look up PTE in a 3-Level scheme.
430 * Here we implement a Hybrid L2/L3 scheme: we allocate the
431 * first pmd adjacent to the pgd. This means that we can
432 * subtract a constant offset to get to it. The pmd and pgd
433 * sizes are arranged so that a single pmd covers 4GB (giving
434 * a full LP64 process access to 8TB) so our lookups are
435 * effectively L2 for the first 4GB of the kernel (i.e. for
436 * all ILP32 processes and all the kernel for machines with
437 * under 4GB of memory) */
438 .macro L3_ptep pgd,pte,index,va,fault
439 #if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
440 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
442 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
443 ldw,s \index(\pgd),\pgd
444 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
445 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
446 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
447 shld \pgd,PxD_VALUE_SHIFT,\index
448 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
450 extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
451 ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
453 L2_ptep \pgd,\pte,\index,\va,\fault
456 /* Acquire pa_dbit_lock lock. */
457 .macro dbit_lock spc,tmp,tmp1
459 cmpib,COND(=),n 0,\spc,2f
460 load32 PA(pa_dbit_lock),\tmp
461 1: LDCW 0(\tmp),\tmp1
462 cmpib,COND(=) 0,\tmp1,1b
468 /* Release pa_dbit_lock lock without reloading lock address. */
469 .macro dbit_unlock0 spc,tmp
471 or,COND(=) %r0,\spc,%r0
476 /* Release pa_dbit_lock lock. */
477 .macro dbit_unlock1 spc,tmp
479 load32 PA(pa_dbit_lock),\tmp
480 dbit_unlock0 \spc,\tmp
484 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
485 * don't needlessly dirty the cache line if it was already set */
486 .macro update_ptep spc,ptep,pte,tmp,tmp1
488 or,COND(=) %r0,\spc,%r0
491 ldi _PAGE_ACCESSED,\tmp1
493 and,COND(<>) \tmp1,\pte,%r0
497 /* Set the dirty bit (and accessed bit). No need to be
498 * clever, this is only used from the dirty fault */
499 .macro update_dirty spc,ptep,pte,tmp
501 or,COND(=) %r0,\spc,%r0
504 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
509 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
510 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
511 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
513 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
514 .macro convert_for_tlb_insert20 pte
515 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
516 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
517 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
518 (63-58)+PAGE_ADD_SHIFT,\pte
521 /* Convert the pte and prot to tlb insertion values. How
522 * this happens is quite subtle, read below */
523 .macro make_insert_tlb spc,pte,prot
524 space_to_prot \spc \prot /* create prot id from space */
525 /* The following is the real subtlety. This is depositing
526 * T <-> _PAGE_REFTRAP
528 * B <-> _PAGE_DMB (memory break)
530 * Then incredible subtlety: The access rights are
531 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
532 * See 3-14 of the parisc 2.0 manual
534 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
535 * trigger an access rights trap in user space if the user
536 * tries to read an unreadable page */
539 /* PAGE_USER indicates the page can be read with user privileges,
540 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
541 * contains _PAGE_READ) */
542 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
544 /* If we're a gateway page, drop PL2 back to zero for promotion
545 * to kernel privilege (so we can execute the page as kernel).
546 * Any privilege promotion page always denies read and write */
547 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
548 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
550 /* Enforce uncacheable pages.
551 * This should ONLY be used for MMIO on PA 2.0 machines.
552 * Memory/DMA is cache coherent on all PA2.0 machines we support
553 * (that means T-class is NOT supported) and the memory controllers
554 * on most of those machines only handles cache transactions.
556 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
559 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
560 convert_for_tlb_insert20 \pte
563 /* Identical macro to make_insert_tlb above, except it
564 * makes the tlb entry for the differently formatted pa11
565 * insertion instructions */
566 .macro make_insert_tlb_11 spc,pte,prot
567 zdep \spc,30,15,\prot
569 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
571 extru,= \pte,_PAGE_USER_BIT,1,%r0
572 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
573 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
574 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
576 /* Get rid of prot bits and convert to page addr for iitlba */
578 depi 0,31,ASM_PFN_PTE_SHIFT,\pte
579 SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
582 /* This is for ILP32 PA2.0 only. The TLB insertion needs
583 * to extend into I/O space if the address is 0xfXXXXXXX
584 * so we extend the f's into the top word of the pte in
586 .macro f_extend pte,tmp
587 extrd,s \pte,42,4,\tmp
589 extrd,s \pte,63,25,\pte
592 /* The alias region is an 8MB aligned 16MB to do clear and
593 * copy user pages at addresses congruent with the user
596 * To use the alias page, you set %r26 up with the to TLB
597 * entry (identifying the physical page) and %r23 up with
598 * the from tlb entry (or nothing if only a to entry---for
599 * clear_user_page_asm) */
600 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
601 cmpib,COND(<>),n 0,\spc,\fault
602 ldil L%(TMPALIAS_MAP_START),\tmp
603 #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
604 /* on LP64, ldi will sign extend into the upper 32 bits,
605 * which is behaviour we don't want */
610 cmpb,COND(<>),n \tmp,\tmp1,\fault
611 mfctl %cr19,\tmp /* iir */
612 /* get the opcode (first six bits) into \tmp */
613 extrw,u \tmp,5,6,\tmp
615 * Only setting the T bit prevents data cache movein
616 * Setting access rights to zero prevents instruction cache movein
618 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
619 * to type field and _PAGE_READ goes to top bit of PL1
621 ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
623 * so if the opcode is one (i.e. this is a memory management
624 * instruction) nullify the next load so \prot is only T.
625 * Otherwise this is a normal data operation
627 cmpiclr,= 0x01,\tmp,%r0
628 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
630 depd,z \prot,8,7,\prot
633 depw,z \prot,8,7,\prot
635 .error "undefined PA type to do_alias"
639 * OK, it is in the temp alias region, check whether "from" or "to".
640 * Check "subtle" note in pacache.S re: r23/r26.
643 extrd,u,*= \va,41,1,%r0
645 extrw,u,= \va,9,1,%r0
647 or,COND(tr) %r23,%r0,\pte
653 * Align fault_vector_20 on 4K boundary so that both
654 * fault_vector_11 and fault_vector_20 are on the
655 * same page. This is only necessary as long as we
656 * write protect the kernel text, which we may stop
657 * doing once we use large page translations to cover
658 * the static part of the kernel address space.
665 ENTRY(fault_vector_20)
666 /* First vector is invalid (0) */
667 .ascii "cows can fly"
708 ENTRY(fault_vector_11)
709 /* First vector is invalid (0) */
710 .ascii "cows can fly"
748 /* Fault vector is separately protected and *must* be on its own page */
750 ENTRY(end_fault_vector)
752 .import handle_interruption,code
753 .import do_cpu_irq_mask,code
758 * copy_thread moved args into task save area.
761 ENTRY(ret_from_kernel_thread)
763 /* Call schedule_tail first though */
764 BL schedule_tail, %r2
767 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
768 LDREG TASK_PT_GR25(%r1), %r26
770 LDREG TASK_PT_GR27(%r1), %r27
772 LDREG TASK_PT_GR26(%r1), %r1
775 b finish_child_return
777 ENDPROC(ret_from_kernel_thread)
781 * struct task_struct *_switch_to(struct task_struct *prev,
782 * struct task_struct *next)
784 * switch kernel stacks and return prev */
786 STREG %r2, -RP_OFFSET(%r30)
791 load32 _switch_to_ret, %r2
793 STREG %r2, TASK_PT_KPC(%r26)
794 LDREG TASK_PT_KPC(%r25), %r2
796 STREG %r30, TASK_PT_KSP(%r26)
797 LDREG TASK_PT_KSP(%r25), %r30
798 LDREG TASK_THREAD_INFO(%r25), %r25
803 mtctl %r0, %cr0 /* Needed for single stepping */
807 LDREG -RP_OFFSET(%r30), %r2
813 * Common rfi return path for interruptions, kernel execve, and
814 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
815 * return via this path if the signal was received when the process
816 * was running; if the process was blocked on a syscall then the
817 * normal syscall_exit path is used. All syscalls for traced
818 * processes exit via intr_restore.
820 * XXX If any syscalls that change a process's space id ever exit
821 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
828 ENTRY(syscall_exit_rfi)
830 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
831 ldo TASK_REGS(%r16),%r16
832 /* Force iaoq to userspace, as the user has had access to our current
833 * context via sigcontext. Also Filter the PSW for the same reason.
835 LDREG PT_IAOQ0(%r16),%r19
837 STREG %r19,PT_IAOQ0(%r16)
838 LDREG PT_IAOQ1(%r16),%r19
840 STREG %r19,PT_IAOQ1(%r16)
841 LDREG PT_PSW(%r16),%r19
842 load32 USER_PSW_MASK,%r1
844 load32 USER_PSW_HI_MASK,%r20
847 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
849 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
850 STREG %r19,PT_PSW(%r16)
853 * If we aren't being traced, we never saved space registers
854 * (we don't store them in the sigcontext), so set them
855 * to "proper" values now (otherwise we'll wind up restoring
856 * whatever was last stored in the task structure, which might
857 * be inconsistent if an interrupt occurred while on the gateway
858 * page). Note that we may be "trashing" values the user put in
859 * them, but we don't support the user changing them.
862 STREG %r0,PT_SR2(%r16)
864 STREG %r19,PT_SR0(%r16)
865 STREG %r19,PT_SR1(%r16)
866 STREG %r19,PT_SR3(%r16)
867 STREG %r19,PT_SR4(%r16)
868 STREG %r19,PT_SR5(%r16)
869 STREG %r19,PT_SR6(%r16)
870 STREG %r19,PT_SR7(%r16)
873 /* check for reschedule */
875 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
876 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
878 .import do_notify_resume,code
882 LDREG TI_FLAGS(%r1),%r19
883 ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
884 and,COND(<>) %r19, %r20, %r0
885 b,n intr_restore /* skip past if we've nothing to do */
887 /* This check is critical to having LWS
888 * working. The IASQ is zero on the gateway
889 * page and we cannot deliver any signals until
890 * we get off the gateway page.
892 * Only do signals if we are returning to user space
894 LDREG PT_IASQ0(%r16), %r20
895 cmpib,COND(=),n 0,%r20,intr_restore /* backward */
896 LDREG PT_IASQ1(%r16), %r20
897 cmpib,COND(=),n 0,%r20,intr_restore /* backward */
899 /* NOTE: We need to enable interrupts if we have to deliver
900 * signals. We used to do this earlier but it caused kernel
901 * stack overflows. */
904 copy %r0, %r25 /* long in_syscall = 0 */
906 ldo -16(%r30),%r29 /* Reference param save area */
909 BL do_notify_resume,%r2
910 copy %r16, %r26 /* struct pt_regs *regs */
916 ldo PT_FR31(%r29),%r1
920 /* inverse of virt_map */
922 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
925 /* Restore space id's and special cr's from PT_REGS
926 * structure pointed to by r29
930 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
931 * It also restores r1 and r30.
938 #ifndef CONFIG_PREEMPT
939 # define intr_do_preempt intr_restore
940 #endif /* !CONFIG_PREEMPT */
942 .import schedule,code
944 /* Only call schedule on return to userspace. If we're returning
945 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
946 * we jump back to intr_restore.
948 LDREG PT_IASQ0(%r16), %r20
949 cmpib,COND(=) 0, %r20, intr_do_preempt
951 LDREG PT_IASQ1(%r16), %r20
952 cmpib,COND(=) 0, %r20, intr_do_preempt
955 /* NOTE: We need to enable interrupts if we schedule. We used
956 * to do this earlier but it caused kernel stack overflows. */
960 ldo -16(%r30),%r29 /* Reference param save area */
963 ldil L%intr_check_sig, %r2
967 load32 schedule, %r20
970 ldo R%intr_check_sig(%r2), %r2
972 /* preempt the current task on returning to kernel
973 * mode from an interrupt, iff need_resched is set,
974 * and preempt_count is 0. otherwise, we continue on
975 * our merry way back to the current running task.
977 #ifdef CONFIG_PREEMPT
978 .import preempt_schedule_irq,code
980 rsm PSW_SM_I, %r0 /* disable interrupts */
982 /* current_thread_info()->preempt_count */
984 LDREG TI_PRE_COUNT(%r1), %r19
985 cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */
986 nop /* prev insn branched backwards */
988 /* check if we interrupted a critical path */
989 LDREG PT_PSW(%r16), %r20
990 bb,<,n %r20, 31 - PSW_SM_I, intr_restore
993 BL preempt_schedule_irq, %r2
996 b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
997 #endif /* CONFIG_PREEMPT */
1000 * External interrupts.
1004 cmpib,COND(=),n 0,%r16,1f
1016 ldo PT_FR0(%r29), %r24
1021 copy %r29, %r26 /* arg0 is pt_regs */
1022 copy %r29, %r16 /* save pt_regs */
1024 ldil L%intr_return, %r2
1027 ldo -16(%r30),%r29 /* Reference param save area */
1031 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
1032 ENDPROC(syscall_exit_rfi)
1035 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1037 ENTRY(intr_save) /* for os_hpmc */
1039 cmpib,COND(=),n 0,%r16,1f
1051 /* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1054 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1056 * 2) Once we start executing code above 4 Gb, we need
1057 * to adjust iasq/iaoq here in the same way we
1058 * adjust isr/ior below.
1061 cmpib,COND(=),n 6,%r26,skip_save_ior
1064 mfctl %cr20, %r16 /* isr */
1065 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1066 mfctl %cr21, %r17 /* ior */
1071 * If the interrupted code was running with W bit off (32 bit),
1072 * clear the b bits (bits 0 & 1) in the ior.
1073 * save_specials left ipsw value in r8 for us to test.
1075 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1079 * FIXME: This code has hardwired assumptions about the split
1080 * between space bits and offset bits. This will change
1081 * when we allow alternate page sizes.
1084 /* adjust isr/ior. */
1085 extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */
1086 depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */
1087 depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */
1089 STREG %r16, PT_ISR(%r29)
1090 STREG %r17, PT_IOR(%r29)
1097 ldo PT_FR0(%r29), %r25
1102 copy %r29, %r25 /* arg1 is pt_regs */
1104 ldo -16(%r30),%r29 /* Reference param save area */
1107 ldil L%intr_check_sig, %r2
1108 copy %r25, %r16 /* save pt_regs */
1110 b handle_interruption
1111 ldo R%intr_check_sig(%r2), %r2
1116 * Note for all tlb miss handlers:
1118 * cr24 contains a pointer to the kernel address space
1121 * cr25 contains a pointer to the current user address
1122 * space page directory.
1124 * sr3 will contain the space id of the user address space
1125 * of the current running thread while that thread is
1126 * running in the kernel.
1130 * register number allocations. Note that these are all
1131 * in the shadowed registers
1134 t0 = r1 /* temporary register 0 */
1135 va = r8 /* virtual address for which the trap occurred */
1136 t1 = r9 /* temporary register 1 */
1137 pte = r16 /* pte/phys page # */
1138 prot = r17 /* prot bits */
1139 spc = r24 /* space for which the trap occurred */
1140 ptp = r25 /* page directory/page table pointer */
1145 space_adjust spc,va,t0
1147 space_check spc,t0,dtlb_fault
1149 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1152 update_ptep spc,ptp,pte,t0,t1
1154 make_insert_tlb spc,pte,prot
1162 dtlb_check_alias_20w:
1163 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1171 space_adjust spc,va,t0
1173 space_check spc,t0,nadtlb_fault
1175 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
1178 update_ptep spc,ptp,pte,t0,t1
1180 make_insert_tlb spc,pte,prot
1188 nadtlb_check_alias_20w:
1189 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1201 space_check spc,t0,dtlb_fault
1203 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1206 update_ptep spc,ptp,pte,t0,t1
1208 make_insert_tlb_11 spc,pte,prot
1210 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1213 idtlba pte,(%sr1,va)
1214 idtlbp prot,(%sr1,va)
1216 mtsp t0, %sr1 /* Restore sr1 */
1222 dtlb_check_alias_11:
1223 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
1234 space_check spc,t0,nadtlb_fault
1236 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
1239 update_ptep spc,ptp,pte,t0,t1
1241 make_insert_tlb_11 spc,pte,prot
1244 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1247 idtlba pte,(%sr1,va)
1248 idtlbp prot,(%sr1,va)
1250 mtsp t0, %sr1 /* Restore sr1 */
1256 nadtlb_check_alias_11:
1257 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
1266 space_adjust spc,va,t0
1268 space_check spc,t0,dtlb_fault
1270 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1273 update_ptep spc,ptp,pte,t0,t1
1275 make_insert_tlb spc,pte,prot
1285 dtlb_check_alias_20:
1286 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1296 space_check spc,t0,nadtlb_fault
1298 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
1301 update_ptep spc,ptp,pte,t0,t1
1303 make_insert_tlb spc,pte,prot
1313 nadtlb_check_alias_20:
1314 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1326 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1327 * probei instructions. We don't want to fault for these
1328 * instructions (not only does it not make sense, it can cause
1329 * deadlocks, since some flushes are done with the mmap
1330 * semaphore held). If the translation doesn't exist, we can't
1331 * insert a translation, so have to emulate the side effects
1332 * of the instruction. Since we don't insert a translation
1333 * we can get a lot of faults during a flush loop, so it makes
1334 * sense to try to do it here with minimum overhead. We only
1335 * emulate fdc,fic,pdc,probew,prober instructions whose base
1336 * and index registers are not shadowed. We defer everything
1337 * else to the "slow" path.
1340 mfctl %cr19,%r9 /* Get iir */
1342 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1343 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
1345 /* Checks for fdc,fdce,pdc,"fic,4f" only */
1348 cmpb,<>,n %r16,%r17,nadtlb_probe_check
1349 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
1350 BL get_register,%r25
1351 extrw,u %r9,15,5,%r8 /* Get index register # */
1352 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1354 BL get_register,%r25
1355 extrw,u %r9,10,5,%r8 /* Get base register # */
1356 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1357 BL set_register,%r25
1358 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
1363 or %r8,%r9,%r8 /* Set PSW_N */
1370 When there is no translation for the probe address then we
1371 must nullify the insn and return zero in the target register.
1372 This will indicate to the calling code that it does not have
1373 write/read privileges to this address.
1375 This should technically work for prober and probew in PA 1.1,
1376 and also probe,r and probe,w in PA 2.0
1378 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1379 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1385 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1386 BL get_register,%r25 /* Find the target register */
1387 extrw,u %r9,31,5,%r8 /* Get target register */
1388 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1389 BL set_register,%r25
1390 copy %r0,%r1 /* Write zero to target register */
1391 b nadtlb_nullify /* Nullify return insn */
1399 * I miss is a little different, since we allow users to fault
1400 * on the gateway page which is in the kernel address space.
1403 space_adjust spc,va,t0
1405 space_check spc,t0,itlb_fault
1407 L3_ptep ptp,pte,t0,va,itlb_fault
1410 update_ptep spc,ptp,pte,t0,t1
1412 make_insert_tlb spc,pte,prot
1423 * I miss is a little different, since we allow users to fault
1424 * on the gateway page which is in the kernel address space.
1427 space_adjust spc,va,t0
1429 space_check spc,t0,naitlb_fault
1431 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1434 update_ptep spc,ptp,pte,t0,t1
1436 make_insert_tlb spc,pte,prot
1444 naitlb_check_alias_20w:
1445 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1457 space_check spc,t0,itlb_fault
1459 L2_ptep ptp,pte,t0,va,itlb_fault
1462 update_ptep spc,ptp,pte,t0,t1
1464 make_insert_tlb_11 spc,pte,prot
1466 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1469 iitlba pte,(%sr1,va)
1470 iitlbp prot,(%sr1,va)
1472 mtsp t0, %sr1 /* Restore sr1 */
1481 space_check spc,t0,naitlb_fault
1483 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1486 update_ptep spc,ptp,pte,t0,t1
1488 make_insert_tlb_11 spc,pte,prot
1490 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1493 iitlba pte,(%sr1,va)
1494 iitlbp prot,(%sr1,va)
1496 mtsp t0, %sr1 /* Restore sr1 */
1502 naitlb_check_alias_11:
1503 do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
1505 iitlba pte,(%sr0, va)
1506 iitlbp prot,(%sr0, va)
1515 space_check spc,t0,itlb_fault
1517 L2_ptep ptp,pte,t0,va,itlb_fault
1520 update_ptep spc,ptp,pte,t0,t1
1522 make_insert_tlb spc,pte,prot
1535 space_check spc,t0,naitlb_fault
1537 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1540 update_ptep spc,ptp,pte,t0,t1
1542 make_insert_tlb spc,pte,prot
1552 naitlb_check_alias_20:
1553 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1565 space_adjust spc,va,t0
1567 space_check spc,t0,dbit_fault
1569 L3_ptep ptp,pte,t0,va,dbit_fault
1572 update_dirty spc,ptp,pte,t1
1574 make_insert_tlb spc,pte,prot
1587 space_check spc,t0,dbit_fault
1589 L2_ptep ptp,pte,t0,va,dbit_fault
1592 update_dirty spc,ptp,pte,t1
1594 make_insert_tlb_11 spc,pte,prot
1596 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1599 idtlba pte,(%sr1,va)
1600 idtlbp prot,(%sr1,va)
1602 mtsp t1, %sr1 /* Restore sr1 */
1611 space_check spc,t0,dbit_fault
1613 L2_ptep ptp,pte,t0,va,dbit_fault
1616 update_dirty spc,ptp,pte,t1
1618 make_insert_tlb spc,pte,prot
1629 .import handle_interruption,code
1633 ldi 31,%r8 /* Use an unused code */
1655 /* Register saving semantics for system calls:
1657 %r1 clobbered by system call macro in userspace
1658 %r2 saved in PT_REGS by gateway page
1659 %r3 - %r18 preserved by C code (saved by signal code)
1660 %r19 - %r20 saved in PT_REGS by gateway page
1661 %r21 - %r22 non-standard syscall args
1662 stored in kernel stack by gateway page
1663 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1664 %r27 - %r30 saved in PT_REGS by gateway page
1665 %r31 syscall return pointer
1668 /* Floating point registers (FIXME: what do we do with these?)
1670 %fr0 - %fr3 status/exception, not preserved
1671 %fr4 - %fr7 arguments
1672 %fr8 - %fr11 not preserved by C code
1673 %fr12 - %fr21 preserved by C code
1674 %fr22 - %fr31 not preserved by C code
1677 .macro reg_save regs
1678 STREG %r3, PT_GR3(\regs)
1679 STREG %r4, PT_GR4(\regs)
1680 STREG %r5, PT_GR5(\regs)
1681 STREG %r6, PT_GR6(\regs)
1682 STREG %r7, PT_GR7(\regs)
1683 STREG %r8, PT_GR8(\regs)
1684 STREG %r9, PT_GR9(\regs)
1685 STREG %r10,PT_GR10(\regs)
1686 STREG %r11,PT_GR11(\regs)
1687 STREG %r12,PT_GR12(\regs)
1688 STREG %r13,PT_GR13(\regs)
1689 STREG %r14,PT_GR14(\regs)
1690 STREG %r15,PT_GR15(\regs)
1691 STREG %r16,PT_GR16(\regs)
1692 STREG %r17,PT_GR17(\regs)
1693 STREG %r18,PT_GR18(\regs)
1696 .macro reg_restore regs
1697 LDREG PT_GR3(\regs), %r3
1698 LDREG PT_GR4(\regs), %r4
1699 LDREG PT_GR5(\regs), %r5
1700 LDREG PT_GR6(\regs), %r6
1701 LDREG PT_GR7(\regs), %r7
1702 LDREG PT_GR8(\regs), %r8
1703 LDREG PT_GR9(\regs), %r9
1704 LDREG PT_GR10(\regs),%r10
1705 LDREG PT_GR11(\regs),%r11
1706 LDREG PT_GR12(\regs),%r12
1707 LDREG PT_GR13(\regs),%r13
1708 LDREG PT_GR14(\regs),%r14
1709 LDREG PT_GR15(\regs),%r15
1710 LDREG PT_GR16(\regs),%r16
1711 LDREG PT_GR17(\regs),%r17
1712 LDREG PT_GR18(\regs),%r18
1715 .macro fork_like name
1716 ENTRY(sys_\name\()_wrapper)
1717 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1718 ldo TASK_REGS(%r1),%r1
1721 ldil L%sys_\name, %r31
1722 be R%sys_\name(%sr4,%r31)
1723 STREG %r28, PT_CR27(%r1)
1724 ENDPROC(sys_\name\()_wrapper)
1731 /* Set the return value for the child */
1733 BL schedule_tail, %r2
1735 finish_child_return:
1736 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1737 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1739 LDREG PT_CR27(%r1), %r3
1744 ENDPROC(child_return)
/*
 * sys_rt_sigreturn_wrapper: call sys_rt_sigreturn() with a pointer to
 * the task's pt_regs (arg0 = %r26), then reload from pt_regs state the
 * C signal-return code may have rewritten from the user sigcontext.
 */
1746 ENTRY(sys_rt_sigreturn_wrapper)
1747 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1748 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1749 /* Don't save regs, we are going to restore them from sigcontext. */
1750 STREG %r2, -RP_OFFSET(%r30)
/* NOTE(review): two BL sys_rt_sigreturn sequences appear below; they
 * are presumably the two halves of a 64-bit/32-bit #ifdef pair whose
 * preprocessor lines are elided from this chunk -- confirm against the
 * full source. */
1752 ldo FRAME_SIZE(%r30), %r30
1753 BL sys_rt_sigreturn,%r2
1754 ldo -16(%r30),%r29 /* Reference param save area */
1756 BL sys_rt_sigreturn,%r2
1757 ldo FRAME_SIZE(%r30), %r30
/* Pop our frame and recover the return pointer saved above. */
1760 ldo -FRAME_SIZE(%r30), %r30
1761 LDREG -RP_OFFSET(%r30), %r2
1763 /* FIXME: I think we need to restore a few more things here. */
1764 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1765 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1768 /* If the signal was received while the process was blocked on a
1769 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1770 * take us to syscall_exit_rfi and on to intr_return.
1773 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1774 ENDPROC(sys_rt_sigreturn_wrapper)
/* Head of the common syscall exit path: store the syscall return value
 * (%r28) into pt_regs, with extra HP-UX return-register saving when the
 * task's personality is PER_HPUX.
 * NOTE(review): the syscall_exit entry label and several interleaved
 * lines (including the matching #ifdef CONFIG_HPUX) are elided from
 * this chunk. */
1777 /* NOTE: HP-UX syscalls also come through here
1778 * after hpux_syscall_exit fixes up return
1781 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
1782 * via syscall_exit_rfi if the signal was received while the process
1786 /* save return value now */
1789 LDREG TI_TASK(%r1),%r1
1790 STREG %r28,TASK_PT_GR28(%r1)
1793 /* <linux/personality.h> cannot be easily included */
1794 #define PER_HPUX 0x10
1795 ldw TASK_PERSONALITY(%r1),%r19
1797 /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
/* Subtract PER_HPUX and compare the difference against zero instead;
 * the nullified branch goes to 1f when personality != PER_HPUX. */
1798 ldo -PER_HPUX(%r19), %r19
1799 cmpib,COND(<>),n 0,%r19,1f
1801 /* Save other hpux returns if personality is PER_HPUX */
1802 STREG %r22,TASK_PT_GR22(%r1)
1803 STREG %r29,TASK_PT_GR29(%r1)
1806 #endif /* CONFIG_HPUX */
1808 /* Seems to me that dp could be wrong here, if the syscall involved
1809 * calling a module, and nothing got round to restoring dp on return.
1813 syscall_check_resched:
1815 /* check for reschedule */
/* Load thread_info->flags and branch to syscall_do_resched when the
 * TIF_NEED_RESCHED bit is set (bb tests a single bit of %r19). */
1817 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
1818 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1820 .import do_signal,code
/* Re-load flags and test for signal work.  The ,COND(<>) completer
 * nullifies the following branch when (flags & mask) != 0, so we fall
 * through to the notify/signal path only when work is pending. */
1822 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
1823 ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
1824 and,COND(<>) %r19, %r26, %r0
1825 b,n syscall_restore /* skip past if we've nothing to do */
1828 /* Save callee-save registers (for sigcontext).
1829 * FIXME: After this point the process structure should be
1830 * consistent with all the relevant state of the process
1831 * before the syscall. We need to verify this.
1833 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1834 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
1838 ldo -16(%r30),%r29 /* Reference param save area */
/* do_notify_resume(regs, in_syscall); the second argument is set up in
 * the branch delay slot. */
1841 BL do_notify_resume,%r2
1842 ldi 1, %r25 /* long in_syscall = 1 */
1844 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1845 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
/* Loop back: more work may have become pending while in C code. */
1848 b,n syscall_check_sig
/* Fast syscall-return path: restore user registers from pt_regs and
 * return to user space with an external branch (no RFI).
 * NOTE(review): the syscall_restore label itself and several
 * interleaved lines are elided from this chunk. */
1851 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1853 /* Are we being ptraced? */
/* If any syscall-trace flags are set, take the slow RFI exit so the
 * tracer sees fully populated state; and,COND(=) nullifies the branch
 * when (flags & mask) == 0. */
1854 ldw TASK_FLAGS(%r1),%r19
1855 ldi _TIF_SYSCALL_TRACE_MASK,%r2
1856 and,COND(=) %r19,%r2,%r0
1857 b,n syscall_restore_rfi
1859 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
1862 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
/* Reload the user's general registers from the task's pt_regs save
 * area. */
1865 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
1866 LDREG TASK_PT_GR19(%r1),%r19
1867 LDREG TASK_PT_GR20(%r1),%r20
1868 LDREG TASK_PT_GR21(%r1),%r21
1869 LDREG TASK_PT_GR22(%r1),%r22
1870 LDREG TASK_PT_GR23(%r1),%r23
1871 LDREG TASK_PT_GR24(%r1),%r24
1872 LDREG TASK_PT_GR25(%r1),%r25
1873 LDREG TASK_PT_GR26(%r1),%r26
1874 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
1875 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
1876 LDREG TASK_PT_GR29(%r1),%r29
1877 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
1879 /* NOTE: We use rsm/ssm pair to make this operation atomic */
1880 LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
1882 copy %r1,%r30 /* Restore user sp */
1883 mfsp %sr3,%r1 /* Get user space id */
1884 mtsp %r1,%sr7 /* Restore sr7 */
1887 /* Set sr2 to zero for userspace syscalls to work. */
/* Propagate the user space id (from %sr3) into the remaining user
 * space registers. */
1889 mtsp %r1,%sr4 /* Restore sr4 */
1890 mtsp %r1,%sr5 /* Restore sr5 */
1891 mtsp %r1,%sr6 /* Restore sr6 */
/* Force privilege level 3 (user) into the low two bits of the return
 * address. */
1893 depi 3,31,2,%r31 /* ensure return to user mode. */
1896 /* decide whether to reset the wide mode bit
1898 * For a syscall, the W bit is stored in the lowest bit
1899 * of sp. Extract it and reset W if it is zero */
1900 extrd,u,*<> %r30,63,1,%r1
1902 /* now reset the lowest bit of sp if it was set */
1905 be,n 0(%sr3,%r31) /* return to user space */
1907 /* We have to return via an RFI, so that PSW T and R bits can be set
1909 * This sets up pt_regs so we can return via intr_restore, which is not
1910 * the most efficient way of doing things, but it works.
1912 syscall_restore_rfi:
/* Arm the recovery counter so a trap is taken almost immediately after
 * returning -- used to implement single-stepping. */
1913 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
1914 mtctl %r2,%cr0 /* for immediate trap */
1915 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
1916 ldi 0x0b,%r20 /* Create new PSW */
1917 depi -1,13,1,%r20 /* C, Q, D, and I bits */
1919 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1920 * set in thread_info.h and converted to PA bitmap
1921 * numbers in asm-offsets.c */
1923 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
/* extru,= nullifies the following depi when the tested bit is zero, so
 * the PSW R (recovery) bit is set only when single-stepping. */
1924 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1925 depi -1,27,1,%r20 /* R bit */
1927 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1928 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1929 depi -1,7,1,%r20 /* T bit */
1931 STREG %r20,TASK_PT_PSW(%r1)
1933 /* Always store space registers, since sr3 can be changed (e.g. fork) */
/* NOTE(review): %r25 presumably holds the user space id, loaded from a
 * space register in code elided from this chunk -- confirm against the
 * full source. */
1936 STREG %r25,TASK_PT_SR3(%r1)
1937 STREG %r25,TASK_PT_SR4(%r1)
1938 STREG %r25,TASK_PT_SR5(%r1)
1939 STREG %r25,TASK_PT_SR6(%r1)
1940 STREG %r25,TASK_PT_SR7(%r1)
1941 STREG %r25,TASK_PT_IASQ0(%r1)
1942 STREG %r25,TASK_PT_IASQ1(%r1)
1945 /* Now if old D bit is clear, it means we didn't save all registers
1946 * on syscall entry, so do that now. This only happens on TRACEME
1947 * calls, or if someone attached to us while we were on a syscall.
1948 * We could make this more efficient by not saving r3-r18, but
1949 * then we wouldn't be able to use the common intr_restore path.
1950 * It is only for traced processes anyway, so performance is not
1953 bb,< %r2,30,pt_regs_ok /* Branch if D set */
1954 ldo TASK_REGS(%r1),%r25
1955 reg_save %r25 /* Save r3 to r18 */
1957 /* Save the current sr */
/* NOTE(review): %r2 is presumably reloaded from a space register in
 * code elided from this chunk before each of these stores -- confirm
 * against the full source. */
1959 STREG %r2,TASK_PT_SR0(%r1)
1961 /* Save the scratch sr */
1963 STREG %r2,TASK_PT_SR1(%r1)
1965 /* sr2 should be set to zero for userspace syscalls */
1966 STREG %r0,TASK_PT_SR2(%r1)
/* Build the user-mode IAOQ pair from the saved syscall return pointer:
 * force privilege level 3 into the low two bits. */
1968 LDREG TASK_PT_GR31(%r1),%r2
1969 depi 3,31,2,%r2 /* ensure return to user mode. */
1970 STREG %r2,TASK_PT_IAOQ0(%r1)
1972 STREG %r2,TASK_PT_IAOQ1(%r1)
/* pt_regs_ok path: pt_regs is already fully saved; just force user
 * privilege into the existing IAOQ values.  NOTE(review): the
 * pt_regs_ok label line itself is elided from this chunk. */
1977 LDREG TASK_PT_IAOQ0(%r1),%r2
1978 depi 3,31,2,%r2 /* ensure return to user mode. */
1979 STREG %r2,TASK_PT_IAOQ0(%r1)
1980 LDREG TASK_PT_IAOQ1(%r1),%r2
1982 STREG %r2,TASK_PT_IAOQ1(%r1)
/* syscall_do_resched: call schedule() and then re-run the full exit
 * checks from the top.  NOTE(review): the label line and the BL to
 * schedule are elided from this chunk. */
1986 .import schedule,code
1990 ldo -16(%r30),%r29 /* Reference param save area */
1994 b syscall_check_resched /* if resched, we start over again */
1996 ENDPROC(syscall_exit)
1999 #ifdef CONFIG_FUNCTION_TRACER
2000 .import ftrace_function_trampoline,code
/* mcount-style entry: tail-branch into the C tracing trampoline.
 * NOTE(review): the surrounding ENTRY/ENDPROC and delay-slot lines are
 * elided from this chunk. */
2003 b ftrace_function_trampoline
2007 ENTRY(return_to_handler)
/* Point %rp at return_trampoline so the traced function "returns"
 * through the tracer, then branch to the C return handler. */
2008 load32 return_trampoline, %rp
2011 b ftrace_return_to_handler
2022 ENDPROC(return_to_handler)
2023 #endif /* CONFIG_FUNCTION_TRACER */
2025 #ifdef CONFIG_IRQSTACKS
2026 /* void call_on_stack(unsigned long param1, void *func,
2027 unsigned long new_stack) */
2028 ENTRY(call_on_stack)
2031 /* Regarding the HPPA calling conventions for function pointers,
2032 we assume the PIC register is not changed across call. For
2033 CONFIG_64BIT, the argument pointer is left to point at the
2034 argument region allocated for the call to call_on_stack. */
2035 # ifdef CONFIG_64BIT
2036 /* Switch to new stack. We allocate two 128 byte frames. */
2038 /* Save previous stack pointer and return pointer in frame marker */
2039 STREG %rp, -144(%sp)
2040 /* Calls always use function descriptor */
/* 64-bit: a function pointer is a descriptor (OPD); the real entry
 * address lives at offset 16. */
2041 LDREG 16(%arg1), %arg1
2043 STREG %r1, -136(%sp)
/* Restore the caller's return pointer and stack pointer from the frame
 * marker.  NOTE(review): %r1 presumably held the old %sp when stored at
 * -136 above -- the copy is in code elided from this chunk. */
2044 LDREG -144(%sp), %rp
2046 LDREG -136(%sp), %sp
2048 /* Switch to new stack. We allocate two 64 byte frames. */
2050 /* Save previous stack pointer and return pointer in frame marker */
2053 /* Calls use function descriptor if PLABEL bit is set */
/* bb,>= branches when bit 30 (the PLABEL bit) is clear, skipping the
 * descriptor dereference below. */
2054 bb,>=,n %arg1, 30, 1f
2056 LDREG 0(%arg1), %arg1
/* External branch-and-link through %sr4, linking in %r31. */
2058 be,l 0(%sr4,%arg1), %sr0, %r31
2063 # endif /* CONFIG_64BIT */
2064 ENDPROC(call_on_stack)
2069 * get_register is used by the non access tlb miss handlers to
2070 * copy the value of the general register specified in r8 into
2071 * r1. This routine can't be used for shadowed registers, since
2072 * the rfir will restore the original value. So, for the shadowed
2073 * registers we put a -1 into r1 to indicate that the register
2074 * should not be used (the register being copied could also have
2075 * a -1 in it, but that is OK, it just means that we will have
2076 * to use the slow path instead).
/* Branch table: one bv (indirect return through %r25) per general
 * register.  NOTE(review): the get_register entry/dispatch code and the
 * delay-slot instruction that normally follows each bv (the copy into
 * %r1, or -1 for shadowed registers) are elided from this chunk; only
 * the branch skeleton is visible. */
2080 bv %r0(%r25) /* r0 */
2082 bv %r0(%r25) /* r1 - shadowed */
2084 bv %r0(%r25) /* r2 */
2086 bv %r0(%r25) /* r3 */
2088 bv %r0(%r25) /* r4 */
2090 bv %r0(%r25) /* r5 */
2092 bv %r0(%r25) /* r6 */
2094 bv %r0(%r25) /* r7 */
2096 bv %r0(%r25) /* r8 - shadowed */
2098 bv %r0(%r25) /* r9 - shadowed */
2100 bv %r0(%r25) /* r10 */
2102 bv %r0(%r25) /* r11 */
2104 bv %r0(%r25) /* r12 */
2106 bv %r0(%r25) /* r13 */
2108 bv %r0(%r25) /* r14 */
2110 bv %r0(%r25) /* r15 */
2112 bv %r0(%r25) /* r16 - shadowed */
2114 bv %r0(%r25) /* r17 - shadowed */
2116 bv %r0(%r25) /* r18 */
2118 bv %r0(%r25) /* r19 */
2120 bv %r0(%r25) /* r20 */
2122 bv %r0(%r25) /* r21 */
2124 bv %r0(%r25) /* r22 */
2126 bv %r0(%r25) /* r23 */
2128 bv %r0(%r25) /* r24 - shadowed */
2130 bv %r0(%r25) /* r25 - shadowed */
2132 bv %r0(%r25) /* r26 */
2134 bv %r0(%r25) /* r27 */
2136 bv %r0(%r25) /* r28 */
2138 bv %r0(%r25) /* r29 */
2140 bv %r0(%r25) /* r30 */
2142 bv %r0(%r25) /* r31 */
2148 * set_register is used by the non access tlb miss handlers to
2149 * copy the value of r1 into the general register specified in
/* Branch table mirroring get_register: one bv (indirect return through
 * %r25) per general register.  NOTE(review): the set_register entry /
 * dispatch code and the delay-slot instruction that normally follows
 * each bv (the copy of %r1 into the target register) are elided from
 * this chunk; only the branch skeleton is visible. */
2154 bv %r0(%r25) /* r0 (silly, but it is a place holder) */
2156 bv %r0(%r25) /* r1 */
2158 bv %r0(%r25) /* r2 */
2160 bv %r0(%r25) /* r3 */
2162 bv %r0(%r25) /* r4 */
2164 bv %r0(%r25) /* r5 */
2166 bv %r0(%r25) /* r6 */
2168 bv %r0(%r25) /* r7 */
2170 bv %r0(%r25) /* r8 */
2172 bv %r0(%r25) /* r9 */
2174 bv %r0(%r25) /* r10 */
2176 bv %r0(%r25) /* r11 */
2178 bv %r0(%r25) /* r12 */
2180 bv %r0(%r25) /* r13 */
2182 bv %r0(%r25) /* r14 */
2184 bv %r0(%r25) /* r15 */
2186 bv %r0(%r25) /* r16 */
2188 bv %r0(%r25) /* r17 */
2190 bv %r0(%r25) /* r18 */
2192 bv %r0(%r25) /* r19 */
2194 bv %r0(%r25) /* r20 */
2196 bv %r0(%r25) /* r21 */
2198 bv %r0(%r25) /* r22 */
2200 bv %r0(%r25) /* r23 */
2202 bv %r0(%r25) /* r24 */
2204 bv %r0(%r25) /* r25 */
2206 bv %r0(%r25) /* r26 */
2208 bv %r0(%r25) /* r27 */
2210 bv %r0(%r25) /* r28 */
2212 bv %r0(%r25) /* r29 */
2214 bv %r0(%r25) /* r30 */
2216 bv %r0(%r25) /* r31 */