1 /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 * Copyright (C) 1999,2000 Philipp Rumpf
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 */
12 #include <asm/asm-offsets.h>
14 /* we have the following possibilities to act on an interruption:
15 * - handle in assembly and use shadowed registers only
16 * - save registers to kernel stack and handle in assembly or C */
20 #include <asm/cache.h> /* for L1_CACHE_SHIFT */
21 #include <asm/assembly.h> /* for LDREG/STREG defines */
22 #include <asm/pgtable.h>
23 #include <asm/signal.h>
24 #include <asm/unistd.h>
26 #include <asm/traps.h>
27 #include <asm/thread_info.h>
28 #include <asm/alternative.h>
30 #include <linux/linkage.h>
38 .import pa_tlb_lock,data
39 .macro load_pa_tlb_lock reg
41 addil L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg
44 /* space_to_prot macro creates a prot id from a space id */
46 #if (SPACEID_SHIFT) == 0
47 .macro space_to_prot spc prot
48 depd,z \spc,62,31,\prot
51 .macro space_to_prot spc prot
52 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
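
	/* In effect (a rough C equivalent, assuming 64-bit registers):
	 * with SPACEID_SHIFT == 0 the depd,z above computes
	 *	prot = (spc & 0x7fffffff) << 1;
	 * i.e. the prot id is simply the space id shifted left one bit. */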
56 /* Switch to virtual mapping, trashing only %r1 */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
64 load32 KERNEL_PSW, %r1
66 rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
67 mtctl %r0, %cr17 /* Clear IIASQ tail */
68 mtctl %r0, %cr17 /* Clear IIASQ head */
71 mtctl %r1, %cr18 /* Set IIAOQ tail */
73 mtctl %r1, %cr18 /* Set IIAOQ head */
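
	/* cr17/cr18 are the interruption address space/offset queues: each
	 * mtctl pushes one entry, so writing each register twice loads both
	 * the head and the tail of the queue.  The rfir at the end of this
	 * macro then resumes at the queued (virtual) address with the
	 * KERNEL_PSW prepared above as the new PSW (a descriptive note). */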
80 * The "get_stack" macros are responsible for determining the
84 * Already using a kernel stack, so call the
85 * get_stack_use_r30 macro to push a pt_regs structure
86 * on the stack, and store registers there.
88 * Need to set up a kernel stack, so call the
89 * get_stack_use_cr30 macro to set up a pointer
90 * to the pt_regs structure contained within the
91 * task pointer pointed to by cr30. Set the stack
92 * pointer to point to the end of the task structure.
94 * Note that we use shadowed registers for temps until
95 * we can save %r26 and %r29. %r26 is used to preserve
96 * %r8 (a shadowed register) which temporarily contained
97 * either the fault type ("code") or the eirr. We need
98 * to use a non-shadowed register to carry the value over
99 * the rfir in virt_map. We use %r26 since this value winds
100 * up being passed as the argument to either do_cpu_irq_mask
101 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
103 * be a non-shadowed register so that it survives the rfir.
105 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
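	 *
	 * In outline: %cr30 points at the thread_info at the base of the
	 * kernel stack region; TI_TASK turns that into the task_struct,
	 * whose pt_regs save area lives at offset TASK_REGS; and the new
	 * kernel %r30 is %cr30 + THREAD_SZ_ALGN, the top of that region
	 * (a rough summary of the macros below).
	 */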
108 .macro get_stack_use_cr30
110 /* we save the registers in the task struct */
114 ldo THREAD_SZ_ALGN(%r1), %r30
118 LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
120 ldo TASK_REGS(%r9),%r9
121 STREG %r17,PT_GR30(%r9)
122 STREG %r29,PT_GR29(%r9)
123 STREG %r26,PT_GR26(%r9)
124 STREG %r16,PT_SR7(%r9)
128 .macro get_stack_use_r30
130 /* we put a struct pt_regs on the stack and save the registers there */
134 ldo PT_SZ_ALGN(%r30),%r30
135 STREG %r1,PT_GR30(%r9)
136 STREG %r29,PT_GR29(%r9)
137 STREG %r26,PT_GR26(%r9)
138 STREG %r16,PT_SR7(%r9)
143 LDREG PT_GR1(%r29), %r1
144 LDREG PT_GR30(%r29),%r30
145 LDREG PT_GR29(%r29),%r29
148 /* default interruption handler
149 * (calls traps.c:handle_interruption) */
156 /* Interrupt interruption handler
157 * (calls irq.c:do_cpu_irq_mask) */
164 .import os_hpmc, code
168 nop /* must be a NOP, will be patched later */
169 load32 PA(os_hpmc), %r3
172 .word 0 /* checksum (will be patched) */
173 .word 0 /* address of handler */
174 .word 0 /* length of handler */
178 * Performance Note: Instructions will be moved up into
179 * this part of the code later on, once we are sure
180 * that the tlb miss handlers are close to final form.
183 /* Register definitions for tlb miss handler macros */
185 va = r8 /* virtual address for which the trap occurred */
186 spc = r24 /* space for which the trap occurred */
191 * itlb miss interruption handler (parisc 1.1 - 32 bit)
205 * itlb miss interruption handler (parisc 2.0)
222 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
225 .macro naitlb_11 code
236 * naitlb miss interruption handler (parisc 2.0)
239 .macro naitlb_20 code
254 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
268 * dtlb miss interruption handler (parisc 2.0)
285 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
287 .macro nadtlb_11 code
297 /* nadtlb miss interruption handler (parisc 2.0) */
299 .macro nadtlb_20 code
314 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
328 * dirty bit trap interruption handler (parisc 2.0)
	/* In LP64, the space register contains part of the upper 32 bits of
	 * the faulting address.  We have to extract this and place it in
	 * the va,
346 * zeroing the corresponding bits in the space register */
347 .macro space_adjust spc,va,tmp
349 extrd,u \spc,63,SPACEID_SHIFT,\tmp
350 depd %r0,63,SPACEID_SHIFT,\spc
351 depd \tmp,31,SPACEID_SHIFT,\va
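
	/* Roughly, in C (a sketch; 64-bit registers assumed):
	 *	tmp = spc & ((1UL << SPACEID_SHIFT) - 1);   // low space bits
	 *	spc &= ~((1UL << SPACEID_SHIFT) - 1);       // cleared in spc
	 *	va  = (va & ~(((1UL << SPACEID_SHIFT) - 1) << 32))
	 *	      | (tmp << 32);                        // folded into va
	 */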
355 .import swapper_pg_dir,code
357 /* Get the pgd. For faults on space zero (kernel space), this
358 * is simply swapper_pg_dir. For user space faults, the
359 * pgd is stored in %cr25 */
360 .macro get_pgd spc,reg
361 ldil L%PA(swapper_pg_dir),\reg
362 ldo R%PA(swapper_pg_dir)(\reg),\reg
363 or,COND(=) %r0,\spc,%r0
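
	/* or,COND(=) with %r0 is the PA-RISC conditional-nullify idiom:
	 * when \spc is zero (a kernel-space fault) it nullifies the
	 * instruction that follows, keeping the swapper_pg_dir address
	 * loaded above; otherwise the user pgd is fetched from %cr25, as
	 * the comment above describes. */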
368 space_check(spc,tmp,fault)
370 spc - The space we saw the fault with.
371 tmp - The place to store the current space.
372 fault - Function to call on failure.
374 Only allow faults on different spaces from the
375 currently active one if we're the kernel
378 .macro space_check spc,tmp,fault
	/* check against %r0, which is the same value as LINUX_GATEWAY_SPACE */
	or,COND(<>) %r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if executing as kernel */
385 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
386 cmpb,COND(<>),n \tmp,\spc,\fault
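
	/* The two conditional ors are nullification idioms as well: each
	 * skips the instruction after it when its condition holds, so the
	 * final compare-and-branch to \fault only runs on the paths the
	 * comments above describe (a note on the idiom, not new logic). */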
	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
392 * NOTE: we use ldw even for LP64, since the short pointers
393 * can address up to 1TB
395 .macro L2_ptep pmd,pte,index,va,fault
396 #if CONFIG_PGTABLE_LEVELS == 3
397 extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
399 # if defined(CONFIG_64BIT)
400 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
402 # if PAGE_SIZE > 4096
403 extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
405 extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
409 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
411 ldw,s \index(\pmd),\pmd
412 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
413 dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
414 SHLREG \pmd,PxD_VALUE_SHIFT,\pmd
415 extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
416 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
417 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
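
	/* In rough C (a sketch of the walk above, not the kernel's actual
	 * page-table API; "fault" is the handler's fault label):
	 *	e = pmd_base[pmd_index];		// the ldw,s above
	 *	if (!(e & PxD_PRESENT))			// bb,>=,n on the
	 *		goto fault;			//   present bit
	 *	e &= ~((1 << PxD_FLAG_SHIFT) - 1);	// clear flag bits
	 *	pte_base = e << PxD_VALUE_SHIFT;	// short ptr -> address
	 *	pte = pte_base[pte_index];		// shladd + load
	 */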
420 /* Look up PTE in a 3-Level scheme.
422 * Here we implement a Hybrid L2/L3 scheme: we allocate the
423 * first pmd adjacent to the pgd. This means that we can
424 * subtract a constant offset to get to it. The pmd and pgd
425 * sizes are arranged so that a single pmd covers 4GB (giving
426 * a full LP64 process access to 8TB) so our lookups are
427 * effectively L2 for the first 4GB of the kernel (i.e. for
428 * all ILP32 processes and all the kernel for machines with
429 * under 4GB of memory) */
430 .macro L3_ptep pgd,pte,index,va,fault
431 #if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
432 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
433 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
434 ldw,s \index(\pgd),\pgd
435 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
436 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
437 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
438 shld \pgd,PxD_VALUE_SHIFT,\index
439 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
441 extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
442 ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
444 L2_ptep \pgd,\pte,\index,\va,\fault
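
	/* Each extrd,u,*= above tests whether the va has bits set above the
	 * range a single pmd covers; for low addresses the pgd fetch is
	 * nullified and the pgd-adjacent pmd is used instead (the ldo with
	 * ASM_PGD_PMD_OFFSET), after which both cases share the L2 walk
	 * (a summary of the hybrid scheme described above). */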
	/* Acquire the pa_tlb_lock and check that the page is present. */
448 .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
450 98: cmpib,COND(=),n 0,\spc,2f
451 load_pa_tlb_lock \tmp
452 1: LDCW 0(\tmp),\tmp1
453 cmpib,COND(=) 0,\tmp1,1b
456 bb,<,n \pte,_PAGE_PRESENT_BIT,3f
460 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
462 2: LDREG 0(\ptp),\pte
463 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
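
	/* LDCW is the PA-RISC atomic load-and-clear: it returns the old
	 * lock word and zeroes it in one step, so a zero return means the
	 * lock was held and we spin.  Roughly:
	 *	while (ldcw(&pa_tlb_lock) == 0)
	 *		;
	 * (a sketch; on UP kernels the ALTERNATIVE above patches the whole
	 * locked path to NOPs). */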
	/* Release the pa_tlb_lock without reloading the lock address. */
468 .macro tlb_unlock0 spc,tmp,tmp1
470 98: or,COND(=) %r0,\spc,%r0
472 or,COND(=) %r0,\spc,%r0
474 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
	/* Release the pa_tlb_lock. */
479 .macro tlb_unlock1 spc,tmp,tmp1
481 98: load_pa_tlb_lock \tmp
482 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
483 tlb_unlock0 \spc,\tmp,\tmp1
487 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
488 * don't needlessly dirty the cache line if it was already set */
489 .macro update_accessed ptp,pte,tmp,tmp1
490 ldi _PAGE_ACCESSED,\tmp1
492 and,COND(<>) \tmp1,\pte,%r0
496 /* Set the dirty bit (and accessed bit). No need to be
497 * clever, this is only used from the dirty fault */
498 .macro update_dirty ptp,pte,tmp
499 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
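
	/* Both macros end by storing the updated pte back through \ptp;
	 * roughly  *ptp = pte | _PAGE_ACCESSED  (conditionally, above) and
	 * *ptp = pte | _PAGE_ACCESSED | _PAGE_DIRTY  (here).  The
	 * and,COND(<>) in update_accessed nullifies the store when the bit
	 * was already set, which is what keeps the cache line clean. */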
504 /* We have (depending on the page size):
505 * - 38 to 52-bit Physical Page Number
506 * - 12 to 26-bit page offset
508 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
509 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
510 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
511 #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
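
/* For example (an illustrative calculation): with a 16KB kernel PAGE_SIZE,
 * PAGE_SHIFT is 14, so PAGE_ADD_SHIFT is 2 and a kernel PFN is shifted
 * left by 2 to become the 4k-granular PFN that the TLB insert expects. */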
513 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
514 .macro convert_for_tlb_insert20 pte,tmp
515 #ifdef CONFIG_HUGETLB_PAGE
517 extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
518 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
520 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
521 (63-58)+PAGE_ADD_SHIFT,\pte
522 extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0
523 depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
524 (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
525 #else /* Huge pages disabled */
526 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
527 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
528 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
529 (63-58)+PAGE_ADD_SHIFT,\pte
533 /* Convert the pte and prot to tlb insertion values. How
534 * this happens is quite subtle, read below */
535 .macro make_insert_tlb spc,pte,prot,tmp
536 space_to_prot \spc \prot /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
542 * Then incredible subtlety: The access rights are
543 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
544 * See 3-14 of the parisc 2.0 manual
546 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
547 * trigger an access rights trap in user space if the user
548 * tries to read an unreadable page */
551 /* PAGE_USER indicates the page can be read with user privileges,
552 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
553 * contains _PAGE_READ) */
554 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
556 /* If we're a gateway page, drop PL2 back to zero for promotion
557 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
559 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
560 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
568 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
571 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
572 convert_for_tlb_insert20 \pte \tmp
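
	/* Net result (a summary of the comments above): the space id
	 * becomes the protection id, T/D/B and the access-rights field are
	 * taken from the software pte, PL1/PL2 distinguish user and
	 * gateway access, and convert_for_tlb_insert20 leaves the
	 * 4k-granular page address in \pte, ready for idtlbt/iitlbt. */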
575 /* Identical macro to make_insert_tlb above, except it
576 * makes the tlb entry for the differently formatted pa11
577 * insertion instructions */
578 .macro make_insert_tlb_11 spc,pte,prot
579 zdep \spc,30,15,\prot
581 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
583 extru,= \pte,_PAGE_USER_BIT,1,%r0
584 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
585 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
586 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
588 /* Get rid of prot bits and convert to page addr for iitlba */
590 depi 0,31,ASM_PFN_PTE_SHIFT,\pte
591 SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
594 /* This is for ILP32 PA2.0 only. The TLB insertion needs
595 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
598 .macro f_extend pte,tmp
599 extrd,s \pte,42,4,\tmp
601 extrd,s \pte,63,25,\pte
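
	/* Conceptually (an illustrative example): a 32-bit address of the
	 * form 0xfXXXXXXX has its leading f's extended through the upper
	 * word, e.g. 0xf4001000 -> 0xfffffffff4001000, which is the range
	 * PA 2.0 uses for I/O. */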
604 /* The alias region is an 8MB aligned 16MB to do clear and
	 * copy user pages at addresses congruent with the user
	 * virtual address.
	 *
608 * To use the alias page, you set %r26 up with the to TLB
609 * entry (identifying the physical page) and %r23 up with
610 * the from tlb entry (or nothing if only a to entry---for
611 * clear_user_page_asm) */
612 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
613 cmpib,COND(<>),n 0,\spc,\fault
614 ldil L%(TMPALIAS_MAP_START),\tmp
615 #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
616 /* on LP64, ldi will sign extend into the upper 32 bits,
617 * which is behaviour we don't want */
622 cmpb,COND(<>),n \tmp,\tmp1,\fault
623 mfctl %cr19,\tmp /* iir */
624 /* get the opcode (first six bits) into \tmp */
625 extrw,u \tmp,5,6,\tmp
627 * Only setting the T bit prevents data cache movein
628 * Setting access rights to zero prevents instruction cache movein
630 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
631 * to type field and _PAGE_READ goes to top bit of PL1
633 ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
635 * so if the opcode is one (i.e. this is a memory management
636 * instruction) nullify the next load so \prot is only T.
637 * Otherwise this is a normal data operation
639 cmpiclr,= 0x01,\tmp,%r0
640 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
642 depd,z \prot,8,7,\prot
645 depw,z \prot,8,7,\prot
647 .error "undefined PA type to do_alias"
651 * OK, it is in the temp alias region, check whether "from" or "to".
652 * Check "subtle" note in pacache.S re: r23/r26.
655 extrd,u,*= \va,41,1,%r0
657 extrw,u,= \va,9,1,%r0
659 or,COND(tr) %r23,%r0,\pte
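
	/* or,COND(tr) is the unconditional-nullify idiom: when it executes
	 * it copies %r23 (the "from" entry) into \pte and skips the next
	 * instruction; the extr tests above nullify it instead when the va
	 * falls in the "to" half, so \pte ends up as %r26 there (a note on
	 * the idiom; see the pacache.S comment referenced above). */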
 * Fault_vectors are architecturally required to be aligned on a 2K
 * boundary.
 */
672 ENTRY(fault_vector_20)
673 /* First vector is invalid (0) */
674 .ascii "cows can fly"
683 itlb_20 PARISC_ITLB_TRAP
715 ENTRY(fault_vector_11)
716 /* First vector is invalid (0) */
717 .ascii "cows can fly"
726 itlb_11 PARISC_ITLB_TRAP
755 /* Fault vector is separately protected and *must* be on its own page */
758 .import handle_interruption,code
759 .import do_cpu_irq_mask,code
764 * copy_thread moved args into task save area.
767 ENTRY(ret_from_kernel_thread)
768 /* Call schedule_tail first though */
769 BL schedule_tail, %r2
772 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
773 LDREG TASK_PT_GR25(%r1), %r26
775 LDREG TASK_PT_GR27(%r1), %r27
777 LDREG TASK_PT_GR26(%r1), %r1
780 b finish_child_return
782 END(ret_from_kernel_thread)
786 * struct task_struct *_switch_to(struct task_struct *prev,
787 * struct task_struct *next)
789 * switch kernel stacks and return prev */
790 ENTRY_CFI(_switch_to)
791 STREG %r2, -RP_OFFSET(%r30)
796 load32 _switch_to_ret, %r2
798 STREG %r2, TASK_PT_KPC(%r26)
799 LDREG TASK_PT_KPC(%r25), %r2
801 STREG %r30, TASK_PT_KSP(%r26)
802 LDREG TASK_PT_KSP(%r25), %r30
803 LDREG TASK_THREAD_INFO(%r25), %r25
807 ENTRY(_switch_to_ret)
808 mtctl %r0, %cr0 /* Needed for single stepping */
812 LDREG -RP_OFFSET(%r30), %r2
815 ENDPROC_CFI(_switch_to)
818 * Common rfi return path for interruptions, kernel execve, and
819 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
820 * return via this path if the signal was received when the process
821 * was running; if the process was blocked on a syscall then the
822 * normal syscall_exit path is used. All syscalls for traced
 * processes exit via intr_restore.
 *
 * XXX If any syscalls that change a process's space id ever exit
 * this way, then we will need to copy %sr3 into PT_SR[3..7], and
 * adjust IASQ[0..1].
 */
833 ENTRY_CFI(syscall_exit_rfi)
835 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
836 ldo TASK_REGS(%r16),%r16
837 /* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
840 LDREG PT_IAOQ0(%r16),%r19
842 STREG %r19,PT_IAOQ0(%r16)
843 LDREG PT_IAOQ1(%r16),%r19
845 STREG %r19,PT_IAOQ1(%r16)
846 LDREG PT_PSW(%r16),%r19
847 load32 USER_PSW_MASK,%r1
849 load32 USER_PSW_HI_MASK,%r20
852 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
854 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
855 STREG %r19,PT_PSW(%r16)
858 * If we aren't being traced, we never saved space registers
859 * (we don't store them in the sigcontext), so set them
860 * to "proper" values now (otherwise we'll wind up restoring
861 * whatever was last stored in the task structure, which might
862 * be inconsistent if an interrupt occurred while on the gateway
863 * page). Note that we may be "trashing" values the user put in
864 * them, but we don't support the user changing them.
867 STREG %r0,PT_SR2(%r16)
869 STREG %r19,PT_SR0(%r16)
870 STREG %r19,PT_SR1(%r16)
871 STREG %r19,PT_SR3(%r16)
872 STREG %r19,PT_SR4(%r16)
873 STREG %r19,PT_SR5(%r16)
874 STREG %r19,PT_SR6(%r16)
875 STREG %r19,PT_SR7(%r16)
878 /* check for reschedule */
880 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
881 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
883 .import do_notify_resume,code
887 LDREG TI_FLAGS(%r1),%r19
888 ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
889 and,COND(<>) %r19, %r20, %r0
890 b,n intr_restore /* skip past if we've nothing to do */
892 /* This check is critical to having LWS
893 * working. The IASQ is zero on the gateway
894 * page and we cannot deliver any signals until
895 * we get off the gateway page.
897 * Only do signals if we are returning to user space
899 LDREG PT_IASQ0(%r16), %r20
900 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* backward */
901 LDREG PT_IASQ1(%r16), %r20
902 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* backward */
904 /* NOTE: We need to enable interrupts if we have to deliver
905 * signals. We used to do this earlier but it caused kernel
906 * stack overflows. */
909 copy %r0, %r25 /* long in_syscall = 0 */
911 ldo -16(%r30),%r29 /* Reference param save area */
914 BL do_notify_resume,%r2
915 copy %r16, %r26 /* struct pt_regs *regs */
921 ldo PT_FR31(%r29),%r1
925 /* inverse of virt_map */
927 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
930 /* Restore space id's and special cr's from PT_REGS
931 * structure pointed to by r29
935 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
936 * It also restores r1 and r30.
943 #ifndef CONFIG_PREEMPTION
944 # define intr_do_preempt intr_restore
945 #endif /* !CONFIG_PREEMPTION */
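
/* With CONFIG_PREEMPTION disabled, intr_do_preempt above is simply an
 * alias for intr_restore, so the kernel-return branches below fall
 * straight through to the restore path instead of trying to preempt. */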
947 .import schedule,code
949 /* Only call schedule on return to userspace. If we're returning
950 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
951 * we jump back to intr_restore.
953 LDREG PT_IASQ0(%r16), %r20
954 cmpib,COND(=) 0, %r20, intr_do_preempt
956 LDREG PT_IASQ1(%r16), %r20
957 cmpib,COND(=) 0, %r20, intr_do_preempt
960 /* NOTE: We need to enable interrupts if we schedule. We used
961 * to do this earlier but it caused kernel stack overflows. */
965 ldo -16(%r30),%r29 /* Reference param save area */
968 ldil L%intr_check_sig, %r2
972 load32 schedule, %r20
975 ldo R%intr_check_sig(%r2), %r2
977 /* preempt the current task on returning to kernel
978 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0.  Otherwise, we continue on
980 * our merry way back to the current running task.
982 #ifdef CONFIG_PREEMPTION
983 .import preempt_schedule_irq,code
985 rsm PSW_SM_I, %r0 /* disable interrupts */
987 /* current_thread_info()->preempt_count */
989 LDREG TI_PRE_COUNT(%r1), %r19
990 cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */
991 nop /* prev insn branched backwards */
993 /* check if we interrupted a critical path */
994 LDREG PT_PSW(%r16), %r20
995 bb,<,n %r20, 31 - PSW_SM_I, intr_restore
998 BL preempt_schedule_irq, %r2
1001 b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
1002 #endif /* CONFIG_PREEMPTION */
1005 * External interrupts.
1009 cmpib,COND(=),n 0,%r16,1f
1021 ldo PT_FR0(%r29), %r24
1026 copy %r29, %r26 /* arg0 is pt_regs */
1027 copy %r29, %r16 /* save pt_regs */
1029 ldil L%intr_return, %r2
1032 ldo -16(%r30),%r29 /* Reference param save area */
1036 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
1037 ENDPROC_CFI(syscall_exit_rfi)
1040 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1042 ENTRY_CFI(intr_save) /* for os_hpmc */
1044 cmpib,COND(=),n 0,%r16,1f
	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
1057 cmpib,COND(=),n PARISC_ITLB_TRAP,%r26,skip_save_ior
1061 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1067 * If the interrupted code was running with W bit off (32 bit),
1068 * clear the b bits (bits 0 & 1) in the ior.
1069 * save_specials left ipsw value in r8 for us to test.
1071 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1074 /* adjust isr/ior: get high bits from isr and deposit in ior */
1075 space_adjust %r16,%r17,%r1
1077 STREG %r16, PT_ISR(%r29)
1078 STREG %r17, PT_IOR(%r29)
1080 #if 0 && defined(CONFIG_64BIT)
1081 /* Revisit when we have 64-bit code above 4Gb */
	/* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
1086 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
1089 extrd,u,* %r8,PSW_W_BIT,1,%r1
1090 cmpib,COND(=),n 1,%r1,intr_save2
1091 LDREG PT_IASQ0(%r29), %r16
1092 LDREG PT_IAOQ0(%r29), %r17
1093 /* adjust iasq/iaoq */
1094 space_adjust %r16,%r17,%r1
1095 STREG %r16, PT_IASQ0(%r29)
1096 STREG %r17, PT_IAOQ0(%r29)
1105 ldo PT_FR0(%r29), %r25
1110 copy %r29, %r25 /* arg1 is pt_regs */
1112 ldo -16(%r30),%r29 /* Reference param save area */
1115 ldil L%intr_check_sig, %r2
1116 copy %r25, %r16 /* save pt_regs */
1118 b handle_interruption
1119 ldo R%intr_check_sig(%r2), %r2
1120 ENDPROC_CFI(intr_save)
1124 * Note for all tlb miss handlers:
 * cr24 contains a pointer to the kernel address space
 * page directory.
 *
1129 * cr25 contains a pointer to the current user address
1130 * space page directory.
1132 * sr3 will contain the space id of the user address space
1133 * of the current running thread while that thread is
1134 * running in the kernel.
1138 * register number allocations. Note that these are all
1139 * in the shadowed registers
1142 t0 = r1 /* temporary register 0 */
1143 va = r8 /* virtual address for which the trap occurred */
1144 t1 = r9 /* temporary register 1 */
1145 pte = r16 /* pte/phys page # */
1146 prot = r17 /* prot bits */
1147 spc = r24 /* space for which the trap occurred */
1148 ptp = r25 /* page directory/page table pointer */
1153 space_adjust spc,va,t0
1155 space_check spc,t0,dtlb_fault
1157 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1159 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1160 update_accessed ptp,pte,t0,t1
1162 make_insert_tlb spc,pte,prot,t1
1166 tlb_unlock1 spc,t0,t1
1170 dtlb_check_alias_20w:
1171 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1179 space_adjust spc,va,t0
1181 space_check spc,t0,nadtlb_fault
1183 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
1185 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1186 update_accessed ptp,pte,t0,t1
1188 make_insert_tlb spc,pte,prot,t1
1192 tlb_unlock1 spc,t0,t1
1196 nadtlb_check_alias_20w:
1197 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1209 space_check spc,t0,dtlb_fault
1211 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1213 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
1214 update_accessed ptp,pte,t0,t1
1216 make_insert_tlb_11 spc,pte,prot
1218 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1221 idtlba pte,(%sr1,va)
1222 idtlbp prot,(%sr1,va)
1224 mtsp t1, %sr1 /* Restore sr1 */
1226 tlb_unlock1 spc,t0,t1
1230 dtlb_check_alias_11:
1231 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
1242 space_check spc,t0,nadtlb_fault
1244 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
1246 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
1247 update_accessed ptp,pte,t0,t1
1249 make_insert_tlb_11 spc,pte,prot
1251 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1254 idtlba pte,(%sr1,va)
1255 idtlbp prot,(%sr1,va)
1257 mtsp t1, %sr1 /* Restore sr1 */
1259 tlb_unlock1 spc,t0,t1
1263 nadtlb_check_alias_11:
1264 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
1273 space_adjust spc,va,t0
1275 space_check spc,t0,dtlb_fault
1277 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1279 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
1280 update_accessed ptp,pte,t0,t1
1282 make_insert_tlb spc,pte,prot,t1
1288 tlb_unlock1 spc,t0,t1
1292 dtlb_check_alias_20:
1293 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1303 space_check spc,t0,nadtlb_fault
1305 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
1307 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1308 update_accessed ptp,pte,t0,t1
1310 make_insert_tlb spc,pte,prot,t1
1316 tlb_unlock1 spc,t0,t1
1320 nadtlb_check_alias_20:
1321 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1333 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1334 * probei instructions. We don't want to fault for these
1335 * instructions (not only does it not make sense, it can cause
1336 * deadlocks, since some flushes are done with the mmap
1337 * semaphore held). If the translation doesn't exist, we can't
1338 * insert a translation, so have to emulate the side effects
1339 * of the instruction. Since we don't insert a translation
1340 * we can get a lot of faults during a flush loop, so it makes
1341 * sense to try to do it here with minimum overhead. We only
1342 * emulate fdc,fic,pdc,probew,prober instructions whose base
1343 * and index registers are not shadowed. We defer everything
1344 * else to the "slow" path.
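 *
 * In outline (a sketch of the emulation below): decode the opcode from
 * the iir (%cr19), recover the index and base register numbers via
 * get_register, compute the updated base with add,l and write it back
 * via set_register, then set PSW_N so the faulting instruction is
 * nullified on return; a shadowed register (get_register returns -1)
 * forces the slow path instead.
 */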
1347 mfctl %cr19,%r9 /* Get iir */
1349 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1350 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
1352 /* Checks for fdc,fdce,pdc,"fic,4f" only */
1355 cmpb,<>,n %r16,%r17,nadtlb_probe_check
1356 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
1357 BL get_register,%r25
1358 extrw,u %r9,15,5,%r8 /* Get index register # */
1359 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1361 BL get_register,%r25
1362 extrw,u %r9,10,5,%r8 /* Get base register # */
1363 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1364 BL set_register,%r25
1365 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
1370 or %r8,%r9,%r8 /* Set PSW_N */
1377 When there is no translation for the probe address then we
1378 must nullify the insn and return zero in the target register.
1379 This will indicate to the calling code that it does not have
1380 write/read privileges to this address.
1382 This should technically work for prober and probew in PA 1.1,
1383 and also probe,r and probe,w in PA 2.0
1385 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1386 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1392 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1393 BL get_register,%r25 /* Find the target register */
1394 extrw,u %r9,31,5,%r8 /* Get target register */
1395 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1396 BL set_register,%r25
1397 copy %r0,%r1 /* Write zero to target register */
1398 b nadtlb_nullify /* Nullify return insn */
1406 * I miss is a little different, since we allow users to fault
1407 * on the gateway page which is in the kernel address space.
1410 space_adjust spc,va,t0
1412 space_check spc,t0,itlb_fault
1414 L3_ptep ptp,pte,t0,va,itlb_fault
1416 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1417 update_accessed ptp,pte,t0,t1
1419 make_insert_tlb spc,pte,prot,t1
1423 tlb_unlock1 spc,t0,t1
1430 * I miss is a little different, since we allow users to fault
1431 * on the gateway page which is in the kernel address space.
1434 space_adjust spc,va,t0
1436 space_check spc,t0,naitlb_fault
1438 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1440 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1441 update_accessed ptp,pte,t0,t1
1443 make_insert_tlb spc,pte,prot,t1
1447 tlb_unlock1 spc,t0,t1
1451 naitlb_check_alias_20w:
1452 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1464 space_check spc,t0,itlb_fault
1466 L2_ptep ptp,pte,t0,va,itlb_fault
1468 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1469 update_accessed ptp,pte,t0,t1
1471 make_insert_tlb_11 spc,pte,prot
1473 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1476 iitlba pte,(%sr1,va)
1477 iitlbp prot,(%sr1,va)
1479 mtsp t1, %sr1 /* Restore sr1 */
1481 tlb_unlock1 spc,t0,t1
1488 space_check spc,t0,naitlb_fault
1490 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1492 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
1493 update_accessed ptp,pte,t0,t1
1495 make_insert_tlb_11 spc,pte,prot
1497 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1500 iitlba pte,(%sr1,va)
1501 iitlbp prot,(%sr1,va)
1503 mtsp t1, %sr1 /* Restore sr1 */
1505 tlb_unlock1 spc,t0,t1
1509 naitlb_check_alias_11:
1510 do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
1512 iitlba pte,(%sr0, va)
1513 iitlbp prot,(%sr0, va)
1522 space_check spc,t0,itlb_fault
1524 L2_ptep ptp,pte,t0,va,itlb_fault
1526 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1527 update_accessed ptp,pte,t0,t1
1529 make_insert_tlb spc,pte,prot,t1
1535 tlb_unlock1 spc,t0,t1
1542 space_check spc,t0,naitlb_fault
1544 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1546 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
1547 update_accessed ptp,pte,t0,t1
1549 make_insert_tlb spc,pte,prot,t1
1555 tlb_unlock1 spc,t0,t1
1559 naitlb_check_alias_20:
1560 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1572 space_adjust spc,va,t0
1574 space_check spc,t0,dbit_fault
1576 L3_ptep ptp,pte,t0,va,dbit_fault
1578 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1579 update_dirty ptp,pte,t1
1581 make_insert_tlb spc,pte,prot,t1
1585 tlb_unlock0 spc,t0,t1
1594 space_check spc,t0,dbit_fault
1596 L2_ptep ptp,pte,t0,va,dbit_fault
1598 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1599 update_dirty ptp,pte,t1
1601 make_insert_tlb_11 spc,pte,prot
1603 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1606 idtlba pte,(%sr1,va)
1607 idtlbp prot,(%sr1,va)
1609 mtsp t1, %sr1 /* Restore sr1 */
1611 tlb_unlock0 spc,t0,t1
1618 space_check spc,t0,dbit_fault
1620 L2_ptep ptp,pte,t0,va,dbit_fault
1622 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1623 update_dirty ptp,pte,t1
1625 make_insert_tlb spc,pte,prot,t1
1631 tlb_unlock0 spc,t0,t1
1636 .import handle_interruption,code
1640 ldi 31,%r8 /* Use an unused code */
1648 ldi PARISC_ITLB_TRAP,%r8
1662 /* Register saving semantics for system calls:
1664 %r1 clobbered by system call macro in userspace
1665 %r2 saved in PT_REGS by gateway page
1666 %r3 - %r18 preserved by C code (saved by signal code)
1667 %r19 - %r20 saved in PT_REGS by gateway page
1668 %r21 - %r22 non-standard syscall args
1669 stored in kernel stack by gateway page
1670 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1671 %r27 - %r30 saved in PT_REGS by gateway page
1672 %r31 syscall return pointer
1675 /* Floating point registers (FIXME: what do we do with these?)
1677 %fr0 - %fr3 status/exception, not preserved
1678 %fr4 - %fr7 arguments
1679 %fr8 - %fr11 not preserved by C code
1680 %fr12 - %fr21 preserved by C code
1681 %fr22 - %fr31 not preserved by C code
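
	   For reference (a sketch of the matching user-side convention,
	   not taken from this file): userspace typically enters with

		ble	0x100(%sr2, %r0)	- branch to the gateway page
		ldi	__NR_write, %r20	- syscall number, in the
						  delay slot

	   passing arguments in %r26-%r23 (then %r22/%r21) and receiving
	   the return value in %r28.
	 */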
1684 .macro reg_save regs
1685 STREG %r3, PT_GR3(\regs)
1686 STREG %r4, PT_GR4(\regs)
1687 STREG %r5, PT_GR5(\regs)
1688 STREG %r6, PT_GR6(\regs)
1689 STREG %r7, PT_GR7(\regs)
1690 STREG %r8, PT_GR8(\regs)
1691 STREG %r9, PT_GR9(\regs)
1692 STREG %r10,PT_GR10(\regs)
1693 STREG %r11,PT_GR11(\regs)
1694 STREG %r12,PT_GR12(\regs)
1695 STREG %r13,PT_GR13(\regs)
1696 STREG %r14,PT_GR14(\regs)
1697 STREG %r15,PT_GR15(\regs)
1698 STREG %r16,PT_GR16(\regs)
1699 STREG %r17,PT_GR17(\regs)
1700 STREG %r18,PT_GR18(\regs)
1703 .macro reg_restore regs
1704 LDREG PT_GR3(\regs), %r3
1705 LDREG PT_GR4(\regs), %r4
1706 LDREG PT_GR5(\regs), %r5
1707 LDREG PT_GR6(\regs), %r6
1708 LDREG PT_GR7(\regs), %r7
1709 LDREG PT_GR8(\regs), %r8
1710 LDREG PT_GR9(\regs), %r9
1711 LDREG PT_GR10(\regs),%r10
1712 LDREG PT_GR11(\regs),%r11
1713 LDREG PT_GR12(\regs),%r12
1714 LDREG PT_GR13(\regs),%r13
1715 LDREG PT_GR14(\regs),%r14
1716 LDREG PT_GR15(\regs),%r15
1717 LDREG PT_GR16(\regs),%r16
1718 LDREG PT_GR17(\regs),%r17
1719 LDREG PT_GR18(\regs),%r18
1722 .macro fork_like name
1723 ENTRY_CFI(sys_\name\()_wrapper)
1724 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1725 ldo TASK_REGS(%r1),%r1
1728 ldil L%sys_\name, %r31
1729 be R%sys_\name(%sr4,%r31)
1730 STREG %r28, PT_CR27(%r1)
1731 ENDPROC_CFI(sys_\name\()_wrapper)
1739 /* Set the return value for the child */
1741 BL schedule_tail, %r2
1743 finish_child_return:
1744 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1745 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1747 LDREG PT_CR27(%r1), %r3
1754 ENTRY_CFI(sys_rt_sigreturn_wrapper)
1755 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1756 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1757 /* Don't save regs, we are going to restore them from sigcontext. */
1758 STREG %r2, -RP_OFFSET(%r30)
1760 ldo FRAME_SIZE(%r30), %r30
1761 BL sys_rt_sigreturn,%r2
1762 ldo -16(%r30),%r29 /* Reference param save area */
1764 BL sys_rt_sigreturn,%r2
1765 ldo FRAME_SIZE(%r30), %r30
1768 ldo -FRAME_SIZE(%r30), %r30
1769 LDREG -RP_OFFSET(%r30), %r2
1771 /* FIXME: I think we need to restore a few more things here. */
1772 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1773 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1776 /* If the signal was received while the process was blocked on a
1777 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1778 * take us to syscall_exit_rfi and on to intr_return.
1781 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1782 ENDPROC_CFI(sys_rt_sigreturn_wrapper)
1785 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
1786 * via syscall_exit_rfi if the signal was received while the process
1790 /* save return value now */
1793 LDREG TI_TASK(%r1),%r1
1794 STREG %r28,TASK_PT_GR28(%r1)
1796 /* Seems to me that dp could be wrong here, if the syscall involved
1797 * calling a module, and nothing got round to restoring dp on return.
1801 syscall_check_resched:
1803 /* check for reschedule */
1805 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
1806 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1808 .import do_signal,code
1810 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
1811 ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
1812 and,COND(<>) %r19, %r26, %r0
1813 b,n syscall_restore /* skip past if we've nothing to do */
1816 /* Save callee-save registers (for sigcontext).
1817 * FIXME: After this point the process structure should be
1818 * consistent with all the relevant state of the process
1819 * before the syscall. We need to verify this.
1821 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1822 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
1826 ldo -16(%r30),%r29 /* Reference param save area */
1829 BL do_notify_resume,%r2
1830 ldi 1, %r25 /* long in_syscall = 1 */
1832 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1833 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
1836 b,n syscall_check_sig
1839 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1841 /* Are we being ptraced? */
1842 ldw TASK_FLAGS(%r1),%r19
1843 ldi _TIF_SYSCALL_TRACE_MASK,%r2
1844 and,COND(=) %r19,%r2,%r0
1845 b,n syscall_restore_rfi
1847 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
1850 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
1853 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
1854 LDREG TASK_PT_GR19(%r1),%r19
1855 LDREG TASK_PT_GR20(%r1),%r20
1856 LDREG TASK_PT_GR21(%r1),%r21
1857 LDREG TASK_PT_GR22(%r1),%r22
1858 LDREG TASK_PT_GR23(%r1),%r23
1859 LDREG TASK_PT_GR24(%r1),%r24
1860 LDREG TASK_PT_GR25(%r1),%r25
1861 LDREG TASK_PT_GR26(%r1),%r26
1862 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
1863 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
1864 LDREG TASK_PT_GR29(%r1),%r29
1865 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
1867 /* NOTE: We use rsm/ssm pair to make this operation atomic */
1868 LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
1870 copy %r1,%r30 /* Restore user sp */
1871 mfsp %sr3,%r1 /* Get user space id */
1872 mtsp %r1,%sr7 /* Restore sr7 */
1875 /* Set sr2 to zero for userspace syscalls to work. */
1877 mtsp %r1,%sr4 /* Restore sr4 */
1878 mtsp %r1,%sr5 /* Restore sr5 */
1879 mtsp %r1,%sr6 /* Restore sr6 */
1881 depi 3,31,2,%r31 /* ensure return to user mode. */
1884 /* decide whether to reset the wide mode bit
1886 * For a syscall, the W bit is stored in the lowest bit
1887 * of sp. Extract it and reset W if it is zero */
1888 extrd,u,*<> %r30,63,1,%r1
1890 /* now reset the lowest bit of sp if it was set */
1893 be,n 0(%sr3,%r31) /* return to user space */
	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
1897 * This sets up pt_regs so we can return via intr_restore, which is not
1898 * the most efficient way of doing things, but it works.
1900 syscall_restore_rfi:
1901 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
1902 mtctl %r2,%cr0 /* for immediate trap */
1903 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
1904 ldi 0x0b,%r20 /* Create new PSW */
1905 depi -1,13,1,%r20 /* C, Q, D, and I bits */
1907 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1908 * set in thread_info.h and converted to PA bitmap
1909 * numbers in asm-offsets.c */
1911 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
1912 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1913 depi -1,27,1,%r20 /* R bit */
1915 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1916 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1917 depi -1,7,1,%r20 /* T bit */
1919 STREG %r20,TASK_PT_PSW(%r1)
1921 /* Always store space registers, since sr3 can be changed (e.g. fork) */
1924 STREG %r25,TASK_PT_SR3(%r1)
1925 STREG %r25,TASK_PT_SR4(%r1)
1926 STREG %r25,TASK_PT_SR5(%r1)
1927 STREG %r25,TASK_PT_SR6(%r1)
1928 STREG %r25,TASK_PT_SR7(%r1)
1929 STREG %r25,TASK_PT_IASQ0(%r1)
1930 STREG %r25,TASK_PT_IASQ1(%r1)
1933 /* Now if old D bit is clear, it means we didn't save all registers
1934 * on syscall entry, so do that now. This only happens on TRACEME
1935 * calls, or if someone attached to us while we were on a syscall.
1936 * We could make this more efficient by not saving r3-r18, but
1937 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * critical.
	 */
1941 bb,< %r2,30,pt_regs_ok /* Branch if D set */
1942 ldo TASK_REGS(%r1),%r25
1943 reg_save %r25 /* Save r3 to r18 */
1945 /* Save the current sr */
1947 STREG %r2,TASK_PT_SR0(%r1)
1949 /* Save the scratch sr */
1951 STREG %r2,TASK_PT_SR1(%r1)
1953 /* sr2 should be set to zero for userspace syscalls */
1954 STREG %r0,TASK_PT_SR2(%r1)
1956 LDREG TASK_PT_GR31(%r1),%r2
1957 depi 3,31,2,%r2 /* ensure return to user mode. */
1958 STREG %r2,TASK_PT_IAOQ0(%r1)
1960 STREG %r2,TASK_PT_IAOQ1(%r1)
1965 LDREG TASK_PT_IAOQ0(%r1),%r2
1966 depi 3,31,2,%r2 /* ensure return to user mode. */
1967 STREG %r2,TASK_PT_IAOQ0(%r1)
1968 LDREG TASK_PT_IAOQ1(%r1),%r2
1970 STREG %r2,TASK_PT_IAOQ1(%r1)
1975 load32 syscall_check_resched,%r2 /* if resched, we start over again */
1976 load32 schedule,%r19
1977 bv %r0(%r19) /* jumps to schedule() */
1979 ldo -16(%r30),%r29 /* Reference param save area */
1986 #ifdef CONFIG_FUNCTION_TRACER
1988 .import ftrace_function_trampoline,code
1989 .align L1_CACHE_BYTES
1990 ENTRY_CFI(mcount, caller)
1992 .export _mcount,data
1994 * The 64bit mcount() function pointer needs 4 dwords, of which the
1995 * first two are free. We optimize it here and put 2 instructions for
1996 * calling mcount(), and 2 instructions for ftrace_stub(). That way we
	 * have it all on one L1 cacheline.
	 */
2000 b ftrace_function_trampoline
2001 copy %r3, %arg2 /* caller original %sp */
2004 .type ftrace_stub, @function
2013 .dword 0 /* code in head.S puts value of global gp here */
2017 #ifdef CONFIG_DYNAMIC_FTRACE
2020 #define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
2022 #define FTRACE_FRAME_SIZE FRAME_SIZE
2024 ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2026 .global ftrace_caller
2028 STREG %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
2029 ldo -FTRACE_FRAME_SIZE(%sp), %r3
2030 STREG %rp, -RP_OFFSET(%r3)
2032 /* Offset 0 is already allocated for %r1 */
2033 STREG %r23, 2*REG_SZ(%r3)
2034 STREG %r24, 3*REG_SZ(%r3)
2035 STREG %r25, 4*REG_SZ(%r3)
2036 STREG %r26, 5*REG_SZ(%r3)
2037 STREG %r28, 6*REG_SZ(%r3)
2038 STREG %r29, 7*REG_SZ(%r3)
2040 STREG %r19, 8*REG_SZ(%r3)
2041 STREG %r20, 9*REG_SZ(%r3)
2042 STREG %r21, 10*REG_SZ(%r3)
2043 STREG %r22, 11*REG_SZ(%r3)
2044 STREG %r27, 12*REG_SZ(%r3)
2045 STREG %r31, 13*REG_SZ(%r3)
2052 ldi 0, %r23 /* no pt_regs */
2053 b,l ftrace_function_trampoline, %rp
2056 LDREG -RP_OFFSET(%r3), %rp
2057 LDREG 2*REG_SZ(%r3), %r23
2058 LDREG 3*REG_SZ(%r3), %r24
2059 LDREG 4*REG_SZ(%r3), %r25
2060 LDREG 5*REG_SZ(%r3), %r26
2061 LDREG 6*REG_SZ(%r3), %r28
2062 LDREG 7*REG_SZ(%r3), %r29
2064 LDREG 8*REG_SZ(%r3), %r19
2065 LDREG 9*REG_SZ(%r3), %r20
2066 LDREG 10*REG_SZ(%r3), %r21
2067 LDREG 11*REG_SZ(%r3), %r22
2068 LDREG 12*REG_SZ(%r3), %r27
2069 LDREG 13*REG_SZ(%r3), %r31
2071 LDREG 1*REG_SZ(%r3), %r3
2073 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1
2074 /* Adjust return point to jump back to beginning of traced function */
2078 ENDPROC_CFI(ftrace_caller)
2080 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
2081 ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
2082 CALLS,SAVE_RP,SAVE_SP)
2084 .global ftrace_regs_caller
2086 ldo -FTRACE_FRAME_SIZE(%sp), %r1
2087 STREG %rp, -RP_OFFSET(%r1)
2090 ldo PT_SZ_ALGN(%sp), %sp
2092 STREG %rp, PT_GR2(%r1)
2093 STREG %r3, PT_GR3(%r1)
2094 STREG %r4, PT_GR4(%r1)
2095 STREG %r5, PT_GR5(%r1)
2096 STREG %r6, PT_GR6(%r1)
2097 STREG %r7, PT_GR7(%r1)
2098 STREG %r8, PT_GR8(%r1)
2099 STREG %r9, PT_GR9(%r1)
2100 STREG %r10, PT_GR10(%r1)
2101 STREG %r11, PT_GR11(%r1)
2102 STREG %r12, PT_GR12(%r1)
2103 STREG %r13, PT_GR13(%r1)
2104 STREG %r14, PT_GR14(%r1)
2105 STREG %r15, PT_GR15(%r1)
2106 STREG %r16, PT_GR16(%r1)
2107 STREG %r17, PT_GR17(%r1)
2108 STREG %r18, PT_GR18(%r1)
2109 STREG %r19, PT_GR19(%r1)
2110 STREG %r20, PT_GR20(%r1)
2111 STREG %r21, PT_GR21(%r1)
2112 STREG %r22, PT_GR22(%r1)
2113 STREG %r23, PT_GR23(%r1)
2114 STREG %r24, PT_GR24(%r1)
2115 STREG %r25, PT_GR25(%r1)
2116 STREG %r26, PT_GR26(%r1)
2117 STREG %r27, PT_GR27(%r1)
2118 STREG %r28, PT_GR28(%r1)
2119 STREG %r29, PT_GR29(%r1)
2120 STREG %r30, PT_GR30(%r1)
2121 STREG %r31, PT_GR31(%r1)
2123 STREG %r26, PT_SAR(%r1)
2126 LDREG -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
2128 ldo -FTRACE_FRAME_SIZE(%r1), %arg2
2129 b,l ftrace_function_trampoline, %rp
2130 copy %r1, %arg3 /* struct pt_regs */
2132 ldo -PT_SZ_ALGN(%sp), %r1
2134 LDREG PT_SAR(%r1), %rp
2137 LDREG PT_GR2(%r1), %rp
2138 LDREG PT_GR3(%r1), %r3
2139 LDREG PT_GR4(%r1), %r4
2140 LDREG PT_GR5(%r1), %r5
2141 LDREG PT_GR6(%r1), %r6
2142 LDREG PT_GR7(%r1), %r7
2143 LDREG PT_GR8(%r1), %r8
2144 LDREG PT_GR9(%r1), %r9
2145 LDREG PT_GR10(%r1),%r10
2146 LDREG PT_GR11(%r1),%r11
2147 LDREG PT_GR12(%r1),%r12
2148 LDREG PT_GR13(%r1),%r13
2149 LDREG PT_GR14(%r1),%r14
2150 LDREG PT_GR15(%r1),%r15
2151 LDREG PT_GR16(%r1),%r16
2152 LDREG PT_GR17(%r1),%r17
2153 LDREG PT_GR18(%r1),%r18
2154 LDREG PT_GR19(%r1),%r19
2155 LDREG PT_GR20(%r1),%r20
2156 LDREG PT_GR21(%r1),%r21
2157 LDREG PT_GR22(%r1),%r22
2158 LDREG PT_GR23(%r1),%r23
2159 LDREG PT_GR24(%r1),%r24
2160 LDREG PT_GR25(%r1),%r25
2161 LDREG PT_GR26(%r1),%r26
2162 LDREG PT_GR27(%r1),%r27
2163 LDREG PT_GR28(%r1),%r28
2164 LDREG PT_GR29(%r1),%r29
2165 LDREG PT_GR30(%r1),%r30
2166 LDREG PT_GR31(%r1),%r31
2168 ldo -PT_SZ_ALGN(%sp), %sp
2169 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1
2170 /* Adjust return point to jump back to beginning of traced function */
2174 ENDPROC_CFI(ftrace_regs_caller)
2179 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2181 ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
2182 .export parisc_return_to_handler,data
2183 parisc_return_to_handler:
2185 STREG %r0,-RP_OFFSET(%sp) /* store 0 as %rp */
2187 STREGM %r1,FRAME_SIZE(%sp)
2195 /* call ftrace_return_to_handler(0) */
2196 .import ftrace_return_to_handler,code
2197 load32 ftrace_return_to_handler,%ret0
2198 load32 .Lftrace_ret,%r2
2200 ldo -16(%sp),%ret1 /* Reference param save area */
2209 /* restore original return values */
2213 /* return from function */
2219 LDREGM -FRAME_SIZE(%sp),%r3
2220 ENDPROC_CFI(return_to_handler)
2222 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2224 #endif /* CONFIG_FUNCTION_TRACER */
2226 #ifdef CONFIG_IRQSTACKS
2227 /* void call_on_stack(unsigned long param1, void *func,
2228 unsigned long new_stack) */
2229 ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2230 ENTRY(_call_on_stack)
2233 /* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across the call.  For
2235 CONFIG_64BIT, the argument pointer is left to point at the
2236 argument region allocated for the call to call_on_stack. */
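
	/* Called from C roughly as (a sketch, using the prototype above):
	 *	call_on_stack((unsigned long)arg, (void *)func,
	 *		      (unsigned long)irq_stack_base);
	 * where irq_stack_base is the base of the stack to switch to. */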
2238 /* Switch to new stack. We allocate two frames. */
2239 ldo 2*FRAME_SIZE(%arg2), %sp
2240 # ifdef CONFIG_64BIT
2241 /* Save previous stack pointer and return pointer in frame marker */
2242 STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2243 /* Calls always use function descriptor */
2244 LDREG 16(%arg1), %arg1
2246 STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
2247 LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
2249 LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
2251 /* Save previous stack pointer and return pointer in frame marker */
2252 STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
2253 STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2254 /* Calls use function descriptor if PLABEL bit is set */
2255 bb,>=,n %arg1, 30, 1f
2257 LDREG 0(%arg1), %arg1
2259 be,l 0(%sr4,%arg1), %sr0, %r31
2261 LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
2263 LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
2264 # endif /* CONFIG_64BIT */
2265 ENDPROC_CFI(call_on_stack)
2266 #endif /* CONFIG_IRQSTACKS */
2268 ENTRY_CFI(get_register)
2270 * get_register is used by the non access tlb miss handlers to
2271 * copy the value of the general register specified in r8 into
2272 * r1. This routine can't be used for shadowed registers, since
2273 * the rfir will restore the original value. So, for the shadowed
2274 * registers we put a -1 into r1 to indicate that the register
2275 * should not be used (the register being copied could also have
2276 * a -1 in it, but that is OK, it just means that we will have
2277 * to use the slow path instead).
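 *
 * Callers load the register number into %r8 and branch here with
 * BL get_register,%r25 (see the nadtlb emulation above); the value,
 * or -1 for a shadowed register, comes back in %r1.
 */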
2281 bv %r0(%r25) /* r0 */
2283 bv %r0(%r25) /* r1 - shadowed */
2285 bv %r0(%r25) /* r2 */
2287 bv %r0(%r25) /* r3 */
2289 bv %r0(%r25) /* r4 */
2291 bv %r0(%r25) /* r5 */
2293 bv %r0(%r25) /* r6 */
2295 bv %r0(%r25) /* r7 */
2297 bv %r0(%r25) /* r8 - shadowed */
2299 bv %r0(%r25) /* r9 - shadowed */
2301 bv %r0(%r25) /* r10 */
2303 bv %r0(%r25) /* r11 */
2305 bv %r0(%r25) /* r12 */
2307 bv %r0(%r25) /* r13 */
2309 bv %r0(%r25) /* r14 */
2311 bv %r0(%r25) /* r15 */
2313 bv %r0(%r25) /* r16 - shadowed */
2315 bv %r0(%r25) /* r17 - shadowed */
2317 bv %r0(%r25) /* r18 */
2319 bv %r0(%r25) /* r19 */
2321 bv %r0(%r25) /* r20 */
2323 bv %r0(%r25) /* r21 */
2325 bv %r0(%r25) /* r22 */
2327 bv %r0(%r25) /* r23 */
2329 bv %r0(%r25) /* r24 - shadowed */
2331 bv %r0(%r25) /* r25 - shadowed */
2333 bv %r0(%r25) /* r26 */
2335 bv %r0(%r25) /* r27 */
2337 bv %r0(%r25) /* r28 */
2339 bv %r0(%r25) /* r29 */
2341 bv %r0(%r25) /* r30 */
2343 bv %r0(%r25) /* r31 */
2345 ENDPROC_CFI(get_register)
2348 ENTRY_CFI(set_register)
2350 * set_register is used by the non access tlb miss handlers to
 * copy the value of r1 into the general register specified in
 * r8.
 */
2356 bv %r0(%r25) /* r0 (silly, but it is a place holder) */
2358 bv %r0(%r25) /* r1 */
2360 bv %r0(%r25) /* r2 */
2362 bv %r0(%r25) /* r3 */
2364 bv %r0(%r25) /* r4 */
2366 bv %r0(%r25) /* r5 */
2368 bv %r0(%r25) /* r6 */
2370 bv %r0(%r25) /* r7 */
2372 bv %r0(%r25) /* r8 */
2374 bv %r0(%r25) /* r9 */
2376 bv %r0(%r25) /* r10 */
2378 bv %r0(%r25) /* r11 */
2380 bv %r0(%r25) /* r12 */
2382 bv %r0(%r25) /* r13 */
2384 bv %r0(%r25) /* r14 */
2386 bv %r0(%r25) /* r15 */
2388 bv %r0(%r25) /* r16 */
2390 bv %r0(%r25) /* r17 */
2392 bv %r0(%r25) /* r18 */
2394 bv %r0(%r25) /* r19 */
2396 bv %r0(%r25) /* r20 */
2398 bv %r0(%r25) /* r21 */
2400 bv %r0(%r25) /* r22 */
2402 bv %r0(%r25) /* r23 */
2404 bv %r0(%r25) /* r24 */
2406 bv %r0(%r25) /* r25 */
2408 bv %r0(%r25) /* r26 */
2410 bv %r0(%r25) /* r27 */
2412 bv %r0(%r25) /* r28 */
2414 bv %r0(%r25) /* r29 */
2416 bv %r0(%r25) /* r30 */
2418 bv %r0(%r25) /* r31 */
2420 ENDPROC_CFI(set_register)