1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
5 * kernel entry points (interruptions, system call wrappers)
6 * Copyright (C) 1999,2000 Philipp Rumpf
7 * Copyright (C) 1999 SuSE GmbH Nuernberg
8 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
9 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
12 #include <asm/asm-offsets.h>
14 /* we have the following possibilities to act on an interruption:
15 * - handle in assembly and use shadowed registers only
16 * - save registers to kernel stack and handle in assembly or C */
20 #include <asm/cache.h> /* for L1_CACHE_SHIFT */
21 #include <asm/assembly.h> /* for LDREG/STREG defines */
22 #include <asm/signal.h>
23 #include <asm/unistd.h>
25 #include <asm/traps.h>
26 #include <asm/thread_info.h>
27 #include <asm/alternative.h>
29 #include <linux/linkage.h>
30 #include <linux/pgtable.h>
38 .import pa_tlb_lock,data
39 .macro load_pa_tlb_lock reg
41 addil L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg
44 /* space_to_prot macro creates a prot id from a space id */
46 #if (SPACEID_SHIFT) == 0
47 .macro space_to_prot spc prot
48 depd,z \spc,62,31,\prot
51 .macro space_to_prot spc prot
52 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
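	/* A rough C model of what space_to_prot computes (illustrative only;
	 * PA numbers bits from the MSB, so "depd,z \spc,62,31,\prot" places
	 * the low 31 bits of the space id one bit up from the LSB):
	 *
	 *	unsigned long space_to_prot(unsigned long spc)
	 *	{
	 *		return (spc & 0x7fffffffUL) << 1;	// protection id
	 *	}
	 */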
56 /* Switch to virtual mapping, trashing only %r1 */
59 rsm PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
64 load32 KERNEL_PSW, %r1
66 rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
67 mtctl %r0, %cr17 /* Clear IIASQ tail */
68 mtctl %r0, %cr17 /* Clear IIASQ head */
71 mtctl %r1, %cr18 /* Set IIAOQ tail */
73 mtctl %r1, %cr18 /* Set IIAOQ head */
80 * The "get_stack" macros are responsible for determining the kernel stack value.
84 * Already using a kernel stack, so call the
85 * get_stack_use_r30 macro to push a pt_regs structure
86 * on the stack, and store registers there.
88 * Need to set up a kernel stack, so call the
89 * get_stack_use_cr30 macro to set up a pointer
90 * to the pt_regs structure contained within the
91 * task pointer pointed to by cr30. Set the stack
92 * pointer to point to the end of the task structure.
94 * Note that we use shadowed registers for temps until
95 * we can save %r26 and %r29. %r26 is used to preserve
96 * %r8 (a shadowed register) which temporarily contained
97 * either the fault type ("code") or the eirr. We need
98 * to use a non-shadowed register to carry the value over
99 * the rfir in virt_map. We use %r26 since this value winds
100 * up being passed as the argument to either do_cpu_irq_mask
101 * or handle_interruption. %r29 is used to hold a pointer
102 * to the register save area, and once again, it needs
103 * be a non-shadowed register so that it survives the rfir.
105 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
108 .macro get_stack_use_cr30
110 /* we save the registers in the task struct */
114 ldo THREAD_SZ_ALGN(%r1), %r30
118 LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
120 ldo TASK_REGS(%r9),%r9
121 STREG %r17,PT_GR30(%r9)
122 STREG %r29,PT_GR29(%r9)
123 STREG %r26,PT_GR26(%r9)
124 STREG %r16,PT_SR7(%r9)
128 .macro get_stack_use_r30
130 /* we put a struct pt_regs on the stack and save the registers there */
134 ldo PT_SZ_ALGN(%r30),%r30
135 STREG %r1,PT_GR30(%r9)
136 STREG %r29,PT_GR29(%r9)
137 STREG %r26,PT_GR26(%r9)
138 STREG %r16,PT_SR7(%r9)
143 LDREG PT_GR1(%r29), %r1
144 LDREG PT_GR30(%r29),%r30
145 LDREG PT_GR29(%r29),%r29
148 /* default interruption handler
149 * (calls traps.c:handle_interruption) */
156 /* Interrupt interruption handler
157 * (calls irq.c:do_cpu_irq_mask) */
164 .import os_hpmc, code
168 nop /* must be a NOP, will be patched later */
169 load32 PA(os_hpmc), %r3
172 .word 0 /* checksum (will be patched) */
173 .word 0 /* address of handler */
174 .word 0 /* length of handler */
178 * Performance Note: Instructions will be moved up into
179 * this part of the code later on, once we are sure
180 * that the tlb miss handlers are close to final form.
183 /* Register definitions for tlb miss handler macros */
185 va = r8 /* virtual address for which the trap occurred */
186 spc = r24 /* space for which the trap occurred */
191 * itlb miss interruption handler (parisc 1.1 - 32 bit)
205 * itlb miss interruption handler (parisc 2.0)
222 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
225 .macro naitlb_11 code
236 * naitlb miss interruption handler (parisc 2.0)
239 .macro naitlb_20 code
254 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
268 * dtlb miss interruption handler (parisc 2.0)
285 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
287 .macro nadtlb_11 code
297 /* nadtlb miss interruption handler (parisc 2.0) */
299 .macro nadtlb_20 code
314 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
328 * dirty bit trap interruption handler (parisc 2.0)
344 /* In LP64, the space contains part of the upper 32 bits of the
345 * fault address. We have to extract this and place it in the va,
346 * zeroing the corresponding bits in the space register */
347 .macro space_adjust spc,va,tmp
349 extrd,u \spc,63,SPACEID_SHIFT,\tmp
350 depd %r0,63,SPACEID_SHIFT,\spc
351 depd \tmp,31,SPACEID_SHIFT,\va
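	/* Hedged C sketch of the three instructions above (LSB-0 numbering;
	 * the low SPACEID_SHIFT bits of the space register become bits
	 * 32 and up of the va):
	 *
	 *	unsigned long mask = (1UL << SPACEID_SHIFT) - 1;
	 *	tmp  = spc & mask;				// upper va bits
	 *	spc &= ~mask;					// zero them in spc
	 *	va   = (va & ~(mask << 32)) | (tmp << 32);	// deposit into va
	 */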
355 .import swapper_pg_dir,code
357 /* Get the pgd. For faults on space zero (kernel space), this
358 * is simply swapper_pg_dir. For user space faults, the
359 * pgd is stored in %cr25 */
360 .macro get_pgd spc,reg
361 ldil L%PA(swapper_pg_dir),\reg
362 ldo R%PA(swapper_pg_dir)(\reg),\reg
363 or,COND(=) %r0,\spc,%r0
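	/* Equivalent C, per the comment above (mfctl(25) reads the user pgd
	 * cached in %cr25; illustrative only):
	 *
	 *	pgd = (spc == 0) ? &swapper_pg_dir : (pgd_t *)mfctl(25);
	 */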
368 space_check(spc,tmp,fault)
370 spc - The space we saw the fault with.
371 tmp - The place to store the current space.
372 fault - Function to call on failure.
374 Only allow faults on different spaces from the
375 currently active one if we're the kernel
378 .macro space_check spc,tmp,fault
380 /* check against %r0 which is the same value as LINUX_GATEWAY_SPACE */
381 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
382 * as kernel, so defeat the space check if it is */
385 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
386 cmpb,COND(<>),n \tmp,\spc,\fault
389 /* Look up a PTE in a 2-Level scheme (faulting at each
390 * level if the entry isn't present)
392 * NOTE: we use ldw even for LP64, since the short pointers
393 * can address up to 1TB
395 .macro L2_ptep pmd,pte,index,va,fault
396 #if CONFIG_PGTABLE_LEVELS == 3
397 extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
399 # if defined(CONFIG_64BIT)
400 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
402 # if PAGE_SIZE > 4096
403 extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
405 extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
409 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
411 ldw,s \index(\pmd),\pmd
412 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
413 dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
414 SHLREG \pmd,PxD_VALUE_SHIFT,\pmd
415 extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
416 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
417 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
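	/* A loose C rendering of the walk above (PxD_* names are stand-ins
	 * for the asm-offsets constants; a short 32-bit "ldw" pointer is
	 * enough, per the NOTE above):
	 *
	 *	u32 pmd = ((u32 *)pmd_base)[pmd_index(va)];
	 *	if (!(pmd & PxD_PRESENT))
	 *		goto fault;
	 *	pmd &= ~PxD_FLAGS;			// clear flag bits
	 *	pte_base = (unsigned long)pmd << PxD_VALUE_SHIFT;
	 *	pte = ((pte_t *)pte_base)[pte_index(va)];
	 */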
420 /* Look up PTE in a 3-Level scheme.
422 * Here we implement a Hybrid L2/L3 scheme: we allocate the
423 * first pmd adjacent to the pgd. This means that we can
424 * subtract a constant offset to get to it. The pmd and pgd
425 * sizes are arranged so that a single pmd covers 4GB (giving
426 * a full LP64 process access to 8TB) so our lookups are
427 * effectively L2 for the first 4GB of the kernel (i.e. for
428 * all ILP32 processes and all the kernel for machines with
429 * under 4GB of memory) */
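	/* Coverage arithmetic behind the 4GB/8TB figures above (values are
	 * illustrative, not pulled from asm-offsets):
	 *
	 *	one pmd spans 1UL << ASM_PGDIR_SHIFT bytes == 4 GB
	 *	with 2^11 pgd entries: 2048 * 4 GB == 8 TB per process
	 */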
430 .macro L3_ptep pgd,pte,index,va,fault
431 #if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
432 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
433 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
434 ldw,s \index(\pgd),\pgd
435 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
436 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
437 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
438 shld \pgd,PxD_VALUE_SHIFT,\index
439 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
441 extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
442 ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
444 L2_ptep \pgd,\pte,\index,\va,\fault
447 /* Acquire pa_tlb_lock lock and check page is present. */
448 .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
450 98: cmpib,COND(=),n 0,\spc,2f
451 load_pa_tlb_lock \tmp
452 1: LDCW 0(\tmp),\tmp1
453 cmpib,COND(=) 0,\tmp1,1b
456 bb,<,n \pte,_PAGE_PRESENT_BIT,3f
459 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
461 2: LDREG 0(\ptp),\pte
462 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
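	/* A minimal C sketch of the protocol above (kernel-space faults skip
	 * the lock entirely; __ldcw returning zero means the lock is held):
	 *
	 *	if (spc != 0)
	 *		while (__ldcw(&pa_tlb_lock) == 0)
	 *			;			// spin until acquired
	 *	pte = *ptp;				// (re)read the pte
	 *	if (!(pte & _PAGE_PRESENT))
	 *		goto fault;
	 */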
466 /* Release pa_tlb_lock lock without reloading lock address.
467 Note that the values in the register spc are limited to
468 NR_SPACE_IDS (262144). Thus, the stw instruction always
469 stores a nonzero value even when register spc is 64 bits.
470 We use an ordered store to ensure all prior accesses are
471 performed prior to releasing the lock. */
472 .macro tlb_unlock0 spc,tmp
474 98: or,COND(=) %r0,\spc,%r0
476 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
480 /* Release pa_tlb_lock lock. */
481 .macro tlb_unlock1 spc,tmp
483 98: load_pa_tlb_lock \tmp
484 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
485 tlb_unlock0 \spc,\tmp
489 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
490 * don't needlessly dirty the cache line if it was already set */
491 .macro update_accessed ptp,pte,tmp,tmp1
492 ldi _PAGE_ACCESSED,\tmp1
494 and,COND(<>) \tmp1,\pte,%r0
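	/* i.e., roughly:
	 *
	 *	if (!(pte & _PAGE_ACCESSED))		// store (and dirty the
	 *		*ptp = pte | _PAGE_ACCESSED;	// cache line) only when
	 *						// not already set
	 */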
498 /* Set the dirty bit (and accessed bit). No need to be
499 * clever, this is only used from the dirty fault */
500 .macro update_dirty ptp,pte,tmp
501 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
506 /* We have (depending on the page size):
507 * - 38 to 52-bit Physical Page Number
508 * - 12 to 26-bit page offset
510 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
511 * and a CPU TLB 4k PFN (4k => 12 bits to shift) */
512 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
513 #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
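	/* Worked example: with a 16k kernel PAGE_SIZE, PAGE_SHIFT is 14 and
	 * PAGE_ADD_SHIFT is 2, so a kernel pfn becomes a 4k CPU TLB pfn via:
	 *
	 *	tlb_pfn = pfn << PAGE_ADD_SHIFT;	// 16k pfn -> 4k pfn
	 */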
515 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
516 .macro convert_for_tlb_insert20 pte,tmp
517 #ifdef CONFIG_HUGETLB_PAGE
519 extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
520 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
522 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
523 (63-58)+PAGE_ADD_SHIFT,\pte
524 extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0
525 depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
526 (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
527 #else /* Huge pages disabled */
528 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
529 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
530 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
531 (63-58)+PAGE_ADD_SHIFT,\pte
535 /* Convert the pte and prot to tlb insertion values. How
536 * this happens is quite subtle, read below */
537 .macro make_insert_tlb spc,pte,prot,tmp
538 space_to_prot \spc \prot /* create prot id from space */
539 /* The following is the real subtlety. This is depositing
540 * T <-> _PAGE_REFTRAP
542 * B <-> _PAGE_DMB (memory break)
544 * Then incredible subtlety: The access rights are
545 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
546 * See 3-14 of the parisc 2.0 manual
548 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
549 * trigger an access rights trap in user space if the user
550 * tries to read an unreadable page */
553 /* PAGE_USER indicates the page can be read with user privileges,
554 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
555 * contains _PAGE_READ) */
556 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
558 /* If we're a gateway page, drop PL2 back to zero for promotion
559 * to kernel privilege (so we can execute the page as kernel).
560 * Any privilege promotion page always denies read and write */
561 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
562 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
564 /* Enforce uncacheable pages.
565 * This should ONLY be used for MMIO on PA 2.0 machines.
566 * Memory/DMA is cache coherent on all PA2.0 machines we support
567 * (that means T-class is NOT supported) and the memory controllers
568 * on most of those machines only handle cache transactions.
570 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
573 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
574 convert_for_tlb_insert20 \pte \tmp
577 /* Identical macro to make_insert_tlb above, except it
578 * makes the tlb entry for the differently formatted pa11
579 * insertion instructions */
580 .macro make_insert_tlb_11 spc,pte,prot
581 zdep \spc,30,15,\prot
583 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
585 extru,= \pte,_PAGE_USER_BIT,1,%r0
586 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
587 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
588 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
590 /* Get rid of prot bits and convert to page addr for iitlba */
592 depi 0,31,ASM_PFN_PTE_SHIFT,\pte
593 SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
596 /* This is for ILP32 PA2.0 only. The TLB insertion needs
597 * to extend into I/O space if the address is 0xfXXXXXXX
598 * so we extend the f's into the top word of the pte in this case. */
600 .macro f_extend pte,tmp
601 extrd,s \pte,42,4,\tmp
603 extrd,s \pte,63,25,\pte
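	/* The intended effect, roughly (illustrative C; the real code works
	 * on the already-shifted page address held in \pte):
	 *
	 *	if ((addr & 0xf0000000UL) == 0xf0000000UL)	// I/O range
	 *		addr |= 0xffffffff00000000UL;		// extend the f's
	 */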
606 /* The alias region is an 8MB aligned 16MB to do clear and
607 * copy user pages at addresses congruent with the user pages.
610 * To use the alias page, you set %r26 up with the 'to' TLB
611 * entry (identifying the physical page) and %r23 up with
612 * the 'from' TLB entry (or nothing if only a 'to' entry---for
613 * clear_user_page_asm) */
614 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
615 cmpib,COND(<>),n 0,\spc,\fault
616 ldil L%(TMPALIAS_MAP_START),\tmp
617 #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
618 /* on LP64, ldi will sign extend into the upper 32 bits,
619 * which is behaviour we don't want */
624 cmpb,COND(<>),n \tmp,\tmp1,\fault
625 mfctl %cr19,\tmp /* iir */
626 /* get the opcode (first six bits) into \tmp */
627 extrw,u \tmp,5,6,\tmp
629 * Only setting the T bit prevents data cache movein
630 * Setting access rights to zero prevents instruction cache movein
632 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
633 * to type field and _PAGE_READ goes to top bit of PL1
635 ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
637 * so if the opcode is one (i.e. this is a memory management
638 * instruction) nullify the next load so \prot is only T.
639 * Otherwise this is a normal data operation
641 cmpiclr,= 0x01,\tmp,%r0
642 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
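	/* The two ldi's plus the nullify amount to (sketch):
	 *
	 *	prot = _PAGE_REFTRAP | _PAGE_READ | _PAGE_WRITE;	// T only
	 *	if (opcode != 0x01)			// normal data op
	 *		prot = _PAGE_DIRTY | _PAGE_READ | _PAGE_WRITE;
	 */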
644 depd,z \prot,8,7,\prot
647 depw,z \prot,8,7,\prot
649 .error "undefined PA type to do_alias"
653 * OK, it is in the temp alias region, check whether "from" or "to".
654 * Check "subtle" note in pacache.S re: r23/r26.
657 extrd,u,*= \va,41,1,%r0
659 extrw,u,= \va,9,1,%r0
661 or,COND(tr) %r23,%r0,\pte
667 * Fault_vectors are architecturally required to be aligned on a 2K boundary
674 ENTRY(fault_vector_20)
675 /* First vector is invalid (0) */
676 .ascii "cows can fly"
685 itlb_20 PARISC_ITLB_TRAP
717 ENTRY(fault_vector_11)
718 /* First vector is invalid (0) */
719 .ascii "cows can fly"
728 itlb_11 PARISC_ITLB_TRAP
757 /* Fault vector is separately protected and *must* be on its own page */
760 .import handle_interruption,code
761 .import do_cpu_irq_mask,code
766 * copy_thread moved args into task save area.
769 ENTRY(ret_from_kernel_thread)
770 /* Call schedule_tail first though */
771 BL schedule_tail, %r2
774 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
775 LDREG TASK_PT_GR25(%r1), %r26
777 LDREG TASK_PT_GR27(%r1), %r27
779 LDREG TASK_PT_GR26(%r1), %r1
782 b finish_child_return
784 END(ret_from_kernel_thread)
788 * struct task_struct *_switch_to(struct task_struct *prev,
789 * struct task_struct *next)
791 * switch kernel stacks and return prev */
792 ENTRY_CFI(_switch_to)
793 STREG %r2, -RP_OFFSET(%r30)
798 load32 _switch_to_ret, %r2
800 STREG %r2, TASK_PT_KPC(%r26)
801 LDREG TASK_PT_KPC(%r25), %r2
803 STREG %r30, TASK_PT_KSP(%r26)
804 LDREG TASK_PT_KSP(%r25), %r30
805 LDREG TASK_THREAD_INFO(%r25), %r25
809 ENTRY(_switch_to_ret)
810 mtctl %r0, %cr0 /* Needed for single stepping */
814 LDREG -RP_OFFSET(%r30), %r2
817 ENDPROC_CFI(_switch_to)
820 * Common rfi return path for interruptions, kernel execve, and
821 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
822 * return via this path if the signal was received when the process
823 * was running; if the process was blocked on a syscall then the
824 * normal syscall_exit path is used. All syscalls for traced
825 * processes exit via intr_restore.
827 * XXX If any syscalls that change a process's space id ever exit
828 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
835 ENTRY_CFI(syscall_exit_rfi)
837 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
838 ldo TASK_REGS(%r16),%r16
839 /* Force iaoq to userspace, as the user has had access to our current
840 * context via sigcontext. Also filter the PSW for the same reason.
842 LDREG PT_IAOQ0(%r16),%r19
844 STREG %r19,PT_IAOQ0(%r16)
845 LDREG PT_IAOQ1(%r16),%r19
847 STREG %r19,PT_IAOQ1(%r16)
848 LDREG PT_PSW(%r16),%r19
849 load32 USER_PSW_MASK,%r1
851 load32 USER_PSW_HI_MASK,%r20
854 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
856 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
857 STREG %r19,PT_PSW(%r16)
860 * If we aren't being traced, we never saved space registers
861 * (we don't store them in the sigcontext), so set them
862 * to "proper" values now (otherwise we'll wind up restoring
863 * whatever was last stored in the task structure, which might
864 * be inconsistent if an interrupt occurred while on the gateway
865 * page). Note that we may be "trashing" values the user put in
866 * them, but we don't support the user changing them.
869 STREG %r0,PT_SR2(%r16)
871 STREG %r19,PT_SR0(%r16)
872 STREG %r19,PT_SR1(%r16)
873 STREG %r19,PT_SR3(%r16)
874 STREG %r19,PT_SR4(%r16)
875 STREG %r19,PT_SR5(%r16)
876 STREG %r19,PT_SR6(%r16)
877 STREG %r19,PT_SR7(%r16)
880 /* check for reschedule */
882 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
883 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
885 .import do_notify_resume,code
889 LDREG TI_FLAGS(%r1),%r19
890 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
891 and,COND(<>) %r19, %r20, %r0
892 b,n intr_restore /* skip past if we've nothing to do */
894 /* This check is critical to having LWS
895 * working. The IASQ is zero on the gateway
896 * page and we cannot deliver any signals until
897 * we get off the gateway page.
899 * Only do signals if we are returning to user space
901 LDREG PT_IASQ0(%r16), %r20
902 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
903 LDREG PT_IASQ1(%r16), %r20
904 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
906 copy %r0, %r25 /* long in_syscall = 0 */
908 ldo -16(%r30),%r29 /* Reference param save area */
911 /* NOTE: We need to enable interrupts if we have to deliver
912 * signals. We used to do this earlier but it caused kernel
913 * stack overflows. */
916 BL do_notify_resume,%r2
917 copy %r16, %r26 /* struct pt_regs *regs */
923 ldo PT_FR31(%r29),%r1
927 /* inverse of virt_map */
929 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
932 /* Restore space id's and special cr's from PT_REGS
933 * structure pointed to by r29
937 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
938 * It also restores r1 and r30.
945 #ifndef CONFIG_PREEMPTION
946 # define intr_do_preempt intr_restore
947 #endif /* !CONFIG_PREEMPTION */
949 .import schedule,code
951 /* Only call schedule on return to userspace. If we're returning
952 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
953 * we jump back to intr_restore.
955 LDREG PT_IASQ0(%r16), %r20
956 cmpib,COND(=) 0, %r20, intr_do_preempt
958 LDREG PT_IASQ1(%r16), %r20
959 cmpib,COND(=) 0, %r20, intr_do_preempt
962 /* NOTE: We need to enable interrupts if we schedule. We used
963 * to do this earlier but it caused kernel stack overflows. */
967 ldo -16(%r30),%r29 /* Reference param save area */
970 ldil L%intr_check_sig, %r2
974 load32 schedule, %r20
977 ldo R%intr_check_sig(%r2), %r2
979 /* preempt the current task on returning to kernel
980 * mode from an interrupt, iff need_resched is set,
981 * and preempt_count is 0. Otherwise, we continue on
982 * our merry way back to the current running task.
984 #ifdef CONFIG_PREEMPTION
985 .import preempt_schedule_irq,code
987 rsm PSW_SM_I, %r0 /* disable interrupts */
989 /* current_thread_info()->preempt_count */
991 LDREG TI_PRE_COUNT(%r1), %r19
992 cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */
993 nop /* prev insn branched backwards */
995 /* check if we interrupted a critical path */
996 LDREG PT_PSW(%r16), %r20
997 bb,<,n %r20, 31 - PSW_SM_I, intr_restore
1000 BL preempt_schedule_irq, %r2
1003 b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
1004 #endif /* CONFIG_PREEMPTION */
1007 * External interrupts.
1011 cmpib,COND(=),n 0,%r16,1f
1023 ldo PT_FR0(%r29), %r24
1028 copy %r29, %r26 /* arg0 is pt_regs */
1029 copy %r29, %r16 /* save pt_regs */
1031 ldil L%intr_return, %r2
1034 ldo -16(%r30),%r29 /* Reference param save area */
1038 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
1039 ENDPROC_CFI(syscall_exit_rfi)
1042 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1044 ENTRY_CFI(intr_save) /* for os_hpmc */
1046 cmpib,COND(=),n 0,%r16,1f
1058 /* If this trap is an itlb miss, skip saving/adjusting isr/ior */
1059 cmpib,COND(=),n PARISC_ITLB_TRAP,%r26,skip_save_ior
1063 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1069 * If the interrupted code was running with W bit off (32 bit),
1070 * clear the b bits (bits 0 & 1) in the ior.
1071 * save_specials left ipsw value in r8 for us to test.
1073 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1076 /* adjust isr/ior: get high bits from isr and deposit in ior */
1077 space_adjust %r16,%r17,%r1
1079 STREG %r16, PT_ISR(%r29)
1080 STREG %r17, PT_IOR(%r29)
1082 #if 0 && defined(CONFIG_64BIT)
1083 /* Revisit when we have 64-bit code above 4Gb */
1087 /* We have an itlb miss, and when executing code above 4 GB on ILP64, we
1088 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
1091 extrd,u,* %r8,PSW_W_BIT,1,%r1
1092 cmpib,COND(=),n 1,%r1,intr_save2
1093 LDREG PT_IASQ0(%r29), %r16
1094 LDREG PT_IAOQ0(%r29), %r17
1095 /* adjust iasq/iaoq */
1096 space_adjust %r16,%r17,%r1
1097 STREG %r16, PT_IASQ0(%r29)
1098 STREG %r17, PT_IAOQ0(%r29)
1107 ldo PT_FR0(%r29), %r25
1112 copy %r29, %r25 /* arg1 is pt_regs */
1114 ldo -16(%r30),%r29 /* Reference param save area */
1117 ldil L%intr_check_sig, %r2
1118 copy %r25, %r16 /* save pt_regs */
1120 b handle_interruption
1121 ldo R%intr_check_sig(%r2), %r2
1122 ENDPROC_CFI(intr_save)
1126 * Note for all tlb miss handlers:
1128 * cr24 contains a pointer to the kernel address space page directory.
1131 * cr25 contains a pointer to the current user address
1132 * space page directory.
1134 * sr3 will contain the space id of the user address space
1135 * of the current running thread while that thread is
1136 * running in the kernel.
1140 * register number allocations. Note that these are all
1141 * in the shadowed registers
1144 t0 = r1 /* temporary register 0 */
1145 va = r8 /* virtual address for which the trap occurred */
1146 t1 = r9 /* temporary register 1 */
1147 pte = r16 /* pte/phys page # */
1148 prot = r17 /* prot bits */
1149 spc = r24 /* space for which the trap occurred */
1150 ptp = r25 /* page directory/page table pointer */
1155 space_adjust spc,va,t0
1157 space_check spc,t0,dtlb_fault
1159 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1161 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1162 update_accessed ptp,pte,t0,t1
1164 make_insert_tlb spc,pte,prot,t1
1172 dtlb_check_alias_20w:
1173 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1181 space_adjust spc,va,t0
1183 space_check spc,t0,nadtlb_fault
1185 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
1187 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1188 update_accessed ptp,pte,t0,t1
1190 make_insert_tlb spc,pte,prot,t1
1198 nadtlb_check_alias_20w:
1199 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1211 space_check spc,t0,dtlb_fault
1213 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1215 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
1216 update_accessed ptp,pte,t0,t1
1218 make_insert_tlb_11 spc,pte,prot
1220 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1223 idtlba pte,(%sr1,va)
1224 idtlbp prot,(%sr1,va)
1226 mtsp t1, %sr1 /* Restore sr1 */
1232 dtlb_check_alias_11:
1233 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
1244 space_check spc,t0,nadtlb_fault
1246 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
1248 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
1249 update_accessed ptp,pte,t0,t1
1251 make_insert_tlb_11 spc,pte,prot
1253 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1256 idtlba pte,(%sr1,va)
1257 idtlbp prot,(%sr1,va)
1259 mtsp t1, %sr1 /* Restore sr1 */
1265 nadtlb_check_alias_11:
1266 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
1275 space_adjust spc,va,t0
1277 space_check spc,t0,dtlb_fault
1279 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1281 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
1282 update_accessed ptp,pte,t0,t1
1284 make_insert_tlb spc,pte,prot,t1
1294 dtlb_check_alias_20:
1295 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1305 space_check spc,t0,nadtlb_fault
1307 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
1309 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1310 update_accessed ptp,pte,t0,t1
1312 make_insert_tlb spc,pte,prot,t1
1322 nadtlb_check_alias_20:
1323 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1335 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1336 * probei instructions. We don't want to fault for these
1337 * instructions (not only does it not make sense, it can cause
1338 * deadlocks, since some flushes are done with the mmap
1339 * semaphore held). If the translation doesn't exist, we can't
1340 * insert a translation, so have to emulate the side effects
1341 * of the instruction. Since we don't insert a translation
1342 * we can get a lot of faults during a flush loop, so it makes
1343 * sense to try to do it here with minimum overhead. We only
1344 * emulate fdc,fic,pdc,probew,prober instructions whose base
1345 * and index registers are not shadowed. We defer everything
1346 * else to the "slow" path.
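	/* Shape of the emulation below, in hedged C (get_register and
	 * set_register are the helpers at the end of this file; -1 means
	 * "shadowed register, take the slow path"; the field extractors
	 * are hypothetical):
	 *
	 *	x = get_register(index_field(iir));
	 *	b = get_register(base_field(iir));
	 *	if (x == -1 || b == -1)
	 *		goto nadtlb_fault;		// slow path
	 *	if (m_bit(iir))				// base modification
	 *		set_register(base_field(iir), b + x);
	 *	// then set PSW N to nullify the faulting insn
	 */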
1349 mfctl %cr19,%r9 /* Get iir */
1351 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1352 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
1354 /* Checks for fdc,fdce,pdc,"fic,4f" only */
1357 cmpb,<>,n %r16,%r17,nadtlb_probe_check
1358 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
1359 BL get_register,%r25
1360 extrw,u %r9,15,5,%r8 /* Get index register # */
1361 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1363 BL get_register,%r25
1364 extrw,u %r9,10,5,%r8 /* Get base register # */
1365 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1366 BL set_register,%r25
1367 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
1372 or %r8,%r9,%r8 /* Set PSW_N */
1379 When there is no translation for the probe address, we
1380 must nullify the insn and return zero in the target register.
1381 This will indicate to the calling code that it does not have
1382 write/read privileges to this address.
1384 This should technically work for prober and probew in PA 1.1,
1385 and also probe,r and probe,w in PA 2.0
1387 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1388 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
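	/* What the fast path below amounts to (sketch; target_field is a
	 * hypothetical extractor for iir bits 27..31):
	 *
	 *	t = target_field(iir);
	 *	if (get_register(t) == -1)
	 *		goto nadtlb_fault;	// shadowed: slow path
	 *	set_register(t, 0);		// report "no privilege"
	 *	// then set PSW N to nullify the probe insn
	 */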
1394 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1395 BL get_register,%r25 /* Find the target register */
1396 extrw,u %r9,31,5,%r8 /* Get target register */
1397 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1398 BL set_register,%r25
1399 copy %r0,%r1 /* Write zero to target register */
1400 b nadtlb_nullify /* Nullify return insn */
1408 * An I-side miss is a little different, since we allow users to fault
1409 * on the gateway page which is in the kernel address space.
1412 space_adjust spc,va,t0
1414 space_check spc,t0,itlb_fault
1416 L3_ptep ptp,pte,t0,va,itlb_fault
1418 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1419 update_accessed ptp,pte,t0,t1
1421 make_insert_tlb spc,pte,prot,t1
1432 * An I-side miss is a little different, since we allow users to fault
1433 * on the gateway page which is in the kernel address space.
1436 space_adjust spc,va,t0
1438 space_check spc,t0,naitlb_fault
1440 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1442 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1443 update_accessed ptp,pte,t0,t1
1445 make_insert_tlb spc,pte,prot,t1
1453 naitlb_check_alias_20w:
1454 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1466 space_check spc,t0,itlb_fault
1468 L2_ptep ptp,pte,t0,va,itlb_fault
1470 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1471 update_accessed ptp,pte,t0,t1
1473 make_insert_tlb_11 spc,pte,prot
1475 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1478 iitlba pte,(%sr1,va)
1479 iitlbp prot,(%sr1,va)
1481 mtsp t1, %sr1 /* Restore sr1 */
1490 space_check spc,t0,naitlb_fault
1492 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1494 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
1495 update_accessed ptp,pte,t0,t1
1497 make_insert_tlb_11 spc,pte,prot
1499 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1502 iitlba pte,(%sr1,va)
1503 iitlbp prot,(%sr1,va)
1505 mtsp t1, %sr1 /* Restore sr1 */
1511 naitlb_check_alias_11:
1512 do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
1514 iitlba pte,(%sr0, va)
1515 iitlbp prot,(%sr0, va)
1524 space_check spc,t0,itlb_fault
1526 L2_ptep ptp,pte,t0,va,itlb_fault
1528 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1529 update_accessed ptp,pte,t0,t1
1531 make_insert_tlb spc,pte,prot,t1
1544 space_check spc,t0,naitlb_fault
1546 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1548 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
1549 update_accessed ptp,pte,t0,t1
1551 make_insert_tlb spc,pte,prot,t1
1561 naitlb_check_alias_20:
1562 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1574 space_adjust spc,va,t0
1576 space_check spc,t0,dbit_fault
1578 L3_ptep ptp,pte,t0,va,dbit_fault
1580 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1581 update_dirty ptp,pte,t1
1583 make_insert_tlb spc,pte,prot,t1
1596 space_check spc,t0,dbit_fault
1598 L2_ptep ptp,pte,t0,va,dbit_fault
1600 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1601 update_dirty ptp,pte,t1
1603 make_insert_tlb_11 spc,pte,prot
1605 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1608 idtlba pte,(%sr1,va)
1609 idtlbp prot,(%sr1,va)
1611 mtsp t1, %sr1 /* Restore sr1 */
1620 space_check spc,t0,dbit_fault
1622 L2_ptep ptp,pte,t0,va,dbit_fault
1624 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1625 update_dirty ptp,pte,t1
1627 make_insert_tlb spc,pte,prot,t1
1638 .import handle_interruption,code
1642 ldi 31,%r8 /* Use an unused code */
1650 ldi PARISC_ITLB_TRAP,%r8
1664 /* Register saving semantics for system calls:
1666 %r1 clobbered by system call macro in userspace
1667 %r2 saved in PT_REGS by gateway page
1668 %r3 - %r18 preserved by C code (saved by signal code)
1669 %r19 - %r20 saved in PT_REGS by gateway page
1670 %r21 - %r22 non-standard syscall args
1671 stored in kernel stack by gateway page
1672 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1673 %r27 - %r30 saved in PT_REGS by gateway page
1674 %r31 syscall return pointer
1677 /* Floating point registers (FIXME: what do we do with these?)
1679 %fr0 - %fr3 status/exception, not preserved
1680 %fr4 - %fr7 arguments
1681 %fr8 - %fr11 not preserved by C code
1682 %fr12 - %fr21 preserved by C code
1683 %fr22 - %fr31 not preserved by C code
1686 .macro reg_save regs
1687 STREG %r3, PT_GR3(\regs)
1688 STREG %r4, PT_GR4(\regs)
1689 STREG %r5, PT_GR5(\regs)
1690 STREG %r6, PT_GR6(\regs)
1691 STREG %r7, PT_GR7(\regs)
1692 STREG %r8, PT_GR8(\regs)
1693 STREG %r9, PT_GR9(\regs)
1694 STREG %r10,PT_GR10(\regs)
1695 STREG %r11,PT_GR11(\regs)
1696 STREG %r12,PT_GR12(\regs)
1697 STREG %r13,PT_GR13(\regs)
1698 STREG %r14,PT_GR14(\regs)
1699 STREG %r15,PT_GR15(\regs)
1700 STREG %r16,PT_GR16(\regs)
1701 STREG %r17,PT_GR17(\regs)
1702 STREG %r18,PT_GR18(\regs)
1705 .macro reg_restore regs
1706 LDREG PT_GR3(\regs), %r3
1707 LDREG PT_GR4(\regs), %r4
1708 LDREG PT_GR5(\regs), %r5
1709 LDREG PT_GR6(\regs), %r6
1710 LDREG PT_GR7(\regs), %r7
1711 LDREG PT_GR8(\regs), %r8
1712 LDREG PT_GR9(\regs), %r9
1713 LDREG PT_GR10(\regs),%r10
1714 LDREG PT_GR11(\regs),%r11
1715 LDREG PT_GR12(\regs),%r12
1716 LDREG PT_GR13(\regs),%r13
1717 LDREG PT_GR14(\regs),%r14
1718 LDREG PT_GR15(\regs),%r15
1719 LDREG PT_GR16(\regs),%r16
1720 LDREG PT_GR17(\regs),%r17
1721 LDREG PT_GR18(\regs),%r18
1724 .macro fork_like name
1725 ENTRY_CFI(sys_\name\()_wrapper)
1726 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1727 ldo TASK_REGS(%r1),%r1
1730 ldil L%sys_\name, %r31
1731 be R%sys_\name(%sr4,%r31)
1732 STREG %r28, PT_CR27(%r1)
1733 ENDPROC_CFI(sys_\name\()_wrapper)
1741 /* Set the return value for the child */
1743 BL schedule_tail, %r2
1745 finish_child_return:
1746 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1747 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1749 LDREG PT_CR27(%r1), %r3
1756 ENTRY_CFI(sys_rt_sigreturn_wrapper)
1757 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1758 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1759 /* Don't save regs, we are going to restore them from sigcontext. */
1760 STREG %r2, -RP_OFFSET(%r30)
1762 ldo FRAME_SIZE(%r30), %r30
1763 BL sys_rt_sigreturn,%r2
1764 ldo -16(%r30),%r29 /* Reference param save area */
1766 BL sys_rt_sigreturn,%r2
1767 ldo FRAME_SIZE(%r30), %r30
1770 ldo -FRAME_SIZE(%r30), %r30
1771 LDREG -RP_OFFSET(%r30), %r2
1773 /* FIXME: I think we need to restore a few more things here. */
1774 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1775 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1778 /* If the signal was received while the process was blocked on a
1779 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1780 * take us to syscall_exit_rfi and on to intr_return.
1783 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1784 ENDPROC_CFI(sys_rt_sigreturn_wrapper)
1787 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
1788 * via syscall_exit_rfi if the signal was received while the process was running.
1792 /* save return value now */
1795 LDREG TI_TASK(%r1),%r1
1796 STREG %r28,TASK_PT_GR28(%r1)
1798 /* Seems to me that dp could be wrong here, if the syscall involved
1799 * calling a module, and nothing got round to restoring dp on return.
1803 syscall_check_resched:
1805 /* check for reschedule */
1807 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
1808 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1810 .import do_signal,code
1812 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
1813 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
1814 and,COND(<>) %r19, %r26, %r0
1815 b,n syscall_restore /* skip past if we've nothing to do */
1818 /* Save callee-save registers (for sigcontext).
1819 * FIXME: After this point the process structure should be
1820 * consistent with all the relevant state of the process
1821 * before the syscall. We need to verify this.
1823 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1824 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
1828 ldo -16(%r30),%r29 /* Reference param save area */
1831 BL do_notify_resume,%r2
1832 ldi 1, %r25 /* long in_syscall = 1 */
1834 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1835 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
1838 b,n syscall_check_sig
1841 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1843 /* Are we being ptraced? */
1844 ldw TASK_FLAGS(%r1),%r19
1845 ldi _TIF_SYSCALL_TRACE_MASK,%r2
1846 and,COND(=) %r19,%r2,%r0
1847 b,n syscall_restore_rfi
1849 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
1852 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
1855 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
1856 LDREG TASK_PT_GR19(%r1),%r19
1857 LDREG TASK_PT_GR20(%r1),%r20
1858 LDREG TASK_PT_GR21(%r1),%r21
1859 LDREG TASK_PT_GR22(%r1),%r22
1860 LDREG TASK_PT_GR23(%r1),%r23
1861 LDREG TASK_PT_GR24(%r1),%r24
1862 LDREG TASK_PT_GR25(%r1),%r25
1863 LDREG TASK_PT_GR26(%r1),%r26
1864 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
1865 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
1866 LDREG TASK_PT_GR29(%r1),%r29
1867 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
1869 /* NOTE: We use rsm/ssm pair to make this operation atomic */
1870 LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
1872 copy %r1,%r30 /* Restore user sp */
1873 mfsp %sr3,%r1 /* Get user space id */
1874 mtsp %r1,%sr7 /* Restore sr7 */
1877 /* Set sr2 to zero for userspace syscalls to work. */
1879 mtsp %r1,%sr4 /* Restore sr4 */
1880 mtsp %r1,%sr5 /* Restore sr5 */
1881 mtsp %r1,%sr6 /* Restore sr6 */
1883 depi 3,31,2,%r31 /* ensure return to user mode. */
1886 /* decide whether to reset the wide mode bit
1888 * For a syscall, the W bit is stored in the lowest bit
1889 * of sp. Extract it and reset W if it is zero */
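	/* In C terms (sketch):
	 *
	 *	if (!(sp & 1))
	 *		psw &= ~PSW_W;		// narrow syscall: clear W
	 *	sp &= ~1UL;			// strip the flag bit
	 */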
1890 extrd,u,*<> %r30,63,1,%r1
1892 /* now reset the lowest bit of sp if it was set */
1895 be,n 0(%sr3,%r31) /* return to user space */
1897 /* We have to return via an RFI, so that PSW T and R bits can be set
1899 * This sets up pt_regs so we can return via intr_restore, which is not
1900 * the most efficient way of doing things, but it works.
1902 syscall_restore_rfi:
1903 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
1904 mtctl %r2,%cr0 /* for immediate trap */
1905 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
1906 ldi 0x0b,%r20 /* Create new PSW */
1907 depi -1,13,1,%r20 /* C, Q, D, and I bits */
1909 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1910 * set in thread_info.h and converted to PA bitmap
1911 * numbers in asm-offsets.c */
1913 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
1914 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1915 depi -1,27,1,%r20 /* R bit */
1917 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1918 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1919 depi -1,7,1,%r20 /* T bit */
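	/* i.e., approximately:
	 *
	 *	if (flags & _TIF_SINGLESTEP)
	 *		psw |= PSW_R;		// recovery counter trap
	 *	if (flags & _TIF_BLOCKSTEP)
	 *		psw |= PSW_T;		// taken-branch trap
	 */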
1921 STREG %r20,TASK_PT_PSW(%r1)
1923 /* Always store space registers, since sr3 can be changed (e.g. fork) */
1926 STREG %r25,TASK_PT_SR3(%r1)
1927 STREG %r25,TASK_PT_SR4(%r1)
1928 STREG %r25,TASK_PT_SR5(%r1)
1929 STREG %r25,TASK_PT_SR6(%r1)
1930 STREG %r25,TASK_PT_SR7(%r1)
1931 STREG %r25,TASK_PT_IASQ0(%r1)
1932 STREG %r25,TASK_PT_IASQ1(%r1)
1935 /* Now if old D bit is clear, it means we didn't save all registers
1936 * on syscall entry, so do that now. This only happens on TRACEME
1937 * calls, or if someone attached to us while we were on a syscall.
1938 * We could make this more efficient by not saving r3-r18, but
1939 * then we wouldn't be able to use the common intr_restore path.
1940 * It is only for traced processes anyway, so performance is not critical.
1943 bb,< %r2,30,pt_regs_ok /* Branch if D set */
1944 ldo TASK_REGS(%r1),%r25
1945 reg_save %r25 /* Save r3 to r18 */
1947 /* Save the current sr */
1949 STREG %r2,TASK_PT_SR0(%r1)
1951 /* Save the scratch sr */
1953 STREG %r2,TASK_PT_SR1(%r1)
1955 /* sr2 should be set to zero for userspace syscalls */
1956 STREG %r0,TASK_PT_SR2(%r1)
1958 LDREG TASK_PT_GR31(%r1),%r2
1959 depi 3,31,2,%r2 /* ensure return to user mode. */
1960 STREG %r2,TASK_PT_IAOQ0(%r1)
1962 STREG %r2,TASK_PT_IAOQ1(%r1)
1967 LDREG TASK_PT_IAOQ0(%r1),%r2
1968 depi 3,31,2,%r2 /* ensure return to user mode. */
1969 STREG %r2,TASK_PT_IAOQ0(%r1)
1970 LDREG TASK_PT_IAOQ1(%r1),%r2
1972 STREG %r2,TASK_PT_IAOQ1(%r1)
1977 load32 syscall_check_resched,%r2 /* if resched, we start over again */
1978 load32 schedule,%r19
1979 bv %r0(%r19) /* jumps to schedule() */
1981 ldo -16(%r30),%r29 /* Reference param save area */
1988 #ifdef CONFIG_FUNCTION_TRACER
1990 .import ftrace_function_trampoline,code
1991 .align L1_CACHE_BYTES
1992 ENTRY_CFI(mcount, caller)
1994 .export _mcount,data
1996 * The 64bit mcount() function pointer needs 4 dwords, of which the
1997 * first two are free. We optimize it here and put 2 instructions for
1998 * calling mcount(), and 2 instructions for ftrace_stub(). That way we
1999 * have it all on one L1 cacheline.
2002 b ftrace_function_trampoline
2003 copy %r3, %arg2 /* caller original %sp */
2006 .type ftrace_stub, @function
2015 .dword 0 /* code in head.S puts value of global gp here */
2019 #ifdef CONFIG_DYNAMIC_FTRACE
2022 #define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
2024 #define FTRACE_FRAME_SIZE FRAME_SIZE
2026 ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2028 .global ftrace_caller
2030 STREG %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
2031 ldo -FTRACE_FRAME_SIZE(%sp), %r3
2032 STREG %rp, -RP_OFFSET(%r3)
2034 /* Offset 0 is already allocated for %r1 */
2035 STREG %r23, 2*REG_SZ(%r3)
2036 STREG %r24, 3*REG_SZ(%r3)
2037 STREG %r25, 4*REG_SZ(%r3)
2038 STREG %r26, 5*REG_SZ(%r3)
2039 STREG %r28, 6*REG_SZ(%r3)
2040 STREG %r29, 7*REG_SZ(%r3)
2042 STREG %r19, 8*REG_SZ(%r3)
2043 STREG %r20, 9*REG_SZ(%r3)
2044 STREG %r21, 10*REG_SZ(%r3)
2045 STREG %r22, 11*REG_SZ(%r3)
2046 STREG %r27, 12*REG_SZ(%r3)
2047 STREG %r31, 13*REG_SZ(%r3)
2054 ldi 0, %r23 /* no pt_regs */
2055 b,l ftrace_function_trampoline, %rp
2058 LDREG -RP_OFFSET(%r3), %rp
2059 LDREG 2*REG_SZ(%r3), %r23
2060 LDREG 3*REG_SZ(%r3), %r24
2061 LDREG 4*REG_SZ(%r3), %r25
2062 LDREG 5*REG_SZ(%r3), %r26
2063 LDREG 6*REG_SZ(%r3), %r28
2064 LDREG 7*REG_SZ(%r3), %r29
2066 LDREG 8*REG_SZ(%r3), %r19
2067 LDREG 9*REG_SZ(%r3), %r20
2068 LDREG 10*REG_SZ(%r3), %r21
2069 LDREG 11*REG_SZ(%r3), %r22
2070 LDREG 12*REG_SZ(%r3), %r27
2071 LDREG 13*REG_SZ(%r3), %r31
2073 LDREG 1*REG_SZ(%r3), %r3
2075 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1
2076 /* Adjust return point to jump back to beginning of traced function */
2080 ENDPROC_CFI(ftrace_caller)
2082 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
2083 ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
2084 CALLS,SAVE_RP,SAVE_SP)
2086 .global ftrace_regs_caller
2088 ldo -FTRACE_FRAME_SIZE(%sp), %r1
2089 STREG %rp, -RP_OFFSET(%r1)
2092 ldo PT_SZ_ALGN(%sp), %sp
2094 STREG %rp, PT_GR2(%r1)
2095 STREG %r3, PT_GR3(%r1)
2096 STREG %r4, PT_GR4(%r1)
2097 STREG %r5, PT_GR5(%r1)
2098 STREG %r6, PT_GR6(%r1)
2099 STREG %r7, PT_GR7(%r1)
2100 STREG %r8, PT_GR8(%r1)
2101 STREG %r9, PT_GR9(%r1)
2102 STREG %r10, PT_GR10(%r1)
2103 STREG %r11, PT_GR11(%r1)
2104 STREG %r12, PT_GR12(%r1)
2105 STREG %r13, PT_GR13(%r1)
2106 STREG %r14, PT_GR14(%r1)
2107 STREG %r15, PT_GR15(%r1)
2108 STREG %r16, PT_GR16(%r1)
2109 STREG %r17, PT_GR17(%r1)
2110 STREG %r18, PT_GR18(%r1)
2111 STREG %r19, PT_GR19(%r1)
2112 STREG %r20, PT_GR20(%r1)
2113 STREG %r21, PT_GR21(%r1)
2114 STREG %r22, PT_GR22(%r1)
2115 STREG %r23, PT_GR23(%r1)
2116 STREG %r24, PT_GR24(%r1)
2117 STREG %r25, PT_GR25(%r1)
2118 STREG %r26, PT_GR26(%r1)
2119 STREG %r27, PT_GR27(%r1)
2120 STREG %r28, PT_GR28(%r1)
2121 STREG %r29, PT_GR29(%r1)
2122 STREG %r30, PT_GR30(%r1)
2123 STREG %r31, PT_GR31(%r1)
2125 STREG %r26, PT_SAR(%r1)
2128 LDREG -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
2130 ldo -FTRACE_FRAME_SIZE(%r1), %arg2
2131 b,l ftrace_function_trampoline, %rp
2132 copy %r1, %arg3 /* struct pt_regs */
2134 ldo -PT_SZ_ALGN(%sp), %r1
2136 LDREG PT_SAR(%r1), %rp
2139 LDREG PT_GR2(%r1), %rp
2140 LDREG PT_GR3(%r1), %r3
2141 LDREG PT_GR4(%r1), %r4
2142 LDREG PT_GR5(%r1), %r5
2143 LDREG PT_GR6(%r1), %r6
2144 LDREG PT_GR7(%r1), %r7
2145 LDREG PT_GR8(%r1), %r8
2146 LDREG PT_GR9(%r1), %r9
2147 LDREG PT_GR10(%r1),%r10
2148 LDREG PT_GR11(%r1),%r11
2149 LDREG PT_GR12(%r1),%r12
2150 LDREG PT_GR13(%r1),%r13
2151 LDREG PT_GR14(%r1),%r14
2152 LDREG PT_GR15(%r1),%r15
2153 LDREG PT_GR16(%r1),%r16
2154 LDREG PT_GR17(%r1),%r17
2155 LDREG PT_GR18(%r1),%r18
2156 LDREG PT_GR19(%r1),%r19
2157 LDREG PT_GR20(%r1),%r20
2158 LDREG PT_GR21(%r1),%r21
2159 LDREG PT_GR22(%r1),%r22
2160 LDREG PT_GR23(%r1),%r23
2161 LDREG PT_GR24(%r1),%r24
2162 LDREG PT_GR25(%r1),%r25
2163 LDREG PT_GR26(%r1),%r26
2164 LDREG PT_GR27(%r1),%r27
2165 LDREG PT_GR28(%r1),%r28
2166 LDREG PT_GR29(%r1),%r29
2167 LDREG PT_GR30(%r1),%r30
2168 LDREG PT_GR31(%r1),%r31
2170 ldo -PT_SZ_ALGN(%sp), %sp
2171 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1
2172 /* Adjust return point to jump back to beginning of traced function */
2176 ENDPROC_CFI(ftrace_regs_caller)
2181 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2183 ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
2184 .export parisc_return_to_handler,data
2185 parisc_return_to_handler:
2187 STREG %r0,-RP_OFFSET(%sp) /* store 0 as %rp */
2189 STREGM %r1,FRAME_SIZE(%sp)
2197 /* call ftrace_return_to_handler(0) */
2198 .import ftrace_return_to_handler,code
2199 load32 ftrace_return_to_handler,%ret0
2200 load32 .Lftrace_ret,%r2
2202 ldo -16(%sp),%ret1 /* Reference param save area */
2211 /* restore original return values */
2215 /* return from function */
2221 LDREGM -FRAME_SIZE(%sp),%r3
2222 ENDPROC_CFI(return_to_handler)
2224 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2226 #endif /* CONFIG_FUNCTION_TRACER */
2228 #ifdef CONFIG_IRQSTACKS
2229 /* void call_on_stack(unsigned long param1, void *func,
2230 unsigned long new_stack) */
2231 ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2232 ENTRY(_call_on_stack)
2235 /* Regarding the HPPA calling conventions for function pointers,
2236 we assume the PIC register is not changed across the call. For
2237 CONFIG_64BIT, the argument pointer is left to point at the
2238 argument region allocated for the call to call_on_stack. */
2240 /* Switch to new stack. We allocate two frames. */
2241 ldo 2*FRAME_SIZE(%arg2), %sp
2242 # ifdef CONFIG_64BIT
2243 /* Save previous stack pointer and return pointer in frame marker */
2244 STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2245 /* Calls always use function descriptor */
2246 LDREG 16(%arg1), %arg1
2248 STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
2249 LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
2251 LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
2253 /* Save previous stack pointer and return pointer in frame marker */
2254 STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
2255 STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2256 /* Calls use function descriptor if PLABEL bit is set */
2257 bb,>=,n %arg1, 30, 1f
2259 LDREG 0(%arg1), %arg1
2261 be,l 0(%sr4,%arg1), %sr0, %r31
2263 LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
2265 LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
2266 # endif /* CONFIG_64BIT */
2267 ENDPROC_CFI(call_on_stack)
2268 #endif /* CONFIG_IRQSTACKS */
2270 ENTRY_CFI(get_register)
2272 * get_register is used by the non access tlb miss handlers to
2273 * copy the value of the general register specified in r8 into
2274 * r1. This routine can't be used for shadowed registers, since
2275 * the rfir will restore the original value. So, for the shadowed
2276 * registers we put a -1 into r1 to indicate that the register
2277 * should not be used (the register being copied could also have
2278 * a -1 in it, but that is OK, it just means that we will have
2279 * to use the slow path instead).
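	/* Effective C prototype (sketch):
	 *
	 *	long get_register(int regnum);	// value, or -1 if shadowed
	 */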
2283 bv %r0(%r25) /* r0 */
2285 bv %r0(%r25) /* r1 - shadowed */
2287 bv %r0(%r25) /* r2 */
2289 bv %r0(%r25) /* r3 */
2291 bv %r0(%r25) /* r4 */
2293 bv %r0(%r25) /* r5 */
2295 bv %r0(%r25) /* r6 */
2297 bv %r0(%r25) /* r7 */
2299 bv %r0(%r25) /* r8 - shadowed */
2301 bv %r0(%r25) /* r9 - shadowed */
2303 bv %r0(%r25) /* r10 */
2305 bv %r0(%r25) /* r11 */
2307 bv %r0(%r25) /* r12 */
2309 bv %r0(%r25) /* r13 */
2311 bv %r0(%r25) /* r14 */
2313 bv %r0(%r25) /* r15 */
2315 bv %r0(%r25) /* r16 - shadowed */
2317 bv %r0(%r25) /* r17 - shadowed */
2319 bv %r0(%r25) /* r18 */
2321 bv %r0(%r25) /* r19 */
2323 bv %r0(%r25) /* r20 */
2325 bv %r0(%r25) /* r21 */
2327 bv %r0(%r25) /* r22 */
2329 bv %r0(%r25) /* r23 */
2331 bv %r0(%r25) /* r24 - shadowed */
2333 bv %r0(%r25) /* r25 - shadowed */
2335 bv %r0(%r25) /* r26 */
2337 bv %r0(%r25) /* r27 */
2339 bv %r0(%r25) /* r28 */
2341 bv %r0(%r25) /* r29 */
2343 bv %r0(%r25) /* r30 */
2345 bv %r0(%r25) /* r31 */
2347 ENDPROC_CFI(get_register)
2350 ENTRY_CFI(set_register)
2352 * set_register is used by the non access tlb miss handlers to
2353 * copy the value of r1 into the general register specified in r8.
2358 bv %r0(%r25) /* r0 (silly, but it is a place holder) */
2360 bv %r0(%r25) /* r1 */
2362 bv %r0(%r25) /* r2 */
2364 bv %r0(%r25) /* r3 */
2366 bv %r0(%r25) /* r4 */
2368 bv %r0(%r25) /* r5 */
2370 bv %r0(%r25) /* r6 */
2372 bv %r0(%r25) /* r7 */
2374 bv %r0(%r25) /* r8 */
2376 bv %r0(%r25) /* r9 */
2378 bv %r0(%r25) /* r10 */
2380 bv %r0(%r25) /* r11 */
2382 bv %r0(%r25) /* r12 */
2384 bv %r0(%r25) /* r13 */
2386 bv %r0(%r25) /* r14 */
2388 bv %r0(%r25) /* r15 */
2390 bv %r0(%r25) /* r16 */
2392 bv %r0(%r25) /* r17 */
2394 bv %r0(%r25) /* r18 */
2396 bv %r0(%r25) /* r19 */
2398 bv %r0(%r25) /* r20 */
2400 bv %r0(%r25) /* r21 */
2402 bv %r0(%r25) /* r22 */
2404 bv %r0(%r25) /* r23 */
2406 bv %r0(%r25) /* r24 */
2408 bv %r0(%r25) /* r25 */
2410 bv %r0(%r25) /* r26 */
2412 bv %r0(%r25) /* r27 */
2414 bv %r0(%r25) /* r28 */
2416 bv %r0(%r25) /* r29 */
2418 bv %r0(%r25) /* r30 */
2420 bv %r0(%r25) /* r31 */
2422 ENDPROC_CFI(set_register)