3 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
12 * Entry to the kernel is "interesting":
13 * (1) There are no stack pointers, not even for the kernel
14 * (2) General Registers should not be clobbered
15 * (3) There are no kernel-only data registers
16 * (4) Since all addressing modes are relative to a General Register, no global
17 * variables can be reached
19 * We deal with this by declaring that we shall kill GR28 on entering the
20 * kernel from userspace
22 * However, since break interrupts can interrupt the CPU even when PSR.ET==0,
23 * they can't rely on GR28 to be anything useful, and so need to clobber a
24 * separate register (GR31). Break interrupts are managed in break.S
26 * GR29 _is_ saved, and holds the current task pointer globally
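 * (On that entry path GR15 and GR16 are also repopulated, as the code below
 * shows: GR15 with the current thread_info pointer and GR16 with the kernel
 * GP-REL pointer, _gp.)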
30 #include <linux/linkage.h>
31 #include <asm/thread_info.h>
32 #include <asm/setup.h>
33 #include <asm/segment.h>
34 #include <asm/ptrace.h>
35 #include <asm/errno.h>
36 #include <asm/cache.h>
37 #include <asm/spr-regs.h>
39 #define nr_syscalls ((syscall_table_size)/4)
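# syscall_table_size (computed at the end of this file) is in bytes, and each
# sys_call_table entry is a 4-byte .long, so dividing by 4 yields the number of
# entries; this is what incoming syscall numbers are bounds-checked against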
45 # sethi.p %hi(0xe1200004),gr30
46 # setlo %lo(0xe1200004),gr30
49 # sethi.p %hi(0xffc00100),gr30
50 # setlo %lo(0xffc00100),gr30
57 # sethi.p %hi(0xe1200004),gr30
58 # setlo %lo(0xe1200004),gr30
59 # st.p gr31,@(gr30,gr0)
61 # sethi.p %hi(0xffc00100),gr30
62 # setlo %lo(0xffc00100),gr30
63 # sth gr31,@(gr30,gr0)
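# (the '#'-prefixed stores above appear to be disabled debug output, writing a
# diagnostic value to what look like LED/debug ports at 0xe1200004 and
# 0xffc00100)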
67 ###############################################################################
69 # entry point for External interrupts received whilst executing userspace code
71 ###############################################################################
72 .globl __entry_uspace_external_interrupt
73 .type __entry_uspace_external_interrupt,@function
74 __entry_uspace_external_interrupt:
76 sethi.p %hi(__kernel_frame0_ptr),gr28
77 setlo %lo(__kernel_frame0_ptr),gr28
80 # handle h/w single-step through exceptions
81 sti gr0,@(gr28,#REG__STATUS)
83 .globl __entry_uspace_external_interrupt_reentry
84 __entry_uspace_external_interrupt_reentry:
90 # finish building the exception frame
91 sti sp, @(gr28,#REG_SP)
92 stdi gr2, @(gr28,#REG_GR(2))
93 stdi gr4, @(gr28,#REG_GR(4))
94 stdi gr6, @(gr28,#REG_GR(6))
95 stdi gr8, @(gr28,#REG_GR(8))
96 stdi gr10,@(gr28,#REG_GR(10))
97 stdi gr12,@(gr28,#REG_GR(12))
98 stdi gr14,@(gr28,#REG_GR(14))
99 stdi gr16,@(gr28,#REG_GR(16))
100 stdi gr18,@(gr28,#REG_GR(18))
101 stdi gr20,@(gr28,#REG_GR(20))
102 stdi gr22,@(gr28,#REG_GR(22))
103 stdi gr24,@(gr28,#REG_GR(24))
104 stdi gr26,@(gr28,#REG_GR(26))
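	# userspace's GR28 was declared clobbered on entry to the kernel (see the
	# header comment), so its frame slot is simply zeroed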
105 sti gr0, @(gr28,#REG_GR(28))
106 sti gr29,@(gr28,#REG_GR(29))
107 stdi.p gr30,@(gr28,#REG_GR(30))
109 # set up the kernel stack pointer
122 andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
123 andi.p gr22,#~(PSR_PS|PSR_S),gr6
126 andi gr5,#~PSR_ET,gr5
128 sti gr20,@(gr28,#REG_TBR)
129 sti gr21,@(gr28,#REG_PC)
130 sti gr5 ,@(gr28,#REG_PSR)
131 sti gr23,@(gr28,#REG_ISR)
132 stdi gr24,@(gr28,#REG_CCR)
133 stdi gr26,@(gr28,#REG_LR)
134 sti gr4 ,@(gr28,#REG_SYSCALLNO)
138 stdi gr4,@(gr28,#REG_IACC0)
142 stdi.p gr4,@(gr28,#REG_GNER0)
144 # interrupts start off fully disabled in the interrupt handler
145 subcc gr0,gr0,gr0,icc2 /* set Z and clear C */
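	# convention used throughout this file and break.S for virtualised
	# interrupt disablement: ICC2.Z set means interrupts are virtually
	# disabled, and ICC2.C clear means they have also been really disabled
	# because one arrived in the meantime; local_irq_enable() is expected to
	# clear Z and then trap (the TIHI mentioned below) into the virtual
	# reenable handler when C indicates a deferred interrupt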
147 # set up kernel global registers
148 sethi.p %hi(__kernel_current_task),gr5
149 setlo %lo(__kernel_current_task),gr5
150 sethi.p %hi(_gp),gr16
153 ldi.p @(gr29,#4),gr15 ; __current_thread_info = current->thread_info
155 # make sure we (the kernel) get div-zero and misalignment exceptions
156 setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
159 # switch to the kernel trap table
160 sethi.p %hi(__entry_kerneltrap_table),gr6
161 setlo %lo(__entry_kerneltrap_table),gr6
164 # set the return address
165 sethi.p %hi(__entry_return_from_user_interrupt),gr4
166 setlo %lo(__entry_return_from_user_interrupt),gr4
169 # raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
172 ori gr4,#PSR_PIL_14,gr4
174 ori gr4,#PSR_PIL_14|PSR_ET,gr4
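	# the PIL field is raised first while ET is still clear, and only then is
	# ET set as well, so no maskable interrupt can slip in at the instant
	# exception processing is re-enabled; with PIL at 14, only level-15 (NMI)
	# interrupts can still be delivered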
180 .size __entry_uspace_external_interrupt,.-__entry_uspace_external_interrupt
182 ###############################################################################
184 # entry point for External interrupts received whilst executing kernel code
185 # - on arriving here, the following registers should already be set up:
186 # GR15 - current thread_info struct pointer
187 # GR16 - kernel GP-REL pointer
188 # GR29 - current task struct pointer
189 # TBR - kernel trap vector table
190 # ISR - kernel's preferred integer controls
192 ###############################################################################
193 .globl __entry_kernel_external_interrupt
194 .type __entry_kernel_external_interrupt,@function
195 __entry_kernel_external_interrupt:
200 # set up the stack pointer
203 sti gr30,@(sp,#REG_SP)
205 # handle h/w single-step through exceptions
206 sti gr0,@(sp,#REG__STATUS)
208 .globl __entry_kernel_external_interrupt_reentry
209 __entry_kernel_external_interrupt_reentry:
212 # set up the exception frame
213 setlos #REG__END,gr30
216 sti.p gr28,@(sp,#REG_GR(28))
219 # finish building the exception frame
220 stdi gr2,@(gr28,#REG_GR(2))
221 stdi gr4,@(gr28,#REG_GR(4))
222 stdi gr6,@(gr28,#REG_GR(6))
223 stdi gr8,@(gr28,#REG_GR(8))
224 stdi gr10,@(gr28,#REG_GR(10))
225 stdi gr12,@(gr28,#REG_GR(12))
226 stdi gr14,@(gr28,#REG_GR(14))
227 stdi gr16,@(gr28,#REG_GR(16))
228 stdi gr18,@(gr28,#REG_GR(18))
229 stdi gr20,@(gr28,#REG_GR(20))
230 stdi gr22,@(gr28,#REG_GR(22))
231 stdi gr24,@(gr28,#REG_GR(24))
232 stdi gr26,@(gr28,#REG_GR(26))
233 sti gr29,@(gr28,#REG_GR(29))
234 stdi.p gr30,@(gr28,#REG_GR(30))
236 # note virtual interrupts will be fully enabled upon return
237 subicc gr0,#1,gr0,icc2 /* clear Z, set C */
249 andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
250 andi.p gr22,#~(PSR_PS|PSR_S),gr6
253 andi.p gr5,#~PSR_ET,gr5
255 # set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
256 # - for an explanation of how it works, see: Documentation/frv/atomic-ops.txt
257 andi gr25,#~0xc0,gr25
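	# CC3 occupies bits 7:6 of CCCR, so masking with ~0xc0 clears it to the
	# "undefined" state; the conditional store that would complete an
	# interrupted atomic-modify sequence then refuses to commit, and the
	# sequence is retried, as described in the referenced document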
259 sti gr20,@(gr28,#REG_TBR)
260 sti gr21,@(gr28,#REG_PC)
261 sti gr5 ,@(gr28,#REG_PSR)
262 sti gr23,@(gr28,#REG_ISR)
263 stdi gr24,@(gr28,#REG_CCR)
264 stdi gr26,@(gr28,#REG_LR)
265 sti gr4 ,@(gr28,#REG_SYSCALLNO)
269 stdi gr4,@(gr28,#REG_IACC0)
273 stdi.p gr4,@(gr28,#REG_GNER0)
275 # interrupts start off fully disabled in the interrupt handler
276 subcc gr0,gr0,gr0,icc2 /* set Z and clear C */
278 # set the return address
279 sethi.p %hi(__entry_return_from_kernel_interrupt),gr4
280 setlo %lo(__entry_return_from_kernel_interrupt),gr4
283 # clear power-saving mode flags
285 andi gr4,#~HSR0_PDM,gr4
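	# HSR0.PDM is the power-down mode field; clearing it here presumably keeps
	# the core from dropping back into the sleep state it was woken from once
	# this handler returns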
288 # raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
290 ori gr4,#PSR_PIL_14,gr4
298 .size __entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt
300 ###############################################################################
302 # deal with interrupts that were actually virtually disabled
303 # - we need to really disable them, flag the fact and return immediately
304 # - if you change this, you must alter break.S also
306 ###############################################################################
307 .balign L1_CACHE_BYTES
308 .globl __entry_kernel_external_interrupt_virtually_disabled
309 .type __entry_kernel_external_interrupt_virtually_disabled,@function
310 __entry_kernel_external_interrupt_virtually_disabled:
312 andi gr30,#~PSR_PIL,gr30
313 ori gr30,#PSR_PIL_14,gr30 ; debugging interrupts only
315 subcc gr0,gr0,gr0,icc2 ; leave Z set, clear C
318 .size __entry_kernel_external_interrupt_virtually_disabled,.-__entry_kernel_external_interrupt_virtually_disabled
320 ###############################################################################
322 # deal with re-enablement of interrupts that were pending when virtually re-enabled
323 # - set ICC2.C, re-enable the real interrupts and return
324 # - we can clear ICC2.Z because we shouldn't be here if it's not 0 [due to TIHI]
325 # - if you change this, you must alter break.S also
327 ###############################################################################
328 .balign L1_CACHE_BYTES
329 .globl __entry_kernel_external_interrupt_virtual_reenable
330 .type __entry_kernel_external_interrupt_virtual_reenable,@function
331 __entry_kernel_external_interrupt_virtual_reenable:
333 andi gr30,#~PSR_PIL,gr30 ; re-enable interrupts
335 subicc gr0,#1,gr0,icc2 ; clear Z, set C
338 .size __entry_kernel_external_interrupt_virtual_reenable,.-__entry_kernel_external_interrupt_virtual_reenable
340 ###############################################################################
342 # entry point for Software and Program interrupts generated whilst executing userspace code
344 ###############################################################################
345 .globl __entry_uspace_softprog_interrupt
346 .type __entry_uspace_softprog_interrupt,@function
347 .globl __entry_uspace_handle_mmu_fault
348 __entry_uspace_softprog_interrupt:
352 __entry_uspace_handle_mmu_fault:
355 sethi.p %hi(__kernel_frame0_ptr),gr28
356 setlo %lo(__kernel_frame0_ptr),gr28
359 # handle h/w single-step through exceptions
360 sti gr0,@(gr28,#REG__STATUS)
362 .globl __entry_uspace_softprog_interrupt_reentry
363 __entry_uspace_softprog_interrupt_reentry:
366 setlos #REG__END,gr30
369 # set up the kernel stack pointer
370 sti.p sp,@(gr28,#REG_SP)
372 sti gr0,@(gr28,#REG_GR(28))
374 stdi gr20,@(gr28,#REG_GR(20))
375 stdi gr22,@(gr28,#REG_GR(22))
381 sethi.p %hi(__entry_return_from_user_exception),gr23
382 setlo %lo(__entry_return_from_user_exception),gr23
386 .size __entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt
388 # single-stepping was disabled on entry to a TLB handler that then faulted
390 .globl __entry_uspace_handle_mmu_fault_sstep
391 __entry_uspace_handle_mmu_fault_sstep:
393 sethi.p %hi(__kernel_frame0_ptr),gr28
394 setlo %lo(__kernel_frame0_ptr),gr28
397 # flag single-step re-enablement
398 sethi #REG__STATUS_STEP,gr30
sti gr30,@(gr28,#REG__STATUS)
399 bra __entry_uspace_softprog_interrupt_reentry
403 ###############################################################################
405 # entry point for Software and Program interrupts generated whilst executing kernel code
407 ###############################################################################
408 .globl __entry_kernel_softprog_interrupt
409 .type __entry_kernel_softprog_interrupt,@function
410 __entry_kernel_softprog_interrupt:
418 .globl __entry_kernel_handle_mmu_fault
419 __entry_kernel_handle_mmu_fault:
420 # set up the stack pointer
423 sti sp,@(sp,#REG_SP-4)
426 # handle h/w single-step through exceptions
427 sti gr0,@(sp,#REG__STATUS)
429 .globl __entry_kernel_softprog_interrupt_reentry
430 __entry_kernel_softprog_interrupt_reentry:
433 setlos #REG__END,gr30
436 # set up the exception frame
437 sti.p gr28,@(sp,#REG_GR(28))
440 stdi gr20,@(gr28,#REG_GR(20))
441 stdi gr22,@(gr28,#REG_GR(22))
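	# the stack pointer will have been pre-decremented by REG__END to make
	# room for this frame and that value stashed in the REG_SP slot; the next
	# three instructions add REG__END back to recover the interrupted kernel
	# SP and store that in the frame instead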
443 ldi @(sp,#REG_SP),gr22 /* reconstruct the old SP */
444 addi gr22,#REG__END,gr22
445 sti gr22,@(sp,#REG_SP)
447 # set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
448 # - for an explanation of how it works, see: Documentation/frv/atomic-ops.txt
450 andi gr20,#~0xc0,gr20
457 sethi.p %hi(__entry_return_from_kernel_exception),gr23
458 setlo %lo(__entry_return_from_kernel_exception),gr23
461 .size __entry_kernel_softprog_interrupt,.-__entry_kernel_softprog_interrupt
463 # single-stepping was disabled on entry to a TLB handler that then faulted
465 .globl __entry_kernel_handle_mmu_fault_sstep
466 __entry_kernel_handle_mmu_fault_sstep:
467 # set up the stack pointer
470 sti sp,@(sp,#REG_SP-4)
473 # flag single-step re-enablement
474 sethi #REG__STATUS_STEP,gr30
475 sti gr30,@(sp,#REG__STATUS)
476 bra __entry_kernel_softprog_interrupt_reentry
480 ###############################################################################
482 # the rest of the kernel entry point code
483 # - on arriving here, the following registers should be set up:
484 # GR1 - kernel stack pointer
485 # GR7 - syscall number (trap 0 only)
486 # GR8-13 - syscall args (trap 0 only)
# GR20 - saved TBR
# GR21 - saved PC
# GR22 - saved PSR
490 # GR23 - return handler address
491 # GR28 - exception frame on stack
492 # SCR2 - saved EAR0 where applicable (clobbered by ICI & ICEF insns on FR451)
493 # PSR - PSR.S 1, PSR.ET 0
495 ###############################################################################
496 .globl __entry_common
497 .type __entry_common,@function
501 # finish building the exception frame
502 stdi gr2,@(gr28,#REG_GR(2))
503 stdi gr4,@(gr28,#REG_GR(4))
504 stdi gr6,@(gr28,#REG_GR(6))
505 stdi gr8,@(gr28,#REG_GR(8))
506 stdi gr10,@(gr28,#REG_GR(10))
507 stdi gr12,@(gr28,#REG_GR(12))
508 stdi gr14,@(gr28,#REG_GR(14))
509 stdi gr16,@(gr28,#REG_GR(16))
510 stdi gr18,@(gr28,#REG_GR(18))
511 stdi gr24,@(gr28,#REG_GR(24))
512 stdi gr26,@(gr28,#REG_GR(26))
513 sti gr29,@(gr28,#REG_GR(29))
514 stdi gr30,@(gr28,#REG_GR(30))
524 andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
525 andi.p gr22,#~(PSR_PS|PSR_S),gr6
528 andi gr5,#~PSR_ET,gr5
530 sti gr20,@(gr28,#REG_TBR)
531 sti gr21,@(gr28,#REG_PC)
532 sti gr5 ,@(gr28,#REG_PSR)
533 sti gr23,@(gr28,#REG_ISR)
534 stdi gr24,@(gr28,#REG_CCR)
535 stdi gr26,@(gr28,#REG_LR)
536 sti gr4 ,@(gr28,#REG_SYSCALLNO)
540 stdi gr4,@(gr28,#REG_IACC0)
544 stdi.p gr4,@(gr28,#REG_GNER0)
546 # set up virtual interrupt disablement
547 subicc gr0,#1,gr0,icc2 /* clear Z flag, set C flag */
549 # set up kernel global registers
550 sethi.p %hi(__kernel_current_task),gr5
551 setlo %lo(__kernel_current_task),gr5
552 sethi.p %hi(_gp),gr16
555 ldi @(gr29,#4),gr15 ; __current_thread_info = current->thread_info
557 # switch to the kernel trap table
558 sethi.p %hi(__entry_kerneltrap_table),gr6
559 setlo %lo(__entry_kerneltrap_table),gr6
562 # make sure we (the kernel) get div-zero and misalignment exceptions
563 setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
566 # clear power-saving mode flags
568 andi gr4,#~HSR0_PDM,gr4
571 # multiplex again using old TBR as a guide
573 sethi %hi(__entry_vector_table),gr6
575 setlo %lo(__entry_vector_table),gr6
583 .size __entry_common,.-__entry_common
585 ###############################################################################
587 # handle instruction MMU fault
589 ###############################################################################
591 .globl __entry_insn_mmu_fault
592 __entry_insn_mmu_fault:
598 # now that we've accessed the exception regs, we can enable exceptions
603 sethi.p %hi(do_page_fault),gr5
604 setlo %lo(do_page_fault),gr5
605 jmpl @(gr5,gr0) ; call do_page_fault(0,esr0,ear0)
609 ###############################################################################
611 # handle instruction access error
613 ###############################################################################
614 .globl __entry_insn_access_error
615 __entry_insn_access_error:
617 sethi.p %hi(insn_access_error),gr5
618 setlo %lo(insn_access_error),gr5
623 # now that we've accessed the exception regs, we can enable exceptions
627 jmpl @(gr5,gr0) ; call insn_access_error(esfr1,epcr0,esr0)
629 ###############################################################################
631 # handle various instructions of dubious legality
633 ###############################################################################
634 .globl __entry_unsupported_trap
635 .globl __entry_illegal_instruction
636 .globl __entry_privileged_instruction
637 .globl __entry_debug_exception
638 __entry_unsupported_trap:
640 sti gr21,@(gr28,#REG_PC)
641 __entry_illegal_instruction:
642 __entry_privileged_instruction:
643 __entry_debug_exception:
645 sethi.p %hi(illegal_instruction),gr5
646 setlo %lo(illegal_instruction),gr5
651 # now that we've accessed the exception regs, we can enable exceptions
655 jmpl @(gr5,gr0) ; call ill_insn(esfr1,epcr0,esr0)
657 ###############################################################################
659 # handle atomic operation emulation for userspace
661 ###############################################################################
662 .globl __entry_atomic_op
665 sethi.p %hi(atomic_operation),gr5
666 setlo %lo(atomic_operation),gr5
671 # now that we've accessed the exception regs, we can enable exceptions
675 jmpl @(gr5,gr0) ; call atomic_operation(esfr1,epcr0,esr0)
677 ###############################################################################
679 # handle media exception
681 ###############################################################################
682 .globl __entry_media_exception
683 __entry_media_exception:
685 sethi.p %hi(media_exception),gr5
686 setlo %lo(media_exception),gr5
690 # now that we've accessed the exception regs, we can enable exceptions
694 jmpl @(gr5,gr0) ; call media_excep(msr0,msr1)
696 ###############################################################################
698 # handle data MMU fault
699 # handle data DAT fault (write-protect exception)
701 ###############################################################################
703 .globl __entry_data_mmu_fault
704 __entry_data_mmu_fault:
705 .globl __entry_data_dat_fault
706 __entry_data_dat_fault:
710 movsg scr2,gr10 ; saved EAR0
712 # now that we've accessed the exception regs, we can enable exceptions
717 sethi.p %hi(do_page_fault),gr5
718 setlo %lo(do_page_fault),gr5
719 jmpl @(gr5,gr0) ; call do_page_fault(1,esr0,ear0)
722 ###############################################################################
724 # handle data and instruction access exceptions
726 ###############################################################################
727 .globl __entry_insn_access_exception
728 .globl __entry_data_access_exception
729 __entry_insn_access_exception:
730 __entry_data_access_exception:
732 sethi.p %hi(memory_access_exception),gr5
733 setlo %lo(memory_access_exception),gr5
735 movsg scr2,gr9 ; saved EAR0
738 # now that we've accessed the exception regs, we can enable exceptions
742 jmpl @(gr5,gr0) ; call memory_access_error(esr0,ear0,epcr0)
744 ###############################################################################
746 # handle data access error
748 ###############################################################################
749 .globl __entry_data_access_error
750 __entry_data_access_error:
752 sethi.p %hi(data_access_error),gr5
753 setlo %lo(data_access_error),gr5
758 # now that we've accessed the exception regs, we can enable exceptions
762 jmpl @(gr5,gr0) ; call data_access_error(esfr1,esr15,ear15)
764 ###############################################################################
766 # handle data store error
768 ###############################################################################
769 .globl __entry_data_store_error
770 __entry_data_store_error:
772 sethi.p %hi(data_store_error),gr5
773 setlo %lo(data_store_error),gr5
777 # now that we've accessed the exception regs, we can enable exceptions
781 jmpl @(gr5,gr0) ; call data_store_error(esfr1,esr14)
783 ###############################################################################
785 # handle division exception
787 ###############################################################################
788 .globl __entry_division_exception
789 __entry_division_exception:
791 sethi.p %hi(division_exception),gr5
792 setlo %lo(division_exception),gr5
797 # now that we've accessed the exception regs, we can enable exceptions
801 jmpl @(gr5,gr0) ; call div_excep(esfr1,esr0,isr)
803 ###############################################################################
805 # handle compound exception
807 ###############################################################################
808 .globl __entry_compound_exception
809 __entry_compound_exception:
811 sethi.p %hi(compound_exception),gr5
812 setlo %lo(compound_exception),gr5
820 # now that we've accessed the exception regs, we can enable exceptions
824 jmpl @(gr5,gr0) ; call comp_excep(esfr1,esr0,esr14,esr15,msr0,msr1)
826 ###############################################################################
828 # handle interrupts and NMIs
830 ###############################################################################
831 .globl __entry_do_IRQ
835 # we can enable exceptions
841 .globl __entry_do_NMI
845 # we can enable exceptions
851 ###############################################################################
853 # the return path for a newly forked child process
854 # - __switch_to() saved the old current pointer in GR8 for us
856 ###############################################################################
862 # fork & co. return 0 to child
866 .globl ret_from_kernel_thread
867 ret_from_kernel_thread:
868 lddi.p @(gr28,#REG_GR(8)),gr20
874 ###################################################################################################
876 # Return to user mode is not as complex as all this looks,
877 # but we want the default path for a system call return to
878 # go as quickly as possible which is why some of this is
879 # less clear than it otherwise should be.
881 ###################################################################################################
882 .balign L1_CACHE_BYTES
886 movsg psr,gr4 ; enable exceptions
890 sti gr7,@(gr28,#REG_SYSCALLNO)
891 sti.p gr8,@(gr28,#REG_ORIG_GR8)
893 subicc gr7,#nr_syscalls,gr0,icc0
894 bnc icc0,#0,__syscall_badsys
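	# the syscall number is range-checked against nr_syscalls: out-of-range
	# numbers are routed to __syscall_badsys (which just stores an error
	# return in the frame), while valid ones fall through to be looked up in
	# sys_call_table (one 4-byte .long per syscall) and dispatched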
896 ldi @(gr15,#TI_FLAGS),gr4
897 andicc gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
898 bne icc0,#0,__syscall_trace_entry
902 sethi %hi(sys_call_table),gr5
903 setlo %lo(sys_call_table),gr5
908 ###############################################################################
910 # return to interrupted process
912 ###############################################################################
916 # keep current PSR in GR23
919 ldi @(gr28,#REG_PSR),gr22
921 sti.p gr8,@(gr28,#REG_GR(8)) ; save return value
923 # rebuild saved psr - execve will change it for init/main.c
925 andi.p gr22,#~PSR_PS,gr22
928 ori.p gr22,#PSR_S,gr22
930 # make sure we don't miss an interrupt setting need_resched or sigpending between
931 # sampling and the RETT
932 ori gr23,#PSR_PIL_14,gr23
935 ldi @(gr15,#TI_FLAGS),gr4
936 andicc gr4,#_TIF_ALLWORK_MASK,gr0,icc0
937 bne icc0,#0,__syscall_exit_work
939 # restore all registers and return
940 __entry_return_direct:
943 andi gr22,#~PSR_ET,gr22
946 ldi @(gr28,#REG_ISR),gr23
947 lddi @(gr28,#REG_CCR),gr24
948 lddi @(gr28,#REG_LR) ,gr26
949 ldi @(gr28,#REG_PC) ,gr21
950 ldi @(gr28,#REG_TBR),gr20
960 lddi @(gr28,#REG_GNER0),gr4
964 lddi @(gr28,#REG_IACC0),gr4
968 lddi @(gr28,#REG_GR(4)) ,gr4
969 lddi @(gr28,#REG_GR(6)) ,gr6
970 lddi @(gr28,#REG_GR(8)) ,gr8
971 lddi @(gr28,#REG_GR(10)),gr10
972 lddi @(gr28,#REG_GR(12)),gr12
973 lddi @(gr28,#REG_GR(14)),gr14
974 lddi @(gr28,#REG_GR(16)),gr16
975 lddi @(gr28,#REG_GR(18)),gr18
976 lddi @(gr28,#REG_GR(20)),gr20
977 lddi @(gr28,#REG_GR(22)),gr22
978 lddi @(gr28,#REG_GR(24)),gr24
979 lddi @(gr28,#REG_GR(26)),gr26
980 ldi @(gr28,#REG_GR(29)),gr29
981 lddi @(gr28,#REG_GR(30)),gr30
983 # check to see if a debugging return is required
986 ldi @(gr28,#REG__STATUS),gr3
987 andicc gr3,#REG__STATUS_STEP,gr0,icc0
988 bne icc0,#0,__entry_return_singlestep
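	# if the step flag was set, the remaining restore is finished on the
	# __entry_return_singlestep path below, which returns via break.S so that
	# h/w single-stepping can be re-armed before the final return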
991 ldi @(gr28,#REG_SP) ,sp
992 lddi @(gr28,#REG_GR(2)) ,gr2
993 ldi @(gr28,#REG_GR(28)),gr28
1000 # store the current frame in the workram on the FR451
1002 sethi.p %hi(0xfe800000),gr28
1003 setlo %lo(0xfe800000),gr28
1005 stdi gr2,@(gr28,#REG_GR(2))
1006 stdi gr4,@(gr28,#REG_GR(4))
1007 stdi gr6,@(gr28,#REG_GR(6))
1008 stdi gr8,@(gr28,#REG_GR(8))
1009 stdi gr10,@(gr28,#REG_GR(10))
1010 stdi gr12,@(gr28,#REG_GR(12))
1011 stdi gr14,@(gr28,#REG_GR(14))
1012 stdi gr16,@(gr28,#REG_GR(16))
1013 stdi gr18,@(gr28,#REG_GR(18))
1014 stdi gr24,@(gr28,#REG_GR(24))
1015 stdi gr26,@(gr28,#REG_GR(26))
1016 sti gr29,@(gr28,#REG_GR(29))
1017 stdi gr30,@(gr28,#REG_GR(30))
1020 sti gr30,@(gr28,#REG_TBR)
1022 sti gr30,@(gr28,#REG_PC)
1024 sti gr30,@(gr28,#REG_PSR)
1026 sti gr30,@(gr28,#REG_ISR)
1029 stdi gr30,@(gr28,#REG_CCR)
1032 stdi gr30,@(gr28,#REG_LR)
1033 sti gr0 ,@(gr28,#REG_SYSCALLNO)
1039 # return via break.S
1040 __entry_return_singlestep:
1042 lddi @(gr28,#REG_GR(2)) ,gr2
1043 ldi @(gr28,#REG_SP) ,sp
1044 ldi @(gr28,#REG_GR(28)),gr28
1047 .globl __entry_return_singlestep_breaks_here
1048 __entry_return_singlestep_breaks_here:
1052 ###############################################################################
1054 # return to a process interrupted in kernel space
1055 # - we need to consider preemption if that is enabled
1057 ###############################################################################
1058 .balign L1_CACHE_BYTES
1059 __entry_return_from_kernel_exception:
1062 ori gr23,#PSR_PIL_14,gr23
1064 bra __entry_return_direct
1066 .balign L1_CACHE_BYTES
1067 __entry_return_from_kernel_interrupt:
1070 ori gr23,#PSR_PIL_14,gr23
1073 #ifdef CONFIG_PREEMPT
1074 ldi @(gr15,#TI_PRE_COUNT),gr5
1075 subicc gr5,#0,gr0,icc0
1076 bne icc0,#0,__entry_return_direct ; preempt_count != 0: don't preempt
1078 subcc gr0,gr0,gr0,icc2 /* set Z and clear C */
1079 call preempt_schedule_irq
1081 bra __entry_return_direct
1084 ###############################################################################
1086 # perform work that needs to be done immediately before resumption
1088 ###############################################################################
1089 .globl __entry_return_from_user_exception
1090 .balign L1_CACHE_BYTES
1091 __entry_return_from_user_exception:
1094 __entry_resume_userspace:
1095 # make sure we don't miss an interrupt setting need_resched or sigpending between
1096 # sampling and the RETT
1098 ori gr23,#PSR_PIL_14,gr23
1101 __entry_return_from_user_interrupt:
1103 ldi @(gr15,#TI_FLAGS),gr4
1104 andicc gr4,#_TIF_WORK_MASK,gr0,icc0
1105 beq icc0,#1,__entry_return_direct
1107 __entry_work_pending:
1109 andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
1110 beq icc0,#1,__entry_work_notifysig
1112 __entry_work_resched:
1115 andi gr23,#~PSR_PIL,gr23
1119 ori gr23,#PSR_PIL_14,gr23
1123 ldi @(gr15,#TI_FLAGS),gr4
1124 andicc gr4,#_TIF_WORK_MASK,gr0,icc0
1125 beq icc0,#1,__entry_return_direct
1126 andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
1127 bne icc0,#1,__entry_work_resched
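	# loop back into schedule() for as long as TIF_NEED_RESCHED remains set
	# after the interrupt-enabled window; once it is clear, any remaining work
	# flags indicate signal/notification handling, done below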
1129 __entry_work_notifysig:
1132 call do_notify_resume
1133 bra __entry_resume_userspace
1135 # perform syscall entry tracing
1136 __syscall_trace_entry:
1138 call syscall_trace_entry
1140 lddi.p @(gr28,#REG_GR(8)) ,gr8
1141 ori gr8,#0,gr7 ; syscall_trace_entry() returned new syscallno
1142 lddi @(gr28,#REG_GR(10)),gr10
1143 lddi.p @(gr28,#REG_GR(12)),gr12
1145 subicc gr7,#nr_syscalls,gr0,icc0
1146 bnc icc0,#0,__syscall_badsys
1149 # perform syscall exit tracing
1150 __syscall_exit_work:
1152 andicc gr22,#PSR_PS,gr0,icc1 ; don't handle on return to kernel mode
1153 andicc.p gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
1154 bne icc1,#0,__entry_return_direct
1155 beq icc0,#1,__entry_work_pending
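	# two conditions were tested in parallel above: icc1 holds the PSR_PS test
	# (a return to kernel mode needs no exit work, so go straight back) and
	# icc0 holds the _TIF_SYSCALL_TRACE test (clear means only the generic
	# work flags need servicing); otherwise fall through to syscall exit
	# tracing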
1158 andi gr23,#~PSR_PIL,gr23 ; could let syscall_trace_exit() call schedule()
1161 call syscall_trace_exit
1162 bra __entry_resume_userspace
1167 sti gr8,@(gr28,#REG_GR(8)) ; save return value
1168 bra __entry_resume_userspace
1171 ###############################################################################
1173 # syscall vector table
1175 ###############################################################################
1178 .globl sys_call_table
1180 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
1185 .long sys_open /* 5 */
1190 .long sys_unlink /* 10 */
1195 .long sys_chmod /* 15 */
1197 .long sys_ni_syscall /* old break syscall holder */
1200 .long sys_getpid /* 20 */
1205 .long sys_ni_syscall // sys_stime /* 25 */
1210 .long sys_utime /* 30 */
1211 .long sys_ni_syscall /* old stty syscall holder */
1212 .long sys_ni_syscall /* old gtty syscall holder */
1215 .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
1220 .long sys_rmdir /* 40 */
1224 .long sys_ni_syscall /* old prof syscall holder */
1225 .long sys_brk /* 45 */
1228 .long sys_ni_syscall // sys_signal
1230 .long sys_getegid16 /* 50 */
1232 .long sys_umount /* recycled never used phys() */
1233 .long sys_ni_syscall /* old lock syscall holder */
1235 .long sys_fcntl /* 55 */
1236 .long sys_ni_syscall /* old mpx syscall holder */
1238 .long sys_ni_syscall /* old ulimit syscall holder */
1239 .long sys_ni_syscall /* old old uname syscall */
1240 .long sys_umask /* 60 */
1245 .long sys_getpgrp /* 65 */
1248 .long sys_ni_syscall // sys_sgetmask
1249 .long sys_ni_syscall // sys_ssetmask
1250 .long sys_setreuid16 /* 70 */
1251 .long sys_setregid16
1252 .long sys_sigsuspend
1253 .long sys_ni_syscall // sys_sigpending
1254 .long sys_sethostname
1255 .long sys_setrlimit /* 75 */
1256 .long sys_ni_syscall // sys_old_getrlimit
1258 .long sys_gettimeofday
1259 .long sys_settimeofday
1260 .long sys_getgroups16 /* 80 */
1261 .long sys_setgroups16
1262 .long sys_ni_syscall /* old_select slot */
1265 .long sys_readlink /* 85 */
1269 .long sys_ni_syscall // old_readdir
1270 .long sys_ni_syscall /* 90 */ /* old_mmap slot */
1275 .long sys_fchown16 /* 95 */
1276 .long sys_getpriority
1277 .long sys_setpriority
1278 .long sys_ni_syscall /* old profil syscall holder */
1280 .long sys_fstatfs /* 100 */
1281 .long sys_ni_syscall /* ioperm for i386 */
1282 .long sys_socketcall
1285 .long sys_getitimer /* 105 */
1289 .long sys_ni_syscall /* obsolete olduname() syscall */
1290 .long sys_ni_syscall /* iopl for i386 */ /* 110 */
1292 .long sys_ni_syscall /* obsolete idle() syscall */
1293 .long sys_ni_syscall /* vm86old for i386 */
1295 .long sys_swapoff /* 115 */
1300 .long sys_clone /* 120 */
1301 .long sys_setdomainname
1303 .long sys_ni_syscall /* old "cacheflush" */
1305 .long sys_mprotect /* 125 */
1306 .long sys_sigprocmask
1307 .long sys_ni_syscall /* old "create_module" */
1308 .long sys_init_module
1309 .long sys_delete_module
1310 .long sys_ni_syscall /* old "get_kernel_syms" */
1315 .long sys_sysfs /* 135 */
1316 .long sys_personality
1317 .long sys_ni_syscall /* for afs_syscall */
1318 .long sys_setfsuid16
1319 .long sys_setfsgid16
1320 .long sys_llseek /* 140 */
1325 .long sys_readv /* 145 */
1330 .long sys_mlock /* 150 */
1333 .long sys_munlockall
1334 .long sys_sched_setparam
1335 .long sys_sched_getparam /* 155 */
1336 .long sys_sched_setscheduler
1337 .long sys_sched_getscheduler
1338 .long sys_sched_yield
1339 .long sys_sched_get_priority_max
1340 .long sys_sched_get_priority_min /* 160 */
1341 .long sys_sched_rr_get_interval
1344 .long sys_setresuid16
1345 .long sys_getresuid16 /* 165 */
1346 .long sys_ni_syscall /* for vm86 */
1347 .long sys_ni_syscall /* Old sys_query_module */
1349 .long sys_ni_syscall /* Old nfsservctl */
1350 .long sys_setresgid16 /* 170 */
1351 .long sys_getresgid16
1353 .long sys_rt_sigreturn
1354 .long sys_rt_sigaction
1355 .long sys_rt_sigprocmask /* 175 */
1356 .long sys_rt_sigpending
1357 .long sys_rt_sigtimedwait
1358 .long sys_rt_sigqueueinfo
1359 .long sys_rt_sigsuspend
1360 .long sys_pread64 /* 180 */
1365 .long sys_capset /* 185 */
1366 .long sys_sigaltstack
1368 .long sys_ni_syscall /* streams1 */
1369 .long sys_ni_syscall /* streams2 */
1370 .long sys_vfork /* 190 */
1373 .long sys_truncate64
1374 .long sys_ftruncate64
1375 .long sys_stat64 /* 195 */
1380 .long sys_getgid /* 200 */
1385 .long sys_getgroups /* 205 */
1390 .long sys_setresgid /* 210 */
1395 .long sys_setfsuid /* 215 */
1397 .long sys_pivot_root
1400 .long sys_getdents64 /* 220 */
1402 .long sys_ni_syscall /* reserved for TUX */
1403 .long sys_ni_syscall /* Reserved for Security */
1405 .long sys_readahead /* 225 */
1410 .long sys_lgetxattr /* 230 */
1413 .long sys_llistxattr
1414 .long sys_flistxattr
1415 .long sys_removexattr /* 235 */
1416 .long sys_lremovexattr
1417 .long sys_fremovexattr
1419 .long sys_sendfile64
1420 .long sys_futex /* 240 */
1421 .long sys_sched_setaffinity
1422 .long sys_sched_getaffinity
1423 .long sys_ni_syscall //sys_set_thread_area
1424 .long sys_ni_syscall //sys_get_thread_area
1425 .long sys_io_setup /* 245 */
1426 .long sys_io_destroy
1427 .long sys_io_getevents
1430 .long sys_fadvise64 /* 250 */
1431 .long sys_ni_syscall
1432 .long sys_exit_group
1433 .long sys_lookup_dcookie
1434 .long sys_epoll_create
1435 .long sys_epoll_ctl /* 255 */
1436 .long sys_epoll_wait
1437 .long sys_remap_file_pages
1438 .long sys_set_tid_address
1439 .long sys_timer_create
1440 .long sys_timer_settime /* 260 */
1441 .long sys_timer_gettime
1442 .long sys_timer_getoverrun
1443 .long sys_timer_delete
1444 .long sys_clock_settime
1445 .long sys_clock_gettime /* 265 */
1446 .long sys_clock_getres
1447 .long sys_clock_nanosleep
1450 .long sys_tgkill /* 270 */
1452 .long sys_fadvise64_64
1453 .long sys_ni_syscall /* sys_vserver */
1455 .long sys_get_mempolicy
1456 .long sys_set_mempolicy
1459 .long sys_mq_timedsend
1460 .long sys_mq_timedreceive /* 280 */
1462 .long sys_mq_getsetattr
1463 .long sys_ni_syscall /* reserved for kexec */
1465 .long sys_ni_syscall /* 285 */ /* available */
1467 .long sys_request_key
1469 .long sys_ioprio_set
1470 .long sys_ioprio_get /* 290 */
1471 .long sys_inotify_init
1472 .long sys_inotify_add_watch
1473 .long sys_inotify_rm_watch
1474 .long sys_migrate_pages
1475 .long sys_openat /* 295 */
1480 .long sys_fstatat64 /* 300 */
1485 .long sys_readlinkat /* 305 */
1490 .long sys_unshare /* 310 */
1491 .long sys_set_robust_list
1492 .long sys_get_robust_list
1494 .long sys_sync_file_range
1495 .long sys_tee /* 315 */
1497 .long sys_move_pages
1499 .long sys_epoll_pwait
1500 .long sys_utimensat /* 320 */
1502 .long sys_timerfd_create
1505 .long sys_timerfd_settime /* 325 */
1506 .long sys_timerfd_gettime
1509 .long sys_epoll_create1
1510 .long sys_dup3 /* 330 */
1512 .long sys_inotify_init1
1515 .long sys_rt_tgsigqueueinfo /* 335 */
1516 .long sys_perf_event_open
1519 syscall_table_size = (. - sys_call_table)