/*
 * linux/arch/unicore32/kernel/entry.S
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Low-level vector interface routines
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/unistd.h>
#include <generated/asm-offsets.h>
#include "debug-macro.S"
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
#define S_OFF		8
/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */

#ifdef CONFIG_FRAME_POINTER
	.macro	alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
	ldw	\rtemp, .LCcralign
	movc	p0.c1, \rtemp, #0
	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mov	\rtemp, asr
	xor	\rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
	mov.a	asr, \rtemp			@ switch to the SUSR mode

	ldw	sp, [\rd+], #\offset		@ load sp_user
	ldw	lr, [\rd+], #\offset + 4	@ load lr_user

	xor	\rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
	mov.a	asr, \rtemp			@ switch back to the PRIV mode
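	@ (sp and lr are banked per processor mode, so the loads above,
	@  done while asr selects SUSR mode, target the user copies)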
	.macro	priv_exit, rpsr
	mov.a	bsr, \rpsr
	ldm.w	(r0 - r15), [sp]+
	ldm.b	(r16 - pc), [sp]+		@ load r0 - pc, asr
	.macro	restore_user_regs, fast = 0, offset = 0
	ldw	r1, [sp+], #\offset + S_PSR	@ get calling asr
	ldw	lr, [sp+], #\offset + S_PC	@ get pc
	mov.a	bsr, r1				@ save in bsr_priv
	.if	\fast
	add	sp, sp, #\offset + S_R1		@ r0 is syscall return value
	ldm.w	(r1 - r15), [sp]+		@ get calling r1 - r15
	ldur	(r16 - lr), [sp]+		@ get calling r16 - lr
	.else
	ldm.w	(r0 - r15), [sp]+		@ get calling r0 - r15
	ldur	(r16 - lr), [sp]+		@ get calling r16 - lr
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_R16
	mov.a	pc, lr				@ return
						@ and move bsr_priv into asr
	.macro	get_thread_info, rd

	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
	ldw	\base, =(PKUNITY_INTC_BASE)
	ldw	\irqstat, [\base+], #0xC	@ INTC_ICIP
	ldw	\tmp, [\base+], #0x4		@ INTC_ICMR
	and.a	\irqstat, \irqstat, \tmp
	beq	1001f
	cntlz	\irqnr, \irqstat
	rsub	\irqnr, \irqnr, #31
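	@ irqnr = 31 - clz(irqstat): the highest set (i.e. highest-numbered)
	@ pending source is the one that gets dispatched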
1001:	/* EQ will be set if no irqs pending */

#ifdef CONFIG_DEBUG_LL
	.macro	printreg, reg, temp
	stm	(r0-r3), [\temp]+
	stw	lr, [\temp+], #0x10
	ldm	(r0-r3), [\temp]+
	ldw	lr, [\temp+], #0x10
901:	.word	0, 0, 0, 0, 0		@ r0-r3, lr
902:	.asciz	": epip4d\n"
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r21		@ syscall number
tbl	.req	r22		@ syscall table pointer
why	.req	r22		@ Linux syscall (!= 0)
tsk	.req	r23		@ current thread_info
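@ (tbl and why can share r22 because the table pointer is no longer needed
@  once a syscall has been dispatched, at which point r22 is reused to
@  record whether this was a real syscall)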
/*
 * Interrupt handling.  Preserves r17, r18, r19
 */
1:	get_irqnr_and_base r0, r6, r5, lr
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	sub	sp, sp, #(S_FRAME_SIZE - 4)
	stm	(r1 - r15), [sp]+
	stm	(r16 - r28), [r5]+

	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""  ""  ""
	add	r0, sp, #(S_FRAME_SIZE - 4)
	stw.w	r1, [sp+], #-4		@ save the "real" r0 copied
					@ from the exception stack

	@ We are now ready to fill in the remaining blanks on the stack:
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - bsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	sub	sp, sp, #S_FRAME_SIZE
	stm	(r1 - r15), [sp+]
	stm	(r16 - r28), [r4]+

	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""  ""  ""

	stw	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@ We are now ready to fill in the remaining blanks on the stack:
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - bsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_user and lr_user

	@ Enable the alignment trap while in kernel mode

	@ Clear FP to mark the first stack frame
@ __invalid - generic code for failed exception
@ (re-entrant version of handlers)
	sub	sp, sp, #S_FRAME_SIZE
	stm	(r1 - r15), [sp+]
	stm	(r16 - r28, sp, lr), [r1]+

	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""  ""  ""  ""
	stw	r4, [sp]		@ save preserved r0
	stm	(r5 - r7), [r0]+	@ lr_<exception>,
					@ asr_<exception>, "old_r0"
	@ get ready to re-enable interrupts if appropriate
	cand.a	r3, #PSR_I_BIT
	andn	r17, r17, #PSR_I_BIT

	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context asr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	movc	r1, p0.c3, #0		@ get FSR
	movc	r0, p0.c4, #0		@ get FAR

	@ set desired INTR state, then call main handler

	@ INTRs off again before pulling preserved data off the stack

	@ restore BSR and restart the instruction
	ldw	r2, [sp+], #S_PSR
	priv_exit r2			@ return from exception
	nop; nop; nop; nop; nop; nop; nop; nop

	ldw	r4, [sp+], #S_PSR		@ irqs are already disabled

	priv_exit r4				@ return from exception

	mov	r0, sp				@ struct pt_regs *regs
	b	bad_mode			@ not supported
	@ re-enable interrupts if appropriate
	cand.a	r3, #PSR_I_BIT
	andn	r17, r17, #PSR_I_BIT

	@ set args, then call main handler
	@
	@  r0 - address of faulting instruction
	@  r1 - pointer to registers on stack
	@
	mov	r0, r2			@ pass address of aborted instruction
	b.l	do_PrefetchAbort	@ call abort handler

	@ INTRs off again before pulling preserved data off the stack

	@ restore BSR and restart the instruction
	ldw	r2, [sp+], #S_PSR
	priv_exit r2			@ return from exception
#ifdef CONFIG_UNICORE_FPU_F64
	cand.a	ip, #0x08000000			@ FPU exception traps?

	@ fall through to the emulation code, which returns using r19 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real extended instruction

	adr	r19, ret_from_exception

	@ fallthrough to call do_uc_f64
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r19 = normal "successful" return address
 *  r20 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
	get_thread_info r20			@ get current thread
	and	r8, r0, #0x00003c00		@ mask out CP number
	stb	r7, [r20+], #TI_USED_CP + 2	@ set appropriate used_cp[]

	@ F64 hardware support entry point.
	@  r0  = faulted instruction
	@  r19 = return address
	add	r20, r20, #TI_FPSTATE		@ r20 = workspace
	cff	r1, s31				@ get fpu FPSCR
	andn	r2, r1, #0x08000000
	ctf	r2, s31				@ clear bit 27
	mov	r2, sp				@ nothing stacked - regdump is at TOS
	mov	lr, r19				@ setup for a return to the user code

	@ Now call the C code to package up the bounce to the support code
	@   r0 holds the trigger instruction
	@   r1 holds the FPSCR value
	@   r2 pointer to register dump
	@ Call the processor-specific abort handler:
	@
	@  r2 - aborted context pc
	@  r3 - aborted context asr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	movc	r1, p0.c3, #0		@ get FSR
	movc	r0, p0.c4, #0		@ get FAR

	@ INTRs on, then call the main handler
	adr	lr, ret_from_exception

	mov	r0, r2			@ pass address of aborted instruction.
	enable_irq r1			@ Enable interrupts
	b.l	do_PrefetchAbort	@ call abort handler
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)

ENDPROC(ret_from_exception)
/*
 * Register switch for UniCore V2 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	stm.w	(r4 - r15), [ip]+
	stm.w	(r16 - r27, sp, lr), [ip]+
#ifdef CONFIG_UNICORE_FPU_F64
	add	ip, r1, #TI_FPSTATE
	sfm.w	(f0  - f7 ), [ip]+
	sfm.w	(f8  - f15), [ip]+
	sfm.w	(f16 - f23), [ip]+
	sfm.w	(f24 - f31), [ip]+

	add	ip, r2, #TI_FPSTATE
	lfm.w	(f0  - f7 ), [ip]+
	lfm.w	(f8  - f15), [ip]+
	lfm.w	(f16 - f23), [ip]+
	lfm.w	(f24 - f31), [ip]+
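	@ Restore the callee-saved context of the next thread; loading pc
	@ from its TI_CPU_SAVE area below resumes it where it was last
	@ switched out.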
	add	ip, r2, #TI_CPU_SAVE
	ldm.w	(r4 - r15), [ip]+
	ldm	(r16 - r27, sp, pc), [ip]+	@ Load all regs saved previously
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the PRIV
 * stack.
 */
	disable_irq r1				@ disable interrupts
	ldw	r1, [tsk+], #TI_FLAGS
	cand.a	r1, #_TIF_WORK_MASK
	bne	fast_work_pending

	@ fast_restore_user_regs
	restore_user_regs fast = 1, offset = S_OFF
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
	stw.w	r0, [sp+], #S_R0+S_OFF		@ returned r0

	cand.a	r1, #_TIF_NEED_RESCHED
	cand.a	r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME

	mov	r2, why				@ 'syscall'
	cand.a	r1, #_TIF_SIGPENDING		@ delivering a signal?
	cmovne	why, #0				@ prevent further restarts
	b	ret_slow_syscall		@ Check work again
559 * "slow" syscall return path. "why" tells us if this was a real syscall.
563 disable_irq r1 @ disable interrupts
564 get_thread_info tsk @ epip4d, one path error?!
565 ldw r1, [tsk+], #TI_FLAGS
566 cand.a r1, #_TIF_WORK_MASK
569 @ slow_restore_user_regs
570 restore_user_regs fast = 0, offset = 0
/*
 * This is how we return from a fork.
 */
	ldw	r1, [tsk+], #TI_FLAGS		@ check for syscall tracing
	cand.a	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	mov	r0, #1				@ trace exit [IP = 1]
ENDPROC(ret_from_fork)
/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */
	sub	sp, sp, #S_FRAME_SIZE
	stm	(r0 - r15), [sp]+		@ Calling r0 - r15
	stm	(r16 - r28), [r8]+		@ Calling r16 - r28
	stur	(sp, lr), [r8-]			@ Calling sp, lr
	mov	r8, bsr				@ called from non-REAL mode
	stw	lr, [sp+], #S_PC		@ Save calling PC
	stw	r8, [sp+], #S_PSR		@ Save ASR
	stw	r0, [sp+], #S_OLD_R0		@ Save OLD_R0
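	@ (OLD_R0 keeps the original first argument so that syscall restart
	@  can replay the call after signal handling)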
	/*
	 * Get the system call number.
	 */
	sub	ip, lr, #4			@ address of the SWI instruction
	ldw.u	scno, [ip]			@ get SWI instruction
#ifdef CONFIG_ALIGNMENT_TRAP
	ldw	ip, __cr_alignment
	movc	p0.c1, ip, #0			@ update control register
	ldw	tbl, =sys_call_table		@ load syscall table pointer

	andn	scno, scno, #0xff000000		@ mask off SWI op-code
	andn	scno, scno, #0x00ff0000		@ mask off SWI op-code
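	@ (the two andn operations clear the top 16 opcode bits, leaving the
	@  syscall number encoded in the low bits of the SWI instruction)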
	stm.w	(r4, r5), [sp-]			@ push fifth and sixth args
	ldw	ip, [tsk+], #TI_FLAGS		@ check for syscall tracing
	cand.a	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?

	csub.a	scno, #__NR_syscalls		@ check upper syscall limit
	adr	lr, ret_fast_syscall		@ return address
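	@ (each table entry is one word, so scno is scaled by 4 below; loading
	@  the entry straight into pc performs the call, and the routine
	@  returns to the address placed in lr above)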
	ldw	pc, [tbl+], scno << #2		@ call sys_* routine

2:	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func
/*
 * This is the really slow path.  We're going to be doing
 * context switches, and waiting for our parent to respond.
 */
	mov	r0, #0				@ trace entry [IP = 0]

	adr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	csub.a	scno, #__NR_syscalls		@ check upper syscall limit
	ldm	(r0 - r3), [r1]+		@ have to reload r0 - r3
	ldw	pc, [tbl+], scno << #2		@ call sys_* routine

	stw.w	r0, [sp+], #S_R0 + S_OFF	@ save returned r0
	mov	r0, #1				@ trace exit [IP = 1]
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
ENTRY(sys_rt_sigreturn)
	mov	why, #0			@ prevent syscall restart handling
ENDPROC(sys_rt_sigreturn)

ENTRY(sys_sigaltstack)
	ldw	r2, [sp+], #S_OFF + S_SP
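	@ (the user sp saved in pt_regs becomes the third argument, r2)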
ENDPROC(sys_sigaltstack)
/*
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldw's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in INTR mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode

	@ Save r0, lr_<exception> (parent PC) and bsr_<exception>
	stw	lr, [sp+], #4		@ save r0, lr
	stw	lr, [sp+], #8		@ save bsr

	@ Prepare for PRIV mode.  INTRs remain disabled.
	mov	r0, asr
	xor	r0, r0, #(\mode ^ PRIV_MODE)
	mov.a	bsr, r0
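	@ (the xor flips exactly the bits that differ between \mode and
	@  PRIV_MODE, so bsr now holds a PRIV-mode asr; the mov.a pc, lr
	@  below restores it and enters the handler in PRIV mode)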
	@ the branch table must immediately follow this code
	ldw	lr, [pc+], lr << #2
	mov.a	pc, lr			@ branch to handler in PRIV mode
ENDPROC(vector_\name)

	@ handler addresses follow this label
/*
 * Interrupt dispatcher
 */
	vector_stub	intr, INTR_MODE

	.long	__intr_user			@  0  (USER)
	.long	__intr_priv			@  3  (PRIV)
/*
 * Data abort dispatcher
 * Enter in ABT mode, bsr = USER ASR, lr = USER PC
 */
	vector_stub	dabt, ABRT_MODE

	.long	__dabt_user			@  0  (USER)
	.long	__invalid			@  2  (INTR)
	.long	__dabt_priv			@  3  (PRIV)
/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, bsr = USER ASR, lr = USER PC
 */
	vector_stub	pabt, ABRT_MODE

	.long	__pabt_user			@  0  (USER)
	.long	__invalid			@  2  (INTR)
	.long	__pabt_priv			@  3  (PRIV)
/*
 * Undef instr entry dispatcher
 * Enter in EXTN mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
 */
	vector_stub	extn, EXTN_MODE

	.long	__extn_user			@  0  (USER)
	.long	__invalid			@  2  (INTR)
	.long	__extn_priv			@  3  (PRIV)
/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
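	@ (the vectors and the stubs are copied to the vectors page 0x200
	@  bytes apart; adding stubs_offset to each branch target keeps the
	@  PC-relative branches in the copied vectors pointing at the copied
	@  stubs rather than at their link-time addresses)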
	.globl	__vectors_start
__vectors_start:
	b	vector_extn + stubs_offset
	ldw	pc, .LCvswi + stubs_offset
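	@ (the SWI entry is reached through an absolute address stored in the
	@  .LCvswi literal rather than a relative branch)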
	b	vector_pabt + stubs_offset
	b	vector_dabt + stubs_offset
	b	vector_intr + stubs_offset

	.globl	cr_no_alignment