/*
 * linux/arch/m68k/kernel/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:  added writeback completion after return from sighandler
 */
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
.globl system_call, buserr, trap, resume
.globl sys_fork, sys_clone, sys_vfork
.globl ret_from_interrupt, bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup
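| The sys_fork/sys_clone/sys_vfork stubs below each save the switch stack
| and then push a pointer to the saved pt_regs (%sp + SWITCH_STACK_SIZE)
| as the argument for the corresponding C fork helper.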
	pea	%sp@(SWITCH_STACK_SIZE)

	pea	%sp@(SWITCH_STACK_SIZE)

	pea	%sp@(SWITCH_STACK_SIZE)
ENTRY(sys_rt_sigreturn)
	movel	%sp,%sp@-		| stack frame pointer argument
	jra	ret_from_exception

	movel	%sp,%sp@-		| stack frame pointer argument
	jra	ret_from_exception
| After a fork we jump here directly from resume,
| so that %d1 contains the previous task.
| schedule_tail is now used regardless of CONFIG_SMP.
	jra	ret_from_exception
#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT
	movel	%sp,%sp@-		/* stack frame pointer argument */
	jra	ret_from_exception
	/* save top of frame */
	pea	ret_from_exception
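| On the ColdFire/nommu configurations the signal-handler return address
| points at the stubs below: each loads the proper sigreturn syscall number
| into %d0 before trapping back into the kernel.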
ENTRY(ret_from_user_signal)
	moveq	#__NR_sigreturn,%d0

ENTRY(ret_from_user_rt_signal)
	movel	#__NR_rt_sigreturn,%d0
	movel	#-ENOSYS,%sp@(PT_OFF_D0)	| needed for strace

	movel	%sp@(PT_OFF_ORIG_D0),%d0
	cmpl	#NR_syscalls,%d0

	movel	#-ENOSYS,%sp@(PT_OFF_D0)

	jra	.Lret_from_exception
ENTRY(ret_from_signal)
	movel	%curptr@(TASK_STACK),%a1
	tstb	%a1@(TINFO_FLAGS+2)
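	| the sign bit of this byte is TIF_SYSCALL_TRACE, so a negative
	| value means the tracer must be notified before we unwind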
1:	RESTORE_SWITCH_STACK

/* on 68040 complete pending writebacks if any */
	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0
	subql	#7,%d0				| bus error frame ?
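	| frame format 7 is the 68040 access-error frame; pending writebacks
	| from the faulting access must be completed before returning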
	jra	.Lret_from_exception
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)	| save top of frame
	tstb	%a1@(TINFO_FLAGS+2)		| syscall trace?
	cmpl	#NR_syscalls,%d0
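	| dispatch: the memory-indirect jbsr below fetches sys_call_table[%d0]
	| (index scaled by 4) and calls the handler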
	jbsr	@(sys_call_table,%d0:l:4)@(0)
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0
	jne	syscall_exit_work
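	| any pending work flag (syscall trace, single-step, signal,
	| reschedule, notify-resume) forces the slow exit path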
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0
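	| low byte of the flags word: TIF_SIGPENDING, TIF_NEED_RESCHED and
	| TIF_NOTIFY_RESUME all live here, so any non-zero value means work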
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)	| save top of frame
	subql	#4,%sp			| dummy return address
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrl	do_notify_resume
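	| do_notify_resume() gets a pointer to the saved registers and
	| delivers pending signals / runs notify-resume work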
	jbra	resume_userspace
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR

	jbra	resume_userspace
/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	addqb	#1,%a1@(TINFO_PREEMPT+1)
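	| byte 1 of the 32-bit preempt counter is the hardirq count, so this
	| single addqb marks the CPU as being in hard interrupt context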
	| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
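	| the format/vector word stores vector# * 4 in its low 12 bits, so the
	| 10-bit field starting at bit 4 is exactly the vector number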
	movel	%d0,%sp@-		| put vector # on stack
auto_irqhandler_fixup = . + 2
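	| the symbol above addresses the operand of the following jsr (2 bytes
	| past the opcode) so platform setup code can patch in its own handler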
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack
	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
ret_from_last_interrupt:
	moveq	#(~ALLOWINT>>8)&0xff,%d0
	andb	%sp@(PT_OFF_SR),%d0
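	| %d0 now holds the interrupted context's IPL bits; softirqs are only
	| run here if that context had interrupts fully enabled (IPL 0)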
	/* check if we need to do software interrupts */
	tstl	irq_stat+CPUSTAT_SOFTIRQ_PENDING
	jeq	.Lret_from_exception
	pea	ret_from_exception
/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	addqb	#1,%a1@(TINFO_PREEMPT+1)
	| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2
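	| like auto_irqhandler_fixup: names an instruction operand two bytes in,
	| so the vector base used for user interrupts can be adjusted at run time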
	movel	%d0,%sp@-		| put vector # on stack
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack

	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	addqb	#1,%a1@(TINFO_PREEMPT+1)

	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */
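	/*
	 * resume() is the low-level context switch: it saves the outgoing
	 * task's SR, FS, USP, non-scratch registers, kernel stack pointer and
	 * FPU state into its thread_struct, then reloads the same state for
	 * the incoming task and returns on the new task's kernel stack.
	 */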
	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)
	/* it is better to use a movel here instead of a movew 8*) */
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)
	/* save non-scratch registers on stack */

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)
	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)
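	/* fsave dumps the FPU's internal frame; the tests below skip saving
	   the programmer-visible FP registers when that frame is null, i.e.
	   the FPU has not been used since the last null frestore */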
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
#if !defined(CPU_M68060_ONLY)
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */

	/* switch to new task (a1 contains new task) */
	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
#if !defined(CPU_M68060_ONLY)
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FS),%a0

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */