2 * linux/arch/m68knommu/platform/68328/entry.S
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file README.legal in the main directory of this archive
10 * Linux/m68k support by Hamish Macdonald
13 #include <linux/sys.h>
14 #include <linux/linkage.h>
15 #include <asm/thread_info.h>
16 #include <asm/unistd.h>
17 #include <asm/errno.h>
18 #include <asm/setup.h>
19 #include <asm/segment.h>
20 #include <asm/traps.h>
21 #include <asm/asm-offsets.h>
22 #include <asm/entry.h>
/*
 * Return-path entry points exported to the rest of the kernel
 * (signal delivery, interrupt exit, context-switch code).
 */
28 .globl ret_from_exception
29 .globl ret_from_signal
31 .globl ret_from_interrupt
/*
 * System-call entry fragments (enclosing labels and several lines are
 * elided in this excerpt).  PT_D0 is the saved-d0 slot of the exception
 * frame: it is preloaded with -ENOSYS so that a bad syscall number — or
 * a tracer reading the frame — sees the default error return.
 */
42 movel #-ENOSYS,%sp@(PT_D0) /* bad syscall number: return -ENOSYS */
43 jra ret_from_exception
46 movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/
52 movel %sp@(PT_ORIG_D0),%d1 /* d1 = original d0, i.e. the syscall number */
57 lea sys_call_table, %a0 /* a0 = syscall dispatch table; indexing presumably on an elided line */
60 1: movel %d0,%sp@(PT_D0) /* save the return value */
61 subql #4,%sp /* dummy return address */
68 jra ret_from_exception
/*
 * Syscall-tracing path fragments (labels elided in this excerpt):
 * re-fetch the original syscall number, locate thread_info by rounding
 * the kernel stack pointer down to a THREAD_SIZE boundary, and test
 * TIF_SYSCALL_TRACE before dispatching through sys_call_table.
 */
73 /* save top of frame*/
78 movel %sp@(PT_ORIG_D0),%d0 /* d0 = original d0 (syscall number) */
80 movel %sp,%d1 /* get thread_info pointer */
81 andl #-THREAD_SIZE,%d1 /* thread_info lives at the base of the kernel stack */
83 btst #TIF_SYSCALL_TRACE,%a2@(TI_FLAGS) /* NOTE(review): %a2 presumably loaded from %d1 on an elided line — confirm */
88 lea sys_call_table,%a0
91 movel %d0,%sp@(PT_D0) /* save the return value*/
/*
 * Common exception-return fragments (labels elided in this excerpt).
 * Bit 5 of the byte at PT_SR is presumably the supervisor bit of the
 * saved status register's high byte — clear means the trap came from
 * user mode, so the user-return path (reschedule/signal work) is taken.
 */
94 btst #5,%sp@(PT_SR) /* check if returning to kernel*/
95 jeq Luser_return /* if so, skip resched, signals*/
101 /* only allow interrupts when we are really the last one on the*/
102 /* kernel stack, otherwise stack overflow can occur during*/
103 /* heavy interrupt load*/
106 movel %sp,%d1 /* get thread_info pointer */
107 andl #-THREAD_SIZE,%d1 /* round down: thread_info is at the stack base */
109 move %a2@(TI_FLAGS),%d1 /* thread_info->flags */
110 andl #_TIF_WORK_MASK,%d1 /* any pending work (resched/signal) bits set? */
115 movel %a2@(TI_FLAGS),%d1 /* thread_info->flags */
116 btst #TIF_NEED_RESCHED,%d1 /* reschedule requested? */
120 subql #4,%sp /* dummy return address*/
122 pea %sp@(SWITCH_STACK_SIZE) /* push pt_regs address past the switch-stack frame */
132 * This is the main interrupt handler, responsible for calling process_int()
/*
 * IRQ handler fragment (label and register-save lines elided in this
 * excerpt).  Pushes fixed vector number 65 — presumably IRQ level 1
 * (base 64 + level); confirm against the platform vector setup.
 */
136 addql #1,local_irq_count /* enter one level of hard-irq nesting */
137 movew %sp@(PT_VECTOR), %d0 /* put exception # in d0 */
141 movel #65,%sp@- /* put vector # on stack*/
142 jbsr process_int /* process the IRQ*/
143 3: addql #8,%sp /* pop parameters off stack*/
144 bra ret_from_interrupt
/*
 * IRQ handler fragment (label and register-save lines elided in this
 * excerpt).  Pushes fixed vector number 66 — presumably IRQ level 2.
 */
148 addql #1,local_irq_count /* enter one level of hard-irq nesting */
149 movew %sp@(PT_VECTOR), %d0 /* put exception # in d0 */
153 movel #66,%sp@- /* put vector # on stack*/
154 jbsr process_int /* process the IRQ*/
155 3: addql #8,%sp /* pop parameters off stack*/
156 bra ret_from_interrupt
/*
 * IRQ handler fragment (label and register-save lines elided in this
 * excerpt).  Pushes fixed vector number 67 — presumably IRQ level 3.
 */
160 addql #1,local_irq_count /* enter one level of hard-irq nesting */
161 movew %sp@(PT_VECTOR), %d0 /* put exception # in d0 */
165 movel #67,%sp@- /* put vector # on stack*/
166 jbsr process_int /* process the IRQ*/
167 3: addql #8,%sp /* pop parameters off stack*/
168 bra ret_from_interrupt
/*
 * IRQ handler fragment (label and register-save lines elided in this
 * excerpt).  Pushes fixed vector number 68 — presumably IRQ level 4.
 */
172 addql #1,local_irq_count /* enter one level of hard-irq nesting */
173 movew %sp@(PT_VECTOR), %d0 /* put exception # in d0 */
177 movel #68,%sp@- /* put vector # on stack*/
178 jbsr process_int /* process the IRQ*/
179 3: addql #8,%sp /* pop parameters off stack*/
180 bra ret_from_interrupt
/*
 * IRQ handler fragment (label and register-save lines elided in this
 * excerpt).  Pushes fixed vector number 69 — presumably IRQ level 5.
 */
184 addql #1,local_irq_count /* enter one level of hard-irq nesting */
185 movew %sp@(PT_VECTOR), %d0 /* put exception # in d0 */
189 movel #69,%sp@- /* put vector # on stack*/
190 jbsr process_int /* process the IRQ*/
191 3: addql #8,%sp /* pop parameters off stack*/
192 bra ret_from_interrupt
/*
 * IRQ handler fragment (label and register-save lines elided in this
 * excerpt).  Pushes fixed vector number 70 — presumably IRQ level 6.
 */
196 addql #1,local_irq_count /* enter one level of hard-irq nesting */
197 movew %sp@(PT_VECTOR), %d0 /* put exception # in d0 */
201 movel #70,%sp@- /* put vector # on stack*/
202 jbsr process_int /* process the IRQ*/
203 3: addql #8,%sp /* pop parameters off stack*/
204 bra ret_from_interrupt
/*
 * IRQ handler fragment (label and register-save lines elided in this
 * excerpt).  Pushes fixed vector number 71 — presumably IRQ level 7.
 */
208 addql #1,local_irq_count /* enter one level of hard-irq nesting */
209 movew %sp@(PT_VECTOR), %d0 /* put exception # in d0 */
213 movel #71,%sp@- /* put vector # on stack*/
214 jbsr process_int /* process the IRQ*/
215 3: addql #8,%sp /* pop parameters off stack*/
216 bra ret_from_interrupt
/*
 * Generic IRQ handler fragment (label and register-save lines elided
 * in this excerpt).  Unlike the numbered handlers above, this one
 * passes the actual vector number read from the frame (via %d0) to
 * process_int() rather than a hard-coded constant.
 */
220 addql #1,local_irq_count /* enter one level of hard-irq nesting */
221 movew %sp@(PT_VECTOR), %d0 /* put exception # in d0 */
225 movel %d0,%sp@- /* put vector # on stack*/
226 jbsr process_int /* process the IRQ*/
227 3: addql #8,%sp /* pop parameters off stack*/
228 bra ret_from_interrupt
/*
 * Common interrupt-exit fragments (label and several lines elided in
 * this excerpt): drop one level of hard-irq nesting, then either leave
 * through the normal exception return or arrange for it to run after
 * further (elided) work by pushing its address as a return address.
 */
231 subql #1,local_irq_count /* leave one level of hard-irq nesting */
236 moveb %sp@(PT_SR), %d0 /* d0 = byte of the saved status register */
240 /* check if we need to do software interrupts */
241 movel local_irq_count,%d0 /* sets flags: Z set when no nested irqs remain */
242 jeq ret_from_exception /* no nesting left: normal exception return */
244 pea ret_from_exception /* else return here after the (elided) softirq call — confirm */
/*
 * Spurious-interrupt handler fragment (comment opening and label are
 * elided in this excerpt): the only visible action is bumping the
 * num_spurious event counter.
 */
249 * Handler for uninitialized and spurious interrupts.
252 addql #1,num_spurious /* count the spurious/uninitialized interrupt */
256 * Beware - when entering resume, prev (the current task) is
257 * in a0, next (the new task) is in a1,so don't change these
258 * registers until their contents are no longer needed.
/*
 * resume() — task context switch (label elided in this excerpt).
 * Saves prev's status register, user stack pointer and kernel stack
 * pointer into prev->thread, then restores the same state from
 * next->thread.  NOTE(review): some lines are elided here, e.g. the
 * move of the restored USP from %a0 back into %usp — confirm against
 * the full source.
 */
261 movel %a0,%d1 /* save prev thread in d1 */
262 movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */
263 movel %usp,%a2 /* save usp */
264 movel %a2,%a0@(TASK_THREAD+THREAD_USP)
267 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
268 movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
271 movel %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore user stack */
273 movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */