/*
 *  linux/arch/m68k/kernel/process.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 *
 *  68060 fixes by Jesper Skov
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/reboot.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/setup.h>
#include <asm/pgtable.h>

asmlinkage void ret_from_fork(void);

/*
 * Return saved PC from a blocked thread
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
	/* Check whether the thread is blocked in resume() */
	if (in_sched_functions(sw->retpc))
		return ((unsigned long *)sw->a6)[1];
	else
		return sw->retpc;
}
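
/*
 * Note: the lookup above assumes the blocked thread was suspended inside
 * the scheduler with a standard "link %a6" frame, so that sw->a6 is the
 * caller's frame pointer and ((unsigned long *)sw->a6)[1] is the return
 * address saved just above it.
 */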

/*
 * The idle loop on an m68k..
 */
static void default_idle(void)
{
	if (!need_resched())
#if defined(MACH_ATARI_ONLY)
		/* block out HSYNC on the atari (falcon) */
		__asm__("stop #0x2200" : : : "cc");
#else
		__asm__("stop #0x2000" : : : "cc");
#endif
}
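
/*
 * Background note: "stop #0x2000" loads the status register (supervisor
 * mode, interrupt mask 0) and halts the CPU until the next interrupt;
 * the Atari variant uses mask level 2 so the frequent HSYNC interrupt
 * does not keep waking the core.
 */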

void (*idle)(void) = default_idle;

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched())
			idle();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

void machine_restart(char * __unused)
{
	if (mach_reset)
		mach_reset();
	for (;;);
}

void machine_halt(void)
{
	if (mach_halt)
		mach_halt();
	for (;;);
}

void machine_power_off(void)
{
	if (mach_power_off)
		mach_power_off();
	for (;;);
}

void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);

void show_regs(struct pt_regs * regs)
{
	printk("Format %02x Vector: %04x PC: %08lx Status: %04x %s\n",
	       regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
	printk("ORIG_D0: %08lx D0: %08lx A2: %08lx A1: %08lx\n",
	       regs->orig_d0, regs->d0, regs->a2, regs->a1);
	printk("A0: %08lx D5: %08lx D4: %08lx\n",
	       regs->a0, regs->d5, regs->d4);
	printk("D3: %08lx D2: %08lx D1: %08lx\n",
	       regs->d3, regs->d2, regs->d1);
	if (!(regs->sr & PS_S))
		printk("USP: %08lx\n", rdusp());
}

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	int pid;
	mm_segment_t fs;

	fs = get_fs();
	set_fs(KERNEL_DS);

	{
	register long retval __asm__ ("d0");
	register long clone_arg __asm__ ("d1") = flags | CLONE_VM | CLONE_UNTRACED;

	retval = __NR_clone;
	__asm__ __volatile__
	  ("clrl %%d2\n\t"
	   "trap #0\n\t"		/* Linux/m68k system call */
	   "tstl %0\n\t"		/* child or parent */
	   "jne 1f\n\t"			/* parent - jump */
	   "lea %%sp@(%c7),%6\n\t"	/* reload current */
	   "movel %6@,%6\n\t"
	   "movel %3,%%sp@-\n\t"	/* push argument */
	   "jsr %4@\n\t"		/* call fn */
	   "movel %0,%%d1\n\t"		/* pass exit value */
	   "movel %2,%%d0\n\t"		/* exit */
	   "trap #0\n"
	   "1:"
	   : "+d" (retval)
	   : "i" (__NR_clone), "i" (__NR_exit),
	     "r" (arg), "a" (fn), "d" (clone_arg), "r" (current),
	     "i" (-THREAD_SIZE)
	   : "d2");

	pid = retval;
	}

	set_fs(fs);
	return pid;
}
EXPORT_SYMBOL(kernel_thread);
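
/*
 * Illustrative use only (hypothetical caller): a boot-time helper could
 * start a kernel thread with
 *
 *	kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 *
 * where my_thread_fn is an int (*)(void *) that runs entirely in kernel
 * space; CLONE_VM | CLONE_UNTRACED are OR-ed in unconditionally above.
 */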

void flush_thread(void)
{
	unsigned long zero = 0;

	current->thread.fs = __USER_DS;
	if (!FPU_IS_EMU)
		asm volatile("frestore %0": :"m" (zero));
}

/*
 * "m68k_fork()".. By the time we get here, the
 * non-volatile registers have also been saved on the
 * stack. We do some ugly pointer stuff here.. (see
 * also copy_thread)
 */

asmlinkage int m68k_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL);
}

asmlinkage int m68k_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0,
		       NULL, NULL);
}

asmlinkage int m68k_clone(struct pt_regs *regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	/* syscall2 puts clone_flags in d1 and usp in d2 */
	clone_flags = regs->d1;
	newsp = regs->d2;
	parent_tidptr = (int __user *)regs->d3;
	child_tidptr = (int __user *)regs->d4;
	if (!newsp)
		newsp = rdusp();
	return do_fork(clone_flags, newsp, regs, 0,
		       parent_tidptr, child_tidptr);
}
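
/*
 * Register convention assumed by this clone entry path: d1 = clone_flags,
 * d2 = new user stack pointer, d3 = parent_tidptr, d4 = child_tidptr,
 * and d5 carries the TLS pointer consumed by copy_thread() below when
 * CLONE_SETTLS is set.
 */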

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;
	struct switch_stack * childstack, *stack;
	unsigned long *retp;

	childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;

	*childregs = *regs;
	childregs->d0 = 0;

	retp = ((unsigned long *) regs);
	stack = ((struct switch_stack *) retp) - 1;

	childstack = ((struct switch_stack *) childregs) - 1;
	*childstack = *stack;
	childstack->retpc = (unsigned long)ret_from_fork;

	p->thread.usp = usp;
	p->thread.ksp = (unsigned long)childstack;

	if (clone_flags & CLONE_SETTLS)
		task_thread_info(p)->tp_value = regs->d5;

	/*
	 * Must save the current SFC/DFC value, NOT the value when
	 * the parent was last descheduled - RGH 10-08-96
	 */
	p->thread.fs = get_fs().seg;

	if (!FPU_IS_EMU) {
		/* Copy the current fpu state */
		asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");

		if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) {
			if (CPU_IS_COLDFIRE) {
				asm volatile ("fmovemd %/fp0-%/fp7,%0\n\t"
					      "fmovel %/fpiar,%1\n\t"
					      "fmovel %/fpcr,%2\n\t"
					      "fmovel %/fpsr,%3"
					      :
					      : "m" (p->thread.fp[0]),
						"m" (p->thread.fpcntl[0]),
						"m" (p->thread.fpcntl[1]),
						"m" (p->thread.fpcntl[2])
					      : "memory");
			} else {
				asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
					      "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
					      :
					      : "m" (p->thread.fp[0]),
						"m" (p->thread.fpcntl[0])
					      : "memory");
			}
		}

		/* Restore the state in case the fpu was busy */
		asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
	}

	return 0;
}
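
/*
 * Note on the FPU copy above: "fsave" dumps the internal FPU frame; a
 * null frame (first byte zero, or byte 2 on the 68060) means the FPU
 * holds no live state, so the programmer-visible registers are only
 * copied with fmovem when the frame is non-null, and "frestore" puts
 * the parent's FPU back the way it was found.
 */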

/* Fill in the fpu structure for a core dump.  */

int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
{
	char fpustate[216];

	if (FPU_IS_EMU) {
		int i;

		memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
		memcpy(fpu->fpregs, current->thread.fp, 96);
		/* Convert internal fpu reg representation
		 * into long double format
		 */
		for (i = 0; i < 24; i += 3)
			fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
					 ((fpu->fpregs[i] & 0x0000ffff) << 16);
		return 1;
	}

	/* First dump the fpu context to avoid protocol violation.  */
	asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
	if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
		return 0;

	if (CPU_IS_COLDFIRE) {
		asm volatile ("fmovel %/fpiar,%0\n\t"
			      "fmovel %/fpcr,%1\n\t"
			      "fmovel %/fpsr,%2\n\t"
			      "fmovemd %/fp0-%/fp7,%3"
			      :
			      : "m" (fpu->fpcntl[0]),
				"m" (fpu->fpcntl[1]),
				"m" (fpu->fpcntl[2]),
				"m" (fpu->fpregs[0])
			      : "memory");
	} else {
		asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
			      :
			      : "m" (fpu->fpcntl[0])
			      : "memory");
		asm volatile ("fmovemx %/fp0-%/fp7,%0"
			      :
			      : "m" (fpu->fpregs[0])
			      : "memory");
	}

	return 1;
}
EXPORT_SYMBOL(dump_fpu);

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(const char __user *name,
			  const char __user *const __user *argv,
			  const char __user *const __user *envp)
{
	int error;
	char *filename;
	struct pt_regs *regs = (struct pt_regs *) &name;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	return error;
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, pc;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)task_stack_page(p);
	fp = ((struct switch_stack *)p->thread.ksp)->a6;
	do {
		if (fp < stack_page+sizeof(struct thread_info) ||
		    fp >= 8184+stack_page)
			return 0;
		pc = ((unsigned long *)fp)[1];
		if (!in_sched_functions(pc))
			return pc;
		fp = *(unsigned long *) fp;
	} while (count++ < 16);
	return 0;
}
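
/*
 * Note: the loop above walks the saved %a6 frame-pointer chain, reading
 * the caller's return address from fp[1] at each step; it gives up after
 * 16 frames or once a frame pointer leaves the task's stack area, and
 * reports the first PC outside the scheduler as the wait channel.
 */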