arch/avr32/kernel/process.c

/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/fs.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/unistd.h>

#include <asm/sysreg.h>
#include <asm/ocd.h>

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

extern void cpu_idle_sleep(void);

/*
 * This file handles the architecture-dependent parts of process handling.
 */
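
/*
 * Idle loop. cpu_idle_sleep() is declared above and implemented
 * elsewhere; it is expected to stop the CPU core until the next
 * interrupt, and the loop rechecks need_resched() after every wakeup.
 */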
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched())
			cpu_idle_sleep();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

void machine_halt(void)
{
	/*
	 * Enter Stop mode. The 32 kHz oscillator will keep running so
	 * the RTC will keep the time properly and the system will
	 * boot quickly.
	 */
	asm volatile("sleep 3\n\t"
		     "sub pc, -2");
}

void machine_power_off(void)
{
}
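
/*
 * Reset the chip through the On-Chip Debug system's Development
 * Control (DC) register, then spin until the reset takes effect.
 */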
void machine_restart(char *cmd)
{
	ocd_write(DC, (1 << OCD_DC_DBE_BIT));
	ocd_write(DC, (1 << OCD_DC_RES_BIT));
	while (1) ;
}

/*
 * PC is actually discarded when returning from a system call -- the
 * return address must be stored in LR. This function will make sure
 * LR points to do_exit before starting the thread.
 *
 * Also, when returning from fork(), r12 is 0, so we must copy the
 * argument as well.
 *
 *  r0 : The argument to the main thread function
 *  r1 : The address of do_exit
 *  r2 : The address of the main thread function
 */
asmlinkage extern void kernel_thread_helper(void);
__asm__("	.type	kernel_thread_helper, @function\n"
	"kernel_thread_helper:\n"
	"	mov	r12, r0\n"
	"	mov	lr, r2\n"
	"	mov	pc, r1\n"
	"	.size	kernel_thread_helper, . - kernel_thread_helper");

int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.r0 = (unsigned long)arg;
	regs.r1 = (unsigned long)fn;
	regs.r2 = (unsigned long)do_exit;
	regs.lr = (unsigned long)kernel_thread_helper;
	regs.pc = (unsigned long)kernel_thread_helper;
	regs.sr = MODE_SUPERVISOR;

	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
		       0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
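
/*
 * Usage sketch (my_worker_fn and its flags are hypothetical): the new
 * thread runs my_worker_fn(data) in supervisor mode and falls into
 * do_exit() when the function returns.
 *
 *	pid = kernel_thread(my_worker_fn, data, CLONE_FS | CLONE_FILES);
 */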

/*
 * Free current thread data structures etc
 */
void exit_thread(void)
{
	ocd_disable(current);
}

void flush_thread(void)
{
	/* nothing to do */
}

void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}
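
/*
 * Dump the memory between 'bottom' and 'top' as hex words, eight per
 * line, using __get_user() so that an unreadable address ends the
 * dump instead of faulting.
 */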
static void dump_mem(const char *str, const char *log_lvl,
		     unsigned long bottom, unsigned long top)
{
	unsigned long p;
	int i;

	printk("%s%s(0x%08lx to 0x%08lx)\n", log_lvl, str, bottom, top);

	for (p = bottom & ~31; p < top; ) {
		printk("%s%04lx: ", log_lvl, p & 0xffff);
		for (i = 0; i < 8; i++, p += 4) {
			unsigned int val;

			if (p < bottom || p >= top)
				printk("         ");
			else {
				if (__get_user(val, (unsigned int __user *)p)) {
					printk("\n");
					goto out;
				}
				printk("%08x ", val);
			}
		}
		printk("\n");
	}

out:
	return;
}
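
/*
 * A stack address is valid if it lies strictly inside the task's
 * thread_info/stack area with room for at least one full word.
 */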
static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p)
{
	return (p > (unsigned long)tinfo)
		&& (p < (unsigned long)tinfo + THREAD_SIZE - 3);
}

#ifdef CONFIG_FRAME_POINTER
static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp,
			       struct pt_regs *regs, const char *log_lvl)
{
	unsigned long lr, fp;
	struct thread_info *tinfo;

	if (regs)
		fp = regs->r7;
	else if (tsk == current)
		asm("mov %0, r7" : "=r"(fp));
	else
		fp = tsk->thread.cpu_context.r7;

	/*
	 * Walk the stack as long as the frame pointer (a) is within
	 * the kernel stack of the task, and (b) it doesn't move
	 * downwards.
	 */
	tinfo = task_thread_info(tsk);
	printk("%sCall trace:\n", log_lvl);
	while (valid_stack_ptr(tinfo, fp)) {
		unsigned long new_fp;

		lr = *(unsigned long *)fp;
#ifdef CONFIG_KALLSYMS
		printk("%s [<%08lx>] ", log_lvl, lr);
#else
		printk(" [<%08lx>] ", lr);
#endif
		print_symbol("%s\n", lr);

		new_fp = *(unsigned long *)(fp + 4);
		if (new_fp <= fp)
			break;
		fp = new_fp;
	}
	printk("\n");
}
#else
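
/*
 * Without frame pointers, scan the whole stack and print every word
 * that looks like a kernel text address. This can print stale return
 * addresses, but it will not miss real ones.
 */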
static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp,
			       struct pt_regs *regs, const char *log_lvl)
{
	unsigned long addr;

	printk("%sCall trace:\n", log_lvl);

	while (!kstack_end(sp)) {
		addr = *sp++;
		if (kernel_text_address(addr)) {
#ifdef CONFIG_KALLSYMS
			printk("%s [<%08lx>] ", log_lvl, addr);
#else
			printk(" [<%08lx>] ", addr);
#endif
			print_symbol("%s\n", addr);
		}
	}
	printk("\n");
}
#endif
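
/*
 * Dump the raw stack contents followed by the call trace. A zero 'sp'
 * means "use the task's saved kernel stack pointer", or the current
 * stack frame when no task is given.
 */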
void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp,
			struct pt_regs *regs, const char *log_lvl)
{
	struct thread_info *tinfo;

	if (sp == 0) {
		if (tsk)
			sp = tsk->thread.cpu_context.ksp;
		else
			sp = (unsigned long)&tinfo;
	}
	if (!tsk)
		tsk = current;

	tinfo = task_thread_info(tsk);

	if (valid_stack_ptr(tinfo, sp)) {
		dump_mem("Stack: ", log_lvl, sp,
			 THREAD_SIZE + (unsigned long)tinfo);
		show_trace_log_lvl(tsk, (unsigned long *)sp, regs, log_lvl);
	}
}

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	show_stack_log_lvl(tsk, (unsigned long)stack, NULL, "");
}

void dump_stack(void)
{
	unsigned long stack;

	show_trace_log_lvl(current, &stack, NULL, "");
}
EXPORT_SYMBOL(dump_stack);

static const char *cpu_modes[] = {
	"Application", "Supervisor", "Interrupt level 0", "Interrupt level 1",
	"Interrupt level 2", "Interrupt level 3", "Exception", "NMI"
};
void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl)
{
	unsigned long sp = regs->sp;
	unsigned long lr = regs->lr;
	unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT;

	if (!user_mode(regs)) {
		sp = (unsigned long)regs + FRAME_SIZE_FULL;

		printk("%s", log_lvl);
		print_symbol("PC is at %s\n", instruction_pointer(regs));
		printk("%s", log_lvl);
		print_symbol("LR is at %s\n", lr);
	}

	printk("%spc : [<%08lx>]    lr : [<%08lx>] %s\n"
	       "%ssp : %08lx  r12: %08lx  r11: %08lx\n",
	       log_lvl, instruction_pointer(regs), lr, print_tainted(),
	       log_lvl, sp, regs->r12, regs->r11);
	printk("%sr10: %08lx  r9 : %08lx  r8 : %08lx\n",
	       log_lvl, regs->r10, regs->r9, regs->r8);
	printk("%sr7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
	       log_lvl, regs->r7, regs->r6, regs->r5, regs->r4);
	printk("%sr3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
	       log_lvl, regs->r3, regs->r2, regs->r1, regs->r0);
	printk("%sFlags: %c%c%c%c%c\n", log_lvl,
	       regs->sr & SR_Q ? 'Q' : 'q',
	       regs->sr & SR_V ? 'V' : 'v',
	       regs->sr & SR_N ? 'N' : 'n',
	       regs->sr & SR_Z ? 'Z' : 'z',
	       regs->sr & SR_C ? 'C' : 'c');
	printk("%sMode bits: %c%c%c%c%c%c%c%c%c%c\n", log_lvl,
	       regs->sr & SR_H ? 'H' : 'h',
	       regs->sr & SR_J ? 'J' : 'j',
	       regs->sr & SR_DM ? 'M' : 'm',
	       regs->sr & SR_D ? 'D' : 'd',
	       regs->sr & SR_EM ? 'E' : 'e',
	       regs->sr & SR_I3M ? '3' : '.',
	       regs->sr & SR_I2M ? '2' : '.',
	       regs->sr & SR_I1M ? '1' : '.',
	       regs->sr & SR_I0M ? '0' : '.',
	       regs->sr & SR_GM ? 'G' : 'g');
	printk("%sCPU Mode: %s\n", log_lvl, cpu_modes[mode]);
	printk("%sProcess: %s [%d] (task: %p thread: %p)\n",
	       log_lvl, current->comm, current->pid, current,
	       task_thread_info(current));
}

void show_regs(struct pt_regs *regs)
{
	unsigned long sp = regs->sp;

	if (!user_mode(regs))
		sp = (unsigned long)regs + FRAME_SIZE_FULL;

	show_regs_log_lvl(regs, "");
	show_trace_log_lvl(current, (unsigned long *)sp, regs, "");
}
EXPORT_SYMBOL(show_regs);

/* Fill in the fpu structure for a core dump. This is easy -- we don't have any */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	/* Not valid */
	return 0;
}

asmlinkage void ret_from_fork(void);
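
/*
 * Set up the kernel stack and saved context of a newly forked task:
 * the child gets a copy of the parent's pt_regs at the top of its
 * kernel stack, r12 is cleared so fork() returns 0 in the child, and
 * execution resumes at ret_from_fork.
 */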
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;

	childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long)task_stack_page(p))) - 1;
	*childregs = *regs;

	if (user_mode(regs))
		childregs->sp = usp;
	else
		childregs->sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	childregs->r12 = 0; /* Set return value for child */

	p->thread.cpu_context.sr = MODE_SUPERVISOR | SR_GM;
	p->thread.cpu_context.ksp = (unsigned long)childregs;
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;

	if ((clone_flags & CLONE_PTRACE) && test_thread_flag(TIF_DEBUG))
		ocd_enable(p);

	return 0;
}

/* r12-r8 are dummy parameters to force the compiler to use the stack */
asmlinkage int sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 unsigned long parent_tidptr,
			 unsigned long child_tidptr, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0,
		       (int __user *)parent_tidptr,
		       (int __user *)child_tidptr);
}

asmlinkage int sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs,
		       0, NULL, NULL);
}
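
/*
 * execve() entry point: copy the filename in from user space, hand the
 * argument and environment pointers to do_execve(), and clear the
 * single-step flag (PT_DTRACE) on success.
 */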
asmlinkage int sys_execve(char __user *ufilename, char __user *__user *uargv,
			  char __user *__user *uenvp, struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname(ufilename);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = do_execve(filename, uargv, uenvp, regs);
	if (error == 0)
		current->ptrace &= ~PT_DTRACE;
	putname(filename);

out:
	return error;
}

/*
 * This function is supposed to answer the question "who called
 * schedule()?"
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long pc;
	unsigned long stack_page;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)task_stack_page(p);
	BUG_ON(!stack_page);

	/*
	 * The stored value of PC is either the address right after
	 * the call to __switch_to() or ret_from_fork.
	 */
	pc = thread_saved_pc(p);
	if (in_sched_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
		unsigned long fp = p->thread.cpu_context.r7;
		BUG_ON(fp < stack_page || fp > (THREAD_SIZE + stack_page));
		pc = *(unsigned long *)fp;
#else
		/*
		 * We depend on the frame size of schedule here, which
		 * is actually quite ugly. It might be possible to
		 * determine the frame size automatically at build
		 * time by doing this:
		 *   - compile sched.c
		 *   - disassemble the resulting sched.o
		 *   - look for 'sub sp,??' shortly after '<schedule>:'
		 */
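		/*
		 * For example (hypothetical cross-toolchain prefix),
		 * something like:
		 *	avr32-linux-objdump -d kernel/sched.o | grep -A4 '<schedule>:'
		 * shows the 'sub sp, ...' instruction in the prologue
		 * that gives the frame size assumed below.
		 */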
		unsigned long sp = p->thread.cpu_context.ksp + 16;
		BUG_ON(sp < stack_page || sp > (THREAD_SIZE + stack_page));
		pc = *(unsigned long *)sp;
#endif
	}

	return pc;
}