/*
 * arch/sh/kernel/process.c
 *
 * This file handles the architecture-dependent parts of process handling.
 *
 * Copyright (C) 1995  Linus Torvalds
 *
 * SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
 *                  Copyright (C) 2006  Lineo Solutions Inc. support SH4A UBC
 *                  Copyright (C) 2002 - 2007  Paul Mundt
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/kallsyms.h>
#include <linux/kexec.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/reboot.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/ubc.h>
static int hlt_counter;

/* Number of tasks currently tracing via the UBC (see __switch_to()) */
int ubc_usercnt = 0;

void (*pm_idle)(void);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
void disable_hlt(void)
{
        hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);
static int __init nohlt_setup(char *__unused)
{
        hlt_counter = 1;
        return 1;
}
__setup("nohlt", nohlt_setup);

static int __init hlt_setup(char *__unused)
{
        hlt_counter = 0;
        return 1;
}
__setup("hlt", hlt_setup);
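
/*
 * Usage note (illustrative, not part of the original sources): "hlt" and
 * "nohlt" are kernel command-line switches.  Booting with "nohlt" raises
 * hlt_counter, so default_idle() below busy-polls with cpu_relax() instead
 * of sleeping the CPU.  A hypothetical bootloader command line:
 *
 *	console=ttySC0,115200 root=/dev/nfs nohlt
 */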
void default_idle(void)
{
        if (!hlt_counter) {
                clear_thread_flag(TIF_POLLING_NRFLAG);
                smp_mb__after_clear_bit();
                set_bl_bit();
                while (!need_resched())
                        cpu_sleep();
                clear_bl_bit();
                set_thread_flag(TIF_POLLING_NRFLAG);
        } else
                while (!need_resched())
                        cpu_relax();
}

void cpu_idle(void)
{
        set_thread_flag(TIF_POLLING_NRFLAG);

        /* endless idle loop with no priority at all */
        while (1) {
                void (*idle)(void) = pm_idle;

                if (!idle)
                        idle = default_idle;

                tick_nohz_stop_sched_tick();
                while (!need_resched())
                        idle();
                tick_nohz_restart_sched_tick();

                preempt_enable_no_resched();
                schedule();
                preempt_disable();
                check_pgt_cache();
        }
}
void machine_restart(char * __unused)
{
        /* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
        asm volatile("ldc %0, sr\n\t"
                     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
}
void machine_halt(void)
{
        local_irq_disable();

        while (1)
                cpu_sleep();
}
void machine_power_off(void)
{
        if (pm_power_off)
                pm_power_off();
}
void show_regs(struct pt_regs * regs)
{
        printk("Pid : %d, Comm: %20s\n", task_pid_nr(current), current->comm);
        print_symbol("PC is at %s\n", instruction_pointer(regs));
        printk("PC : %08lx SP : %08lx SR : %08lx ",
               regs->pc, regs->regs[15], regs->sr);
        printk("TEA : %08x ", ctrl_inl(MMU_TEA));
        printk("%s\n", print_tainted());

        printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
               regs->regs[0], regs->regs[1],
               regs->regs[2], regs->regs[3]);
        printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
               regs->regs[4], regs->regs[5],
               regs->regs[6], regs->regs[7]);
        printk("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n",
               regs->regs[8], regs->regs[9],
               regs->regs[10], regs->regs[11]);
        printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
               regs->regs[12], regs->regs[13],
               regs->regs[14]);
        printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n",
               regs->mach, regs->macl, regs->gbr, regs->pr);

        show_trace(NULL, (unsigned long *)regs->regs[15], regs);
}
/*
 * Create a kernel thread.
 *
 * This is the mechanism for creating a new kernel thread; a usage
 * sketch follows kernel_thread() below.
 */
extern void kernel_thread_helper(void);
/* Trampoline: call fn (in r5) with arg already in r4, then hand the
 * return value to do_exit(). */
__asm__(".align 5\n"
        "kernel_thread_helper:\n\t"
        "jsr    @r5\n\t"
        " nop\n\t"
        "mov.l  1f, r1\n\t"
        "jsr    @r1\n\t"
        " mov   r0, r4\n\t"
        ".align 2\n\t"
        "1:.long do_exit");
/* Don't use this in BL=1(cli). Or else, CPU resets! */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));
        regs.regs[4] = (unsigned long)arg;
        regs.regs[5] = (unsigned long)fn;

        regs.pc = (unsigned long)kernel_thread_helper;

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
                       &regs, 0, NULL, NULL);
}
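
/*
 * Usage sketch (illustrative only; "my_worker", "my_data" and
 * "do_some_work" are made-up names, not part of this file).  A caller
 * hands kernel_thread() a C function and an argument; the helper above
 * runs fn(arg) and passes its return value to do_exit():
 *
 *	static int my_worker(void *my_data)
 *	{
 *		do_some_work(my_data);
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_worker, my_data, CLONE_FS | CLONE_FILES);
 */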
/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
        if (current->thread.ubc_pc) {
                current->thread.ubc_pc = 0;
                ubc_usercnt -= 1;
        }
}
void flush_thread(void)
{
#if defined(CONFIG_SH_FPU)
        struct task_struct *tsk = current;

        /* Forget lazy FPU state */
        clear_fpu(tsk, task_pt_regs(tsk));
        clear_used_math();
#endif
}

void release_thread(struct task_struct *dead_task)
{
        /* do nothing */
}
/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
        int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
        struct task_struct *tsk = current;

        fpvalid = !!tsk_used_math(tsk);
        if (fpvalid) {
                unlazy_fpu(tsk, regs);
                memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
        }
#endif

        return fpvalid;
}
asmlinkage void ret_from_fork(void);

int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
                unsigned long unused,
                struct task_struct *p, struct pt_regs *regs)
{
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs;
#if defined(CONFIG_SH_FPU)
        struct task_struct *tsk = current;

        unlazy_fpu(tsk, regs);
        p->thread.fpu = tsk->thread.fpu;
        copy_to_stopped_child_used_math(p);
#endif

        /* Set up the child's register frame on its kernel stack */
        childregs = task_pt_regs(p);
        *childregs = *regs;

        if (user_mode(regs)) {
                childregs->regs[15] = usp;
                ti->addr_limit = USER_DS;
        } else {
                childregs->regs[15] = (unsigned long)childregs;
                ti->addr_limit = KERNEL_DS;
        }

        if (clone_flags & CLONE_SETTLS)
                childregs->gbr = childregs->regs[0];

        childregs->regs[0] = 0; /* Set return value for child */

        p->thread.sp = (unsigned long) childregs;
        p->thread.pc = (unsigned long) ret_from_fork;

        p->thread.ubc_pc = 0;

        return 0;
}
/* Tracing by user break controller. */
static void ubc_set_tracing(int asid, unsigned long pc)
{
#if defined(CONFIG_CPU_SH4A)
        unsigned long val;

        val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
        val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));

        ctrl_outl(val, UBC_CBR0);
        ctrl_outl(pc, UBC_CAR0);
        ctrl_outl(0x0, UBC_CAMR0);
        ctrl_outl(0x0, UBC_CBCR);

        val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
        ctrl_outl(val, UBC_CRR0);

        /* Read UBC register that we wrote last, for checking update */
        val = ctrl_inl(UBC_CRR0);

#else /* CONFIG_CPU_SH4A */
        ctrl_outl(pc, UBC_BARA);

        ctrl_outb(asid, UBC_BASRA);

        ctrl_outl(0, UBC_BAMRA);

        if (current_cpu_data.type == CPU_SH7729 ||
            current_cpu_data.type == CPU_SH7710 ||
            current_cpu_data.type == CPU_SH7712) {
                ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
                ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
        } else {
                ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
                ctrl_outw(BRCR_PCBA, UBC_BRCR);
        }
#endif /* CONFIG_CPU_SH4A */
}
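
/*
 * Note (descriptive, assumptions flagged): ubc_set_tracing() is only called
 * from __switch_to() below, once a debugger path (the ptrace single-step
 * code is the expected caller) has recorded a break address in
 * next->thread.ubc_pc and bumped ubc_usercnt.  Execution reaching the
 * programmed address raises the UBC break exception, which lands in
 * break_point_trap() at the end of this file.
 */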
/*
 *      switch_to(x,y) should switch tasks from x to y.
 */
struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next)
{
#if defined(CONFIG_SH_FPU)
        unlazy_fpu(prev, task_pt_regs(prev));
#endif

        /*
         * Restore the kernel mode register (r7_bank), which holds the
         * thread_info of the next task.
         */
        asm volatile("ldc %0, r7_bank"
                     : /* no output */
                     : "r" (task_thread_info(next)));

        /* If no tasks are using the UBC, we're done */
        if (ubc_usercnt == 0)
                ;       /* nothing to do */
        else if (next->thread.ubc_pc && next->mm) {
                int asid = 0;

                asid |= cpu_asid(smp_processor_id(), next->mm);
                ubc_set_tracing(asid, next->thread.ubc_pc);
        } else {
#if defined(CONFIG_CPU_SH4A)
                ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
                ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
#else
                ctrl_outw(0, UBC_BBRA);
                ctrl_outw(0, UBC_BBRB);
#endif
        }

        return prev;
}
asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
                        unsigned long r6, unsigned long r7,
                        struct pt_regs __regs)
{
#ifdef CONFIG_MMU
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
#else
        /* fork almost works, enough to trick you into looking elsewhere :-( */
        return -EINVAL;
#endif
}
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
                         unsigned long parent_tidptr,
                         unsigned long child_tidptr,
                         struct pt_regs __regs)
{
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);

        if (!newsp)
                newsp = regs->regs[15];
        return do_fork(clone_flags, newsp, regs, 0,
                       (int __user *)parent_tidptr,
                       (int __user *)child_tidptr);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 *
 * (An illustrative userspace sketch follows sys_vfork() below.)
 */
asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
                         unsigned long r6, unsigned long r7,
                         struct pt_regs __regs)
{
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);

        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
                       0, NULL, NULL);
}
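
/*
 * Illustrative userspace pattern (assumed for exposition; not part of this
 * file): the register-pressure argument above is easiest to see in the
 * classic caller, where the child borrows the parent's stack until it
 * calls execve():
 *
 *	pid_t pid = vfork();
 *	if (pid == 0) {
 *		execve("/bin/sh", argv, envp);	// argv/envp set up elsewhere
 *		_exit(127);			// only reached if execve fails
 *	}
 */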
/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
                          char __user * __user *uenvp, unsigned long r7,
                          struct pt_regs __regs)
{
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        int error;
        char *filename;

        filename = getname(ufilename);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;

        error = do_execve(filename, uargv, uenvp, regs);
        if (error == 0) {
                task_lock(current);
                current->ptrace &= ~PT_DTRACE;
                task_unlock(current);
        }
        putname(filename);
out:
        return error;
}
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long pc;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        /*
         * The same comment as on the Alpha applies here, too ...
         * The saved PC may point into the scheduler itself, in which case
         * we dig the sleeping caller out of schedule()'s stack frame.
         */
        pc = thread_saved_pc(p);

#ifdef CONFIG_FRAME_POINTER
        if (in_sched_functions(pc)) {
                unsigned long schedule_frame = (unsigned long)p->thread.sp;

                /* Slot 21 of schedule()'s frame is assumed to hold the
                 * return address of its caller. */
                return ((unsigned long *)schedule_frame)[21];
        }
#endif

        return pc;
}
asmlinkage void break_point_trap(void)
{
        /* Clear tracing. */
#if defined(CONFIG_CPU_SH4A)
        ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
        ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
#else
        ctrl_outw(0, UBC_BBRA);
        ctrl_outw(0, UBC_BBRB);
#endif
        current->thread.ubc_pc = 0;
        ubc_usercnt -= 1;

        force_sig(SIGTRAP, current);
}