/*
 * arch/sh/kernel/process.c
 *
 * This file handles the architecture-dependent parts of process handling..
 *
 * Copyright (C) 1995  Linus Torvalds
 *
 * SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
 *                  Copyright (C) 2006  Lineo Solutions Inc. support SH4A UBC
 *                  Copyright (C) 2002 - 2007  Paul Mundt
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/kallsyms.h>
#include <linux/kexec.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/reboot.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/ubc.h>
static int hlt_counter;
int ubc_usercnt = 0;

void (*pm_idle)(void);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);
static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
__setup("nohlt", nohlt_setup);

static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}
__setup("hlt", hlt_setup);
void default_idle(void)
{
	if (!hlt_counter) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();
		set_bl_bit();
		while (!need_resched())
			cpu_sleep();
		clear_bl_bit();
		set_thread_flag(TIF_POLLING_NRFLAG);
	} else
		while (!need_resched())
			cpu_relax();
}

void cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		void (*idle)(void) = pm_idle;

		if (!idle)
			idle = default_idle;

		tick_nohz_stop_sched_tick();
		while (!need_resched())
			idle();
		tick_nohz_restart_sched_tick();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
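/*
 * Note: pm_idle is a hook that platform power-management code may point at
 * its own idle routine; when it is left NULL, cpu_idle() above falls back
 * to default_idle(), whose behaviour is controlled by the "hlt"/"nohlt"
 * boot parameters handled earlier in this file.
 */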
void machine_restart(char * __unused)
{
	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
	asm volatile("ldc %0, sr\n\t"
		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
}
void machine_halt(void)
{
	local_irq_disable();

	while (1)
		cpu_sleep();
}
void machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
}
void show_regs(struct pt_regs * regs)
{
	printk("Pid : %d, Comm: %20s\n", task_pid_nr(current), current->comm);
	print_symbol("PC is at %s\n", instruction_pointer(regs));
	printk("PC  : %08lx SP  : %08lx SR  : %08lx ",
	       regs->pc, regs->regs[15], regs->sr);
	printk("TEA : %08x ", ctrl_inl(MMU_TEA));
	printk("%s\n", print_tainted());

	printk("R0  : %08lx R1  : %08lx R2  : %08lx R3  : %08lx\n",
	       regs->regs[0], regs->regs[1],
	       regs->regs[2], regs->regs[3]);
	printk("R4  : %08lx R5  : %08lx R6  : %08lx R7  : %08lx\n",
	       regs->regs[4], regs->regs[5],
	       regs->regs[6], regs->regs[7]);
	printk("R8  : %08lx R9  : %08lx R10 : %08lx R11 : %08lx\n",
	       regs->regs[8], regs->regs[9],
	       regs->regs[10], regs->regs[11]);
	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
	       regs->regs[12], regs->regs[13],
	       regs->regs[14]);
	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR  : %08lx\n",
	       regs->mach, regs->macl, regs->gbr, regs->pr);

	show_trace(NULL, (unsigned long *)regs->regs[15], regs);
}
/*
 * Create a kernel thread
 *
 * This is the mechanism for creating a new kernel thread.
 */
extern void kernel_thread_helper(void);
__asm__(".align 5\n"
	"kernel_thread_helper:\n\t"
	"jsr	@r5\n\t"	/* call fn(arg); arg is already in r4 */
	" nop\n\t"
	"mov.l	1f, r1\n\t"
	"jsr	@r1\n\t"	/* then do_exit(fn's return value) */
	" mov	r0, r4\n\t"
	".align 2\n\t"
	"1:.long do_exit");

/* Don't use this in BL=1(cli). Or else, CPU resets! */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	regs.regs[4] = (unsigned long)arg;
	regs.regs[5] = (unsigned long)fn;

	regs.pc = (unsigned long)kernel_thread_helper;
	regs.sr = (1 << 30);	/* run the new thread in privileged (MD) mode */

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
		       &regs, 0, NULL, NULL);
}
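/*
 * Illustrative usage (a sketch only, not part of this file; the names are
 * hypothetical): a caller passes kernel_thread() a function and argument,
 * and the new thread starts in kernel_thread_helper, which calls fn(arg)
 * and then do_exit() with its return value.
 *
 *	static int my_worker(void *arg)
 *	{
 *		(do some work here, then terminate)
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
 */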
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	if (current->thread.ubc_pc) {
		current->thread.ubc_pc = 0;
		ubc_usercnt -= 1;
	}
}
void flush_thread(void)
{
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;
	/* Forget lazy FPU state */
	clear_fpu(tsk, task_pt_regs(tsk));
	clear_used_math();
#endif
}
void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}
/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid) {
		unlazy_fpu(tsk, regs);
		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
	}
#endif

	return fpvalid;
}
asmlinkage void ret_from_fork(void);
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs;
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	unlazy_fpu(tsk, regs);
	p->thread.fpu = tsk->thread.fpu;
	copy_to_stopped_child_used_math(p);
#endif

	childregs = task_pt_regs(p);
	*childregs = *regs;

	if (user_mode(regs)) {
		childregs->regs[15] = usp;
		ti->addr_limit = USER_DS;
	} else {
		childregs->regs[15] = (unsigned long)childregs;
		ti->addr_limit = KERNEL_DS;
	}

	if (clone_flags & CLONE_SETTLS)
		childregs->gbr = childregs->regs[0];

	childregs->regs[0] = 0; /* Set return value for child */

	p->thread.sp = (unsigned long) childregs;
	p->thread.pc = (unsigned long) ret_from_fork;

	p->thread.ubc_pc = 0;

	return 0;
}
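/*
 * Note on the child's return path: the child is later scheduled with
 * thread.pc pointing at ret_from_fork and thread.sp at its pt_regs copy,
 * so it resumes as if returning from the parent's original trap; regs[0]
 * was zeroed above, which is why fork()/clone() return 0 in the child.
 */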
/* Tracing by user break controller. */
static void ubc_set_tracing(int asid, unsigned long pc)
{
#if defined(CONFIG_CPU_SH4A)
	unsigned long val;

	val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
	val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));

	ctrl_outl(val, UBC_CBR0);
	ctrl_outl(pc, UBC_CAR0);
	ctrl_outl(0x0, UBC_CAMR0);
	ctrl_outl(0x0, UBC_CBCR);

	val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
	ctrl_outl(val, UBC_CRR0);

	/* Read UBC register that we wrote last, for checking update */
	val = ctrl_inl(UBC_CRR0);
#else	/* CONFIG_CPU_SH4A */
	ctrl_outl(pc, UBC_BARA);

	ctrl_outb(asid, UBC_BASRA);

	ctrl_outl(0, UBC_BAMRA);

	if (current_cpu_data.type == CPU_SH7729 ||
	    current_cpu_data.type == CPU_SH7710 ||
	    current_cpu_data.type == CPU_SH7712) {
		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
	} else {
		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
		ctrl_outw(BRCR_PCBA, UBC_BRCR);
	}
#endif	/* CONFIG_CPU_SH4A */
}
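/*
 * Lifecycle note: thread.ubc_pc is set up by the ptrace single-step code
 * (which also bumps ubc_usercnt); __switch_to() below programs the UBC for
 * the incoming task via ubc_set_tracing(), and break_point_trap() at the
 * end of this file tears the breakpoint down again once it has fired.
 */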
/*
 *	switch_to(x,y) should switch tasks from x to y.
 */
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
#if defined(CONFIG_SH_FPU)
	unlazy_fpu(prev, task_pt_regs(prev));
#endif

	/*
	 * Restore the kernel mode register
	 *	k7 (r7_bank1)
	 */
	asm volatile("ldc	%0, r7_bank"
		     : /* no output */
		     : "r" (task_thread_info(next)));

	/* If no tasks are using the UBC, we're done */
	if (ubc_usercnt == 0)
		/* If no tasks are using the UBC, we're done */;
	else if (next->thread.ubc_pc && next->mm) {
		int asid = 0;

		asid |= cpu_asid(smp_processor_id(), next->mm);
		ubc_set_tracing(asid, next->thread.ubc_pc);
	} else {
#if defined(CONFIG_CPU_SH4A)
		ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
		ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
#else
		ctrl_outw(0, UBC_BBRA);
		ctrl_outw(0, UBC_BBRB);
#endif
	}

	return prev;
}
asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
			unsigned long r6, unsigned long r7,
			struct pt_regs __regs)
{
#ifdef CONFIG_MMU
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
	return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL);
#else
	/* fork almost works, enough to trick you into looking elsewhere :-( */
	return -EINVAL;
#endif
}
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 unsigned long parent_tidptr,
			 unsigned long child_tidptr,
			 struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);

	if (!newsp)
		newsp = regs->regs[15];

	return do_fork(clone_flags, newsp, regs, 0,
		       (int __user *)parent_tidptr,
		       (int __user *)child_tidptr);
}
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
			 unsigned long r6, unsigned long r7,
			 struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);

	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs,
		       0, NULL, NULL);
}
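/*
 * For reference, the user-space pattern this call serves (a sketch only,
 * not kernel code): the child borrows the parent's address space until it
 * execs or exits, hence the CLONE_VFORK | CLONE_VM flags above.
 *
 *	pid_t pid = vfork();
 *	if (pid == 0) {
 *		execl("/bin/true", "true", (char *)NULL);
 *		_exit(127);	(reached only if exec fails)
 *	}
 */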
/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
			  char __user * __user *uenvp, unsigned long r7,
			  struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
	int error;
	char *filename;

	filename = getname(ufilename);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = do_execve(filename, uargv, uenvp, regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
out:
	return error;
}
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * The same comment as on the Alpha applies here, too ...
	 */
	pc = thread_saved_pc(p);

#ifdef CONFIG_FRAME_POINTER
	if (in_sched_functions(pc)) {
		unsigned long schedule_frame = (unsigned long)p->thread.sp;
		return ((unsigned long *)schedule_frame)[21];
	}
#endif

	return pc;
}
asmlinkage void break_point_trap(void)
{
	/* Clear tracing. */
#if defined(CONFIG_CPU_SH4A)
	ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
	ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
#else
	ctrl_outw(0, UBC_BBRA);
	ctrl_outw(0, UBC_BBRB);
#endif
	current->thread.ubc_pc = 0;
	ubc_usercnt -= 1;

	force_sig(SIGTRAP, current);
}