/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/user.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
void __noreturn cpu_idle(void)
{
        int cpu;

        /* CPU is going idle. */
        cpu = smp_processor_id();

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_stop_sched_tick(1);
                while (!need_resched() && cpu_online(cpu)) {
#ifdef CONFIG_MIPS_MT_SMTC
                        extern void smtc_idle_loop_hook(void);

                        smtc_idle_loop_hook();
#endif
                        if (cpu_wait) {
                                /* Don't trace irqs off for idle */
                                stop_critical_timings();
                                (*cpu_wait)();
                                start_critical_timings();
                        }
                }
#ifdef CONFIG_HOTPLUG_CPU
                if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map) &&
                    (system_state == SYSTEM_RUNNING ||
                     system_state == SYSTEM_BOOTING))
                        play_dead();
#endif
                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}
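/*
 * Summary of the idle loop above (descriptive note, not from the original
 * source): the periodic tick is stopped with tick_nohz_stop_sched_tick()
 * while the CPU stays idle, the optional cpu_wait callback drops the core
 * into its low-power wait state, and the tick is restarted before
 * re-enabling preemption and calling schedule() so time accounting is
 * consistent when the scheduler runs again.
 */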
asmlinkage void ret_from_fork(void);

void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
        unsigned long status;

        /* New thread loses kernel privileges. */
        status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
#ifdef CONFIG_64BIT
        status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
#endif
        status |= KU_USER;
        regs->cp0_status = status;
        regs->cp0_epc = pc;
        regs->regs[29] = sp;
        current_thread_info()->addr_limit = USER_DS;
}
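/*
 * Illustration for start_thread() above (descriptive note; the bit meanings
 * come from asm/mipsregs.h and asm/isadep.h, not from this file):
 *
 *      ST0_CU0  - coprocessor 0 usable  -> cleared: no CP0 access from user
 *      ST0_CU1  - coprocessor 1 usable  -> cleared: FPU traps until enabled
 *      ST0_FR   - 64-bit FPR mode       -> cleared, re-set unless TIF_32BIT_REGS
 *      KU_MASK  - kernel/user mode bits -> replaced by KU_USER
 *
 * so the new user thread starts in user mode with no coprocessor access.
 */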
void exit_thread(void)
{
}

void flush_thread(void)
{
}

int copy_thread(unsigned long clone_flags, unsigned long usp,
        unsigned long unused, struct task_struct *p, struct pt_regs *regs)
{
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs;
        unsigned long childksp;
        p->set_child_tid = p->clear_child_tid = NULL;

        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

        /* set up new TSS. */
        childregs = (struct pt_regs *) childksp - 1;
        /* Put the stack after the struct pt_regs. */
        childksp = (unsigned long) childregs;
        *childregs = *regs;
        childregs->regs[7] = 0; /* Clear error flag */

        childregs->regs[2] = 0; /* Child gets zero as return value */

        if (childregs->cp0_status & ST0_CU0) {
                /* Kernel thread: run on the kernel stack with kernel limits. */
                childregs->regs[28] = (unsigned long) ti;
                childregs->regs[29] = childksp;
                ti->addr_limit = KERNEL_DS;
        } else {
                childregs->regs[29] = usp;
                ti->addr_limit = USER_DS;
        }
        p->thread.reg29 = (unsigned long) childregs;
        p->thread.reg31 = (unsigned long) ret_from_fork;

        /*
         * New tasks lose permission to use the fpu. This accelerates context
         * switching for most programs since they don't use the fpu.
         */
        p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
        childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC restores TCStatus after Status, and the CU bits
         * are aliased there.
         */
        childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
#endif
        clear_tsk_thread_flag(p, TIF_USEDFPU);

#ifdef CONFIG_MIPS_MT_FPAFF
        clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

        if (clone_flags & CLONE_SETTLS)
                ti->tp_value = regs->regs[7];

        return 0;
}
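/*
 * Sketch of the child kernel stack laid out by copy_thread() above
 * (derived from the code; the 32-byte gap is the conventional MIPS
 * argument save area kept free at the top of the stack):
 *
 *      task_stack_page(p) + THREAD_SIZE        top of the stack page
 *              - 32                            reserved gap
 *              - sizeof(struct pt_regs)        childregs == p->thread.reg29
 *              ...                             usable kernel stack
 *      task_stack_page(p)                      struct thread_info
 *
 * Setting p->thread.reg31 to ret_from_fork makes the first switch to the
 * child "return" into ret_from_fork, which then unwinds through childregs.
 */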
/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
        memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));

        return 1;
}
void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
{
        int i;

        for (i = 0; i < EF_R0; i++)
                gp[i] = 0;
        gp[EF_R0] = 0;
        for (i = 1; i <= 31; i++)
                gp[EF_R0 + i] = regs->regs[i];

        gp[EF_LO] = regs->lo;
        gp[EF_HI] = regs->hi;
        gp[EF_CP0_EPC] = regs->cp0_epc;
        gp[EF_CP0_BADVADDR] = regs->cp0_badvaddr;
        gp[EF_CP0_STATUS] = regs->cp0_status;
        gp[EF_CP0_CAUSE] = regs->cp0_cause;
}
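/*
 * Note on the gp[] layout used above (it mirrors the EF_* indices from
 * asm/reg.h): slots below EF_R0 are zeroed padding, EF_R0 + n holds general
 * purpose register n, and the trailing slots carry lo/hi plus the CP0 epc,
 * badvaddr, status and cause values, matching the elf_gregset_t layout used
 * in a MIPS core dump.
 */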
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        elf_dump_regs(*regs, task_pt_regs(tsk));

        return 1;
}
int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
        memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));

        return 1;
}
/*
 * Create a kernel thread
 */
static void __noreturn kernel_thread_helper(void *arg, int (*fn)(void *))
{
        do_exit(fn(arg));
}

long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.regs[4] = (unsigned long) arg;
        regs.regs[5] = (unsigned long) fn;
        regs.cp0_epc = (unsigned long) kernel_thread_helper;
        regs.cp0_status = read_c0_status();
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        regs.cp0_status = (regs.cp0_status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
                          ((regs.cp0_status & (ST0_KUC | ST0_IEC)) << 2);
#else
        regs.cp0_status |= ST0_EXL;
#endif

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
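/*
 * Usage sketch for kernel_thread() above (illustrative only; "my_worker"
 * is a hypothetical function, not part of this file):
 *
 *      static int my_worker(void *data)
 *      {
 *              return 0;
 *      }
 *
 *      kernel_thread(my_worker, NULL, 0);
 *
 * The new thread shares the kernel address space (CLONE_VM) and starts in
 * kernel_thread_helper(), which calls my_worker(NULL) and passes its return
 * value to do_exit().
 */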
struct mips_frame_info {
        void            *func;
        unsigned long   func_size;
        int             frame_size;
        int             pc_offset;
};

static inline int is_ra_save_ins(union mips_instruction *ip)
{
        /* sw / sd $ra, offset($sp) */
        return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
               ip->i_format.rs == 29 &&
               ip->i_format.rt == 31;
}
static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
{
        /* jal / jalr / jr */
        if (ip->j_format.opcode == jal_op)
                return 1;
        if (ip->r_format.opcode != spec_op)
                return 0;
        return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
}
static inline int is_sp_move_ins(union mips_instruction *ip)
{
        /* addiu/daddiu sp,sp,-imm */
        if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
                return 0;
        if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
                return 1;

        return 0;
}
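/*
 * Example of the prologue pattern the three helpers above recognise
 * (a typical non-leaf o32 function; illustrative only):
 *
 *      addiu   $sp, $sp, -32   <- is_sp_move_ins(): frame_size = 32
 *      sw      $ra, 28($sp)    <- is_ra_save_ins(): pc_offset = 28 / sizeof(long)
 *      ...
 *      jal     some_callee     <- is_jal_jalr_jr_ins(): ends the scan in
 *                                 get_frame_info() below
 */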
static int get_frame_info(struct mips_frame_info *info)
{
        union mips_instruction *ip = info->func;
        unsigned max_insns = info->func_size / sizeof(union mips_instruction);
        unsigned i;

        info->pc_offset = -1;
        info->frame_size = 0;

        if (!ip)
                goto err;

        if (max_insns == 0)
                max_insns = 128U;       /* unknown function size */
        max_insns = min(128U, max_insns);

        for (i = 0; i < max_insns; i++, ip++) {

                if (is_jal_jalr_jr_ins(ip))
                        break;
                if (!info->frame_size) {
                        if (is_sp_move_ins(ip))
                                info->frame_size = - ip->i_format.simmediate;
                        continue;
                }
                if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
                        info->pc_offset =
                                ip->i_format.simmediate / sizeof(long);
                        break;
                }
        }
        if (info->frame_size && info->pc_offset >= 0) /* nested */
                return 0;
        if (info->pc_offset < 0) /* leaf */
                return 1;
        /* prologue seems bogus... */
err:
        return -1;
}
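/*
 * Summary of get_frame_info() return values (taken from the code above):
 * 0 means a normal nested function (both the frame size and the saved $ra
 * offset were found), 1 means a leaf function (no $ra spill was found), and
 * -1 means the prologue could not be analyzed.
 */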
static struct mips_frame_info schedule_mfi __read_mostly;

static int __init frame_info_init(void)
{
        unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
        unsigned long ofs;

        kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
#endif
        schedule_mfi.func = schedule;
        schedule_mfi.func_size = size;

        get_frame_info(&schedule_mfi);

        /*
         * Without schedule() frame info, the results given by
         * thread_saved_pc() and get_wchan() are not reliable.
         */
        if (schedule_mfi.pc_offset < 0)
                printk("Can't analyze schedule() prologue at %p\n", schedule);

        return 0;
}

arch_initcall(frame_info_init);
/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct thread_struct *t = &tsk->thread;

        /* New born processes are a special case */
        if (t->reg31 == (unsigned long) ret_from_fork)
                return t->reg31;
        if (schedule_mfi.pc_offset < 0)
                return 0;
        return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
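/*
 * Note on thread_saved_pc() above: for a blocked task, thread.reg29 is the
 * kernel stack pointer saved at the last context switch inside schedule(),
 * so indexing it with schedule_mfi.pc_offset picks up the $ra value spilled
 * by schedule()'s prologue, i.e. the address the task will resume at.
 */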
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
                                              unsigned long *sp,
                                              unsigned long pc,
                                              unsigned long *ra)
{
        struct mips_frame_info info;
        unsigned long size, ofs;
        int leaf;
        extern void ret_from_irq(void);
        extern void ret_from_exception(void);

        if (!stack_page)
                return 0;

        /*
         * If we reached the bottom of interrupt context,
         * return saved pc in pt_regs.
         */
        if (pc == (unsigned long)ret_from_irq ||
            pc == (unsigned long)ret_from_exception) {
                struct pt_regs *regs;
                if (*sp >= stack_page &&
                    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
                        regs = (struct pt_regs *)*sp;
                        pc = regs->cp0_epc;
                        if (__kernel_text_address(pc)) {
                                *sp = regs->regs[29];
                                *ra = regs->regs[31];
                                return pc;
                        }
                }
                return 0;
        }
        if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
                return 0;
        /*
         * Return ra if an exception occurred at the first instruction
         */
        if (unlikely(ofs == 0)) {
                pc = *ra;
                *ra = 0;
                return pc;
        }

        info.func = (void *)(pc - ofs);
        info.func_size = ofs;   /* analyze from start to ofs */
        leaf = get_frame_info(&info);
        if (leaf < 0)
                return 0;

        if (*sp < stack_page ||
            *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
                return 0;

        if (leaf)
                /*
                 * For some extreme cases, get_frame_info() can
                 * wrongly consider a nested function as a leaf
                 * one. In those cases, avoid always returning the
                 * same value.
                 */
                pc = pc != *ra ? *ra : 0;
        else
                pc = ((unsigned long *)(*sp))[info.pc_offset];

        *sp += info.frame_size;
        *ra = 0;
        return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);
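/*
 * Note: unwind_stack_by_address() is exported so code that only has a raw
 * stack page rather than a task_struct (e.g. kernel-mode call-chain
 * sampling) can reuse the same frame-walking logic; the unwind_stack()
 * wrapper below covers the common per-task case.
 */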
/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
                           unsigned long pc, unsigned long *ra)
{
        unsigned long stack_page = (unsigned long)task_stack_page(task);

        return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif
/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
        unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
        unsigned long sp;
        unsigned long ra = 0;
#endif

        if (!task || task == current || task->state == TASK_RUNNING)
                goto out;
        if (!task_stack_page(task))
                goto out;

        pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
        sp = task->thread.reg29 + schedule_mfi.frame_size;

        /* Skip over scheduler internals to find the real wait channel. */
        while (in_sched_functions(pc))
                pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
        return pc;
}
/*
 * Don't forget that the stack pointer must be aligned on an 8 byte boundary
 * for the 32-bit ABIs and on a 16 byte boundary for the 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;

        return sp & ALMASK;
}
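/*
 * Worked example for arch_align_stack() above (assuming 4 KiB pages):
 * ~PAGE_MASK is 0xfff, so "get_random_int() & ~PAGE_MASK" subtracts a random
 * 0-4095 byte offset from sp, and the final "& ALMASK" rounds the result
 * back down to the ABI-required 8 or 16 byte alignment.
 */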