/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
#include <linux/nmi.h>
#include <linux/cpu.h>

#include <asm/bootinfo.h>
#include <asm/dsemul.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/isadep.h>
#include <asm/stacktrace.h>
#include <asm/irq_regs.h>
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
	init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}
void exit_thread(struct task_struct *tsk)
{
	/*
	 * User threads may have allocated a delay slot emulation frame.
	 * If so, clean up that allocation.
	 */
	if (!(current->flags & PF_KTHREAD))
		dsemul_thread_cleanup(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}
/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p,
		unsigned long tls)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = kthread_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = tls;

	return 0;
}
#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};

#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
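
/*
 * Editor's note: a worked example (addresses are illustrative only). The
 * 26-bit target field of a j/jal instruction indexes instructions within
 * the jump's own 256MB segment, so for a j at pc 0x80100008 with target
 * field 0x48d14:
 *
 *	J_TARGET(0x80100008, 0x48d14)
 *		== (0x80100008 & 0xf0000000) | (0x48d14 << 2)
 *		== 0x80000000 | 0x00123450
 *		== 0x80123450
 */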
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * swsp xx,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		switch (ip->mm16_r5_format.opcode) {
		case mm_swsp16_op:
			if (ip->mm16_r5_format.rt != 31)
				return 0;

			*poff = ip->mm16_r5_format.imm;
			*poff = (*poff << 2) / sizeof(ulong);
			return 1;

		case mm_pool16c_op:
			switch (ip->mm16_m_format.func) {
			case mm_swm16_op:
				*poff = ip->mm16_m_format.imm;
				*poff += 1 + ip->mm16_m_format.rlist;
				*poff = (*poff << 2) / sizeof(ulong);
				return 1;

			default:
				return 0;
			}

		default:
			return 0;
		}
	}

	switch (ip->i_format.opcode) {
	case mm_sw32_op:
		if (ip->i_format.rs != 29)
			return 0;
		if (ip->i_format.rt != 31)
			return 0;

		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;

	case mm_pool32b_op:
		switch (ip->mm_m_format.func) {
		case mm_swm32_func:
			if (ip->mm_m_format.rd < 0x10)
				return 0;
			if (ip->mm_m_format.base != 29)
				return 0;

			*poff = ip->mm_m_format.simmediate;
			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
			*poff /= sizeof(ulong);
			return 1;
		default:
			return 0;
		}

	default:
		return 0;
	}
#else
	/* sw / sd $ra, offset($sp) */
	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
	    ip->i_format.rs == 29 && ip->i_format.rt == 31) {
		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;
	}
#ifdef CONFIG_CPU_LOONGSON64
	if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
	    (ip->loongson3_lswc2_format.ls == 1) &&
	    (ip->loongson3_lswc2_format.fr == 0) &&
	    (ip->loongson3_lswc2_format.base == 29)) {
		if (ip->loongson3_lswc2_format.rt == 31) {
			*poff = ip->loongson3_lswc2_format.offset << 1;
			return 1;
		}
		if (ip->loongson3_lswc2_format.rq == 31) {
			*poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
			return 1;
		}
	}
#endif
	return 0;
#endif
}
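
/*
 * Editor's note: an illustrative decode for the classic (non-microMIPS)
 * path above. "sw ra, 28(sp)" is encoded with opcode sw_op, rs = 29
 * ($sp), rt = 31 ($ra) and simmediate = 28, so is_ra_save_ins() stores
 * 28 / sizeof(ulong) == 7 (on a 32-bit kernel) in *poff: the word index
 * of the saved $ra within the stack frame.
 */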
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalr16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
			return 1;
		return 0;
	}

	if (ip->j_format.opcode == mm_j32_op)
		return 1;
	if (ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}
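
/*
 * Editor's note: for the classic encodings above, "jr ra" decodes as
 * opcode spec_op with func jr_op, and "jal <target>" as opcode jal_op;
 * spotting either is what ends the prologue scan in get_frame_info()
 * below.
 */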
static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
	unsigned short tmp;

	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
		    ip->mm16_r3_format.simmediate & mm_addiusp_func) {
			tmp = ip->mm_b0_format.simmediate >> 1;
			tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
			if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
				tmp ^= 0x100;
			*frame_size = -(signed short)(tmp << 2);
			return 1;
		}
		if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
		    ip->mm16_r5_format.rt == 29) {
			tmp = ip->mm16_r5_format.imm >> 1;
			*frame_size = -(signed short)(tmp & 0xf);
			return 1;
		}
		return 0;
	}

	if (ip->mm_i_format.opcode == mm_addiu32_op &&
	    ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;

	if (ip->i_format.opcode == addiu_op ||
	    ip->i_format.opcode == daddiu_op) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#endif
	return 0;
}
static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip;
	const unsigned int max_insns = 128;
	unsigned int last_insn_size = 0;
	unsigned int i;
	bool saw_jump = false;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	for (i = 0; i < max_insns; i++) {
		ip = (void *)ip + last_insn_size;

		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.word = ip->halfword[0] << 16;
			last_insn_size = 2;
		} else if (is_mmips) {
			insn.word = ip->halfword[0] << 16 | ip->halfword[1];
			last_insn_size = 4;
		} else {
			insn.word = ip->word;
			last_insn_size = 4;
		}

		if (!info->frame_size) {
			is_sp_move_ins(&insn, &info->frame_size);
			continue;
		} else if (!saw_jump && is_jump_ins(ip)) {
			/*
			 * If we see a jump instruction, we are finished
			 * with the frame save.
			 *
			 * Some functions can have a shortcut return at
			 * the beginning of the function, so don't start
			 * looking for jump instructions until we see the
			 * frame setup.
			 *
			 * The RA save instruction can get put into the
			 * delay slot of the jump instruction, so look
			 * at the next instruction, too.
			 */
			saw_jump = true;
			continue;
		}
		if (info->pc_offset == -1 &&
		    is_ra_save_ins(&insn, &info->pc_offset))
			break;
		if (saw_jump)
			break;
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}
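
/*
 * Editor's note: putting the helpers together (illustrative). For a
 * function that begins
 *
 *	addiu	sp, sp, -32	# is_sp_move_ins(): frame_size = 32
 *	sw	ra, 28(sp)	# is_ra_save_ins(): pc_offset = 28/sizeof(ulong)
 *
 * get_frame_info() returns 0 ("nested"), which is what lets the unwinder
 * below recover both the caller's stack pointer and the saved return
 * address from a given frame.
 */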
static struct mips_frame_info schedule_mfi __read_mostly;
#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif
static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);
/*
 * Return saved PC of a blocked thread.
 */
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;

	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	unsigned long low, high, irq_stack_high;
	struct mips_frame_info info;
	unsigned long size, ofs;
	struct pt_regs *regs;
	int leaf;

	if (!stack_page)
		return 0;

	/*
	 * IRQ stacks start at IRQ_STACK_START,
	 * task stacks at THREAD_SIZE - 32.
	 */
	low = stack_page;
	if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
		high = stack_page + IRQ_STACK_START;
		irq_stack_high = high;
	} else {
		high = stack_page + THREAD_SIZE - 32;
		irq_stack_high = 0;
	}

	/*
	 * If we reached the top of the interrupt stack, start unwinding
	 * the interrupted task stack.
	 */
	if (unlikely(*sp == irq_stack_high)) {
		unsigned long task_sp = *(unsigned long *)*sp;

		/*
		 * Check that the pointer saved in the IRQ stack head points to
		 * something within the stack of the current task.
		 */
		if (!object_is_on_stack((void *)task_sp))
			return 0;

		/*
		 * Follow the pointer to the task's kernel stack frame where
		 * the interrupted state was saved.
		 */
		regs = (struct pt_regs *)task_sp;
		pc = regs->cp0_epc;
		if (!user_mode(regs) && __kernel_text_address(pc)) {
			*sp = regs->regs[29];
			*ra = regs->regs[31];
			return pc;
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction.
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < low || *sp + info.frame_size > high)
		return 0;

	if (leaf)
		/*
		 * For some extreme cases, get_frame_info() can
		 * wrongly consider a nested function as a leaf
		 * one. In that case, avoid always returning the
		 * same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);
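
/*
 * Editor's usage sketch: roughly how show_backtrace() in traps.c drives
 * this interface (the variable setup here is illustrative):
 *
 *	unsigned long sp = regs->regs[29];
 *	unsigned long ra = regs->regs[31];
 *	unsigned long pc = regs->cp0_epc;
 *
 *	do {
 *		printk("%pS\n", (void *)pc);
 *		pc = unwind_stack(task, &sp, pc, &ra);
 *	} while (pc);
 */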
/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}

	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif
/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}
unsigned long mips_stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* One page for branch delay slot "emulation" */
		top -= PAGE_SIZE;
	}

	/* Space for the VDSO, data page & GIC user page */
	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
	top -= PAGE_SIZE;
	top -= mips_gic_present() ? PAGE_SIZE : 0;

	/* Space for cache colour alignment */
	if (cpu_has_dc_aliases)
		top -= shm_align_mask + 1;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}
/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and a 16-byte boundary for the 64-bit
 * ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}
static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
	CSD_INIT(handle_backtrace, NULL);
static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}
int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}
static long prepare_for_fp_mode_switch(void *unused)
{
	/*
	 * This is icky, but we use this to simply ensure that all CPUs have
	 * context switched, regardless of whether they were previously running
	 * kernel or user code. This ensures that no CPU that a mode-switching
	 * program may execute on keeps its FPU enabled (& in the old mode)
	 * throughout the mode switch.
	 */
	return 0;
}
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	struct cpumask process_cpus;
	int cpu;

	/* If nothing to change, return right away, successfully. */
	if (value == mips_get_process_fp_mode(task))
		return 0;

	/* Only accept a mode change if 64-bit FP enabled for o32. */
	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return -EOPNOTSUPP;

	/* And only for o32 tasks. */
	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
		return -EOPNOTSUPP;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Setting FRE without FR is not supported. */
	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Indicate the new FP mode in each thread */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/*
	 * We need to ensure that all threads in the process have switched mode
	 * before returning, in order to allow userland to not worry about
	 * races. We can do this by forcing all CPUs that any thread in the
	 * process may be running on to schedule something else - in this case
	 * prepare_for_fp_mode_switch().
	 *
	 * We begin by generating a mask of all CPUs that any thread in the
	 * process may be running on.
	 */
	cpumask_clear(&process_cpus);
	for_each_thread(task, t)
		cpumask_set_cpu(task_cpu(t), &process_cpus);

	/*
	 * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
	 *
	 * The CPUs may have rescheduled already since we switched mode or
	 * generated the cpumask, but that doesn't matter. If the task in this
	 * process is scheduled out then our scheduling
	 * prepare_for_fp_mode_switch() will simply be redundant. If it's
	 * scheduled in then it will already have picked up the new FP mode
	 * whilst doing so.
	 */
	get_online_cpus();
	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
	put_online_cpus();

	return 0;
}
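
/*
 * Editor's usage sketch: this function is the backend of
 * prctl(PR_SET_FP_MODE). From userspace, an o32 program could request
 * the FR=1 register model like so (illustrative, error handling
 * trimmed):
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR) != 0)
 *		perror("PR_SET_FP_MODE");
 *
 * A failure with errno == EOPNOTSUPP corresponds to one of the
 * capability checks above rejecting the request; the current mode can
 * be read back with prctl(PR_GET_FP_MODE).
 */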
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			continue;

		uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			continue;

		uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */