/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SWITCH_TO_H
#define _ASM_SWITCH_TO_H

#include <asm/cpu-features.h>
#include <asm/watch.h>
#include <asm/dsp.h>
#include <asm/cop2.h>
#include <asm/fpu.h>

struct task_struct;

/**
 * resume - resume execution of a task
 * @prev:	The task previously executed.
 * @next:	The task to begin executing.
 * @next_ti:	task_thread_info(next).
 *
 * This function is used whilst scheduling to save the context of prev & load
 * the context of next. Returns prev.
 */
extern asmlinkage struct task_struct *resume(struct task_struct *prev,
		struct task_struct *next, struct thread_info *next_ti);

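/*
 * Only the prototype lives here; resume() itself is implemented in
 * per-CPU-family assembly (see e.g. arch/mips/kernel/r4k_switch.S).
 */
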
extern unsigned int ll_bit;
extern struct task_struct *ll_task;

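/*
 * ll_bit and ll_task back the kernel's software emulation of ll/sc on
 * CPUs without native support; __clear_software_ll_bit() below
 * invalidates the pending reservation on every context switch.
 */
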
#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management. We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_mask.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define __mips_mt_fpaff_switch_to(prev)					\
do {									\
	struct thread_info *__prev_ti = task_thread_info(prev);	\
									\
	if (cpu_has_fpu &&						\
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
		prev->cpus_mask = prev->thread.user_cpus_allowed;	\
	}								\
	next->thread.emulated_fp = 0;					\
} while (0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

/*
 * Clear LLBit during context switches on MIPSr5+ such that eretnc can be used
 * unconditionally when returning to userland in entry.S.
 */
#define __clear_r5_hw_ll_bit() do {					\
	if (cpu_has_mips_r5 || cpu_has_mips_r6)			\
		write_c0_lladdr(0);					\
} while (0)

#define __clear_software_ll_bit() do {					\
	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)	\
		ll_bit = 0;						\
} while (0)

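/*
 * Note: the __builtin_constant_p() test above lets the compiler elide
 * the store entirely when cpu_has_llsc is a compile-time constant true,
 * i.e. on platforms that never use the software ll/sc emulation.
 */
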
/*
 * Check FCSR for any unmasked exceptions pending set with `ptrace',
 * clear them and send a signal.
 */
#ifdef CONFIG_MIPS_FP_SUPPORT
# define __sanitize_fcr31(next)						\
do {									\
	unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31);	\
	void __user *pc;						\
									\
	if (unlikely(fcr31)) {						\
		pc = (void __user *)task_pt_regs(next)->cp0_epc;	\
		next->thread.fpu.fcr31 &= ~fcr31;			\
		force_fcr31_sig(fcr31, pc, next);			\
	}								\
} while (0)
#else
# define __sanitize_fcr31(next) do { (void) (next); } while (0)
#endif

/*
 * For newly created kernel threads switch_to() will return to
 * ret_from_kernel_thread, newly created user threads to ret_from_fork.
 * That is, everything following resume() will be skipped for new threads.
 * So everything that matters to new threads should be placed before resume().
 */
#define switch_to(prev, next, last)					\
do {									\
	__mips_mt_fpaff_switch_to(prev);				\
	lose_fpu_inatomic(1, prev);					\
	if (tsk_used_math(next))					\
		__sanitize_fcr31(next);					\
	if (cpu_has_dsp) {						\
		__save_dsp(prev);					\
		__restore_dsp(next);					\
	}								\
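	/* COP2 state (e.g. the Octeon coprocessor) is saved eagerly	\
	 * below; with cop2_lazy_restore the reload is deferred to the	\
	 * first COP2 use in next rather than done here.		\
	 */								\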
	if (cop2_present) {						\
		u32 status = read_c0_status();				\
									\
		set_c0_status(status | ST0_CU2);			\
		if ((KSTK_STATUS(prev) & ST0_CU2)) {			\
			if (cop2_lazy_restore)				\
				KSTK_STATUS(prev) &= ~ST0_CU2;		\
			cop2_save(prev);				\
		}							\
		if (KSTK_STATUS(next) & ST0_CU2 &&			\
		    !cop2_lazy_restore) {				\
			cop2_restore(next);				\
		}							\
		write_c0_status(status);				\
	}								\
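	/* Make sure no LLBit state, hardware or emulated, leaks from	\
	 * prev into next.						\
	 */								\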
	__clear_r5_hw_ll_bit();						\
	__clear_software_ll_bit();					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(task_thread_info(next)->tp_value);	\
	__restore_watch(next);						\
	(last) = resume(prev, next, task_thread_info(next));		\
} while (0)

#endif /* _ASM_SWITCH_TO_H */