#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

struct task_struct; /* one of the stranger aspects of C forward declarations */
__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);
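/*
 * __switch_to() is the C half of the context switch: it hands over the
 * remaining per-task CPU state (FPU, segment/TLS registers and the like)
 * and returns the task we switched away from.
 */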
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);
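/*
 * __switch_to_xtra() covers the less common bits of per-task state (debug
 * registers, the I/O permission bitmap and similar TIF-flagged work) when
 * they differ between the outgoing and the incoming task.
 */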
#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary						\
	"movl %P[task_canary](%[next]), %%ebx\n\t"		\
	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam					\
	, [stack_canary] "=m" (stack_canary.canary)
#define __switch_canary_iparam					\
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */
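/*
 * With the stack protector enabled, compiler-generated checks read the
 * canary from a fixed per-CPU slot; __switch_canary copies the incoming
 * task's stack_canary into that slot so every task runs with its own value.
 */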
/*
 * Saving eflags is important: it not only switches IOPL between tasks,
 * it also protects other tasks from NT leaking in through sysenter etc.
 */
#define switch_to(prev, next, last)				\
do {								\
	/*							\
	 * Context-switching clobbers all registers, so we	\
	 * clobber them explicitly, via unused output		\
	 * variables.						\
	 * (EAX and EBP are not listed because EBP is saved/	\
	 *  restored explicitly for wchan access and EAX is	\
	 *  the return value of __switch_to())			\
	 */							\
	unsigned long ebx, ecx, edx, esi, edi;			\
								\
	asm volatile("pushfl\n\t"		/* save    flags */	\
		     "pushl %%ebp\n\t"		/* save    EBP   */	\
		     "movl %%esp,%[prev_sp]\n\t" /* save    ESP   */	\
		     "movl %[next_sp],%%esp\n\t" /* restore ESP   */	\
		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
		     __switch_canary					\
		     "jmp __switch_to\n"	/* regparm call  */	\
		     "1:\t"						\
		     "popl %%ebp\n\t"		/* restore EBP   */	\
		     "popfl\n"			/* restore flags */	\
									\
		     /* output parameters */				\
		     : [prev_sp] "=m" (prev->thread.sp),		\
		       [prev_ip] "=m" (prev->thread.ip),		\
		       "=a" (last),					\
									\
		       /* clobbered output registers: */		\
		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
		       "=S" (esi), "=D" (edi)				\
									\
		       __switch_canary_oparam				\
									\
		       /* input parameters: */				\
		     : [next_sp]  "m" (next->thread.sp),		\
		       [next_ip]  "m" (next->thread.ip),		\
									\
		       /* regparm parameters for __switch_to(): */	\
		       [prev]     "a" (prev),				\
		       [next]     "d" (next)				\
									\
		       __switch_canary_iparam				\
									\
		     : /* reloaded segment registers */			\
			"memory");					\
} while (0)
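/*
 * Rough usage sketch, simplified from the scheduler's context_switch()
 * (the exact calls vary between kernel versions):
 *
 *	switch_to(prev, next, prev);
 *	barrier();
 *	finish_task_switch(this_rq(), prev);
 *
 * The third argument is an output: when this task is eventually switched
 * back in, it receives the task we actually came from, which may no longer
 * be the original 'prev'.
 */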
#else /* CONFIG_X86_32 */

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
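/*
 * The switch below performs a real "call __switch_to" and resumes on a
 * different kernel stack, so every general-purpose register the compiler
 * might still have live -- apart from the explicit operand registers -- is
 * declared clobbered via __EXTRA_CLOBBER.
 */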
#define __EXTRA_CLOBBER  \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary						\
	"movq %P[task_canary](%%rsi),%%r8\n\t"			\
	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam					\
	, [gs_canary] "=m" (irq_stack_union.stack_canary)
#define __switch_canary_iparam					\
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */
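/*
 * On 64-bit the canary sits at a fixed %gs-relative offset inside
 * irq_stack_union; __switch_canary copies the incoming task's stack_canary
 * there (via %rsi, which has just been reloaded with current_task).
 */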
/* Save and restore flags to clear/handle a leaking NT flag across the switch */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					  \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
	     "call __switch_to\n\t"					  \
	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
	     __switch_canary						  \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
	     "movq %%rax,%%rdi\n\t"					  \
	     "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
	     "jnz   ret_from_fork\n\t"					  \
	     RESTORE_CONTEXT						  \
	     : "=a" (last)						  \
	       __switch_canary_oparam					  \
	     : [next] "S" (next), [prev] "D" (prev),			  \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
	       [_tif_fork] "i" (_TIF_FORK),				  \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
	       [current_task] "m" (current_task)			  \
	       __switch_canary_iparam					  \
	     : "memory", "cc" __EXTRA_CLOBBER)
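/*
 * A freshly forked task still has _TIF_FORK set in its thread_info flags,
 * so the first switch into it jumps to ret_from_fork instead of falling
 * through, with the previous task (returned by __switch_to() in %rax and
 * copied to %rdi above) available as the schedule_tail() argument.  In the
 * common case the previous task simply lands in 'last' via the "=a" output.
 */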
#endif /* CONFIG_X86_32 */

#endif /* _ASM_X86_SWITCH_TO_H */