/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>
#include <asm/nospec-branch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}
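
/*
 * For reference: audit_syscall_entry() records the syscall number plus the
 * first four arguments.  On x86-64 the syscall ABI passes arguments in
 * rdi, rsi, rdx, r10, r8, r9; the legacy i386 ABI uses ebx, ecx, edx,
 * esi, edi, ebp.  That is why the two branches above read different
 * pt_regs fields for the same logical argument slots.
 */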
/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	bool emulated = false;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (unlikely(work & _TIF_SYSCALL_EMU))
		emulated = true;

	if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		return -1L;

	if (emulated)
		return -1L;

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;

#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

	return ret ?: regs->orig_ax;
}
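
/*
 * A note on the return expression above: 'ret ?: regs->orig_ax' uses the
 * GNU C conditional-with-omitted-middle-operand extension.  It evaluates
 * to ret when ret is nonzero (i.e. -1 when seccomp decided to skip the
 * syscall) and to regs->orig_ax otherwise.  A plain-C sketch of the same
 * logic:
 *
 *	long nr = ret;
 *	if (nr == 0)
 *		nr = regs->orig_ax;
 *	return nr;
 */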
#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set.  Several of these flags
	 * can be set at any time on preemptable kernels if we have IRQs on,
	 * so we need to loop.  Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}
/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

	addr_limit_user_check();

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
		local_irq_disable();

	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();

	mds_user_clear_cpu_buffers();
}
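
/*
 * mds_user_clear_cpu_buffers() above is part of the MDS
 * (Microarchitectural Data Sampling) mitigation: when the mitigation is
 * enabled, it flushes CPU-internal buffers (via the VERW instruction) so
 * that stale kernel data cannot be sampled by user code after the return.
 */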
#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}
/*
 * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work.  If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}
#ifdef CONFIG_X86_64
__visible void do_syscall_64(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned long nr = regs->orig_ax;

	enter_from_user_mode();
	local_irq_enable();

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table.  The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
	if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
		nr = array_index_nospec(nr & __SYSCALL_MASK, NR_syscalls);
		regs->ax = sys_call_table[nr](
			regs->di, regs->si, regs->dx,
			regs->r10, regs->r8, regs->r9);
	}

	syscall_return_slowpath(regs);
}
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
 * all entry and exit work and returns with IRQs off.  This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it.  This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		nr = array_index_nospec(nr, IA32_NR_syscalls);
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero.  Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
	}

	syscall_return_slowpath(regs);
}
/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}
/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention.  Adjust regs so it looks like we entered using int80.
	 */
	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

	/* Fetch EBP from where the vDSO stashed it. */
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is explicitly
		 * 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			    (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
		) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}
	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF |
				X86_EFLAGS_VM)) == 0;
#endif
}
#endif