kernel/seccomp.c
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 * of Berkeley Packet Filters/Linux Socket Filters.
 */
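/*
 * Illustrative sketch (not part of this file): one way userspace might
 * install a minimal mode-2 filter via seccomp(2). Hedged example; it
 * assumes the uapi definitions from <linux/seccomp.h>, <linux/filter.h>
 * and <linux/audit.h>, and an x86-64 caller.
 *
 *	#include <stddef.h>
 *	#include <unistd.h>
 *	#include <sys/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <linux/audit.h>
 *	#include <linux/filter.h>
 *	#include <linux/seccomp.h>
 *
 *	static int install_allow_all_filter(void)
 *	{
 *		struct sock_filter insns[] = {
 *			// Fail closed if the syscall ABI is not the one we built for.
 *			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *				 offsetof(struct seccomp_data, arch)),
 *			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 1, 0),
 *			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS),
 *			// Otherwise allow every system call.
 *			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *		};
 *		struct sock_fprog prog = {
 *			.len = sizeof(insns) / sizeof(insns[0]),
 *			.filter = insns,
 *		};
 *
 *		// Either no_new_privs or CAP_SYS_ADMIN is required; see
 *		// seccomp_prepare_filter() below.
 *		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
 *			return -1;
 *		return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog);
 *	}
 */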
17 #include <linux/refcount.h>
18 #include <linux/audit.h>
19 #include <linux/compat.h>
20 #include <linux/coredump.h>
21 #include <linux/kmemleak.h>
22 #include <linux/nospec.h>
23 #include <linux/prctl.h>
24 #include <linux/sched.h>
25 #include <linux/sched/task_stack.h>
26 #include <linux/seccomp.h>
27 #include <linux/slab.h>
28 #include <linux/syscalls.h>
29 #include <linux/sysctl.h>
31 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
32 #include <asm/syscall.h>
33 #endif
35 #ifdef CONFIG_SECCOMP_FILTER
36 #include <linux/file.h>
37 #include <linux/filter.h>
38 #include <linux/pid.h>
39 #include <linux/ptrace.h>
40 #include <linux/security.h>
41 #include <linux/tracehook.h>
42 #include <linux/uaccess.h>
43 #include <linux/anon_inodes.h>
enum notify_state {
	SECCOMP_NOTIFY_INIT,
	SECCOMP_NOTIFY_SENT,
	SECCOMP_NOTIFY_REPLIED,
};

struct seccomp_knotif {
	/* The task whose filter triggered the notification */
	struct task_struct *task;

	/* The "cookie" for this request; this is unique for this filter. */
	u64 id;

	/*
	 * The seccomp data. This pointer is valid the entire time this
	 * notification is active, since it comes from __seccomp_filter which
	 * eclipses the entire lifecycle here.
	 */
	const struct seccomp_data *data;

	/*
	 * Notification states. When SECCOMP_RET_USER_NOTIF is returned, a
	 * struct seccomp_knotif is created and starts out in INIT. Once the
	 * handler reads the notification off of an FD, it transitions to SENT.
	 * If a signal is received the state transitions back to INIT and
	 * another message is sent. When the userspace handler replies, state
	 * transitions to REPLIED.
	 */
	enum notify_state state;

	/* The return values, only valid when in SECCOMP_NOTIFY_REPLIED */
	int error;
	long val;

	/* Signals when this has entered SECCOMP_NOTIFY_REPLIED */
	struct completion ready;

	struct list_head list;
};
/**
 * struct notification - container for seccomp userspace notifications. Since
 * most seccomp filters will not have notification listeners attached and this
 * structure is fairly large, we store the notification-specific stuff in a
 * separate structure.
 *
 * @request: A semaphore that users of this notification can wait on for
 *           changes. Actual reads and writes are still controlled with
 *           filter->notify_lock.
 * @next_id: The id of the next request.
 * @notifications: A list of struct seccomp_knotif elements.
 * @wqh: A wait queue for poll.
 */
struct notification {
	struct semaphore request;
	u64 next_id;
	struct list_head notifications;
	wait_queue_head_t wqh;
};
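/*
 * Illustrative sketch (not part of this file): a minimal userspace handler
 * loop for the listener fd returned when a filter is installed with
 * SECCOMP_FILTER_FLAG_NEW_LISTENER. Hedged example; it assumes the uapi
 * ioctls and structures from <linux/seccomp.h>.
 *
 *	#include <errno.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/seccomp.h>
 *
 *	static int handle_notifications(int listener)
 *	{
 *		struct seccomp_notif req;
 *		struct seccomp_notif_resp resp;
 *
 *		for (;;) {
 *			memset(&req, 0, sizeof(req));
 *			if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req) < 0)
 *				return -1;
 *
 *			// Inspect req.data (nr, args, ...) and emulate the call
 *			// here; this sketch simply fails everything with ENOSYS.
 *			memset(&resp, 0, sizeof(resp));
 *			resp.id = req.id;
 *			resp.error = -ENOSYS;
 *			if (ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp) < 0)
 *				return -1;
 *		}
 *	}
 */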
/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section. In general, this
 *         is only needed for handling filters shared across tasks.
 * @log: true if all actions except for SECCOMP_RET_ALLOW should be logged
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 * @notif: the struct that holds all notification related information
 * @notify_lock: A lock for all notification-related accesses.
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer. For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory. This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	refcount_t usage;
	bool log;
	struct seccomp_filter *prev;
	struct bpf_prog *prog;
	struct notification *notif;
	struct mutex notify_lock;
};
137 /* Limit any path through the tree to 256KB worth of instructions. */
138 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
144 static void populate_seccomp_data(struct seccomp_data *sd)
146 struct task_struct *task = current;
147 struct pt_regs *regs = task_pt_regs(task);
148 unsigned long args[6];
150 sd->nr = syscall_get_nr(task, regs);
151 sd->arch = syscall_get_arch();
152 syscall_get_arguments(task, regs, 0, 6, args);
153 sd->args[0] = args[0];
154 sd->args[1] = args[1];
155 sd->args[2] = args[2];
156 sd->args[3] = args[3];
157 sd->args[4] = args[4];
158 sd->args[5] = args[5];
159 sd->instruction_pointer = KSTK_EIP(task);
163 * seccomp_check_filter - verify seccomp filter code
164 * @filter: filter to verify
165 * @flen: length of filter
167 * Takes a previously checked filter (by bpf_check_classic) and
168 * redirects all filter code that loads struct sk_buff data
169 * and related data through seccomp_bpf_load. It also
170 * enforces length and alignment checking of those loads.
172 * Returns 0 if the rule set is legal or -EINVAL if not.
174 static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
176 int pc;
177 for (pc = 0; pc < flen; pc++) {
178 struct sock_filter *ftest = &filter[pc];
179 u16 code = ftest->code;
180 u32 k = ftest->k;
182 switch (code) {
183 case BPF_LD | BPF_W | BPF_ABS:
184 ftest->code = BPF_LDX | BPF_W | BPF_ABS;
185 /* 32-bit aligned and not out of bounds. */
186 if (k >= sizeof(struct seccomp_data) || k & 3)
187 return -EINVAL;
188 continue;
189 case BPF_LD | BPF_W | BPF_LEN:
190 ftest->code = BPF_LD | BPF_IMM;
191 ftest->k = sizeof(struct seccomp_data);
192 continue;
193 case BPF_LDX | BPF_W | BPF_LEN:
194 ftest->code = BPF_LDX | BPF_IMM;
195 ftest->k = sizeof(struct seccomp_data);
196 continue;
197 /* Explicitly include allowed calls. */
198 case BPF_RET | BPF_K:
199 case BPF_RET | BPF_A:
200 case BPF_ALU | BPF_ADD | BPF_K:
201 case BPF_ALU | BPF_ADD | BPF_X:
202 case BPF_ALU | BPF_SUB | BPF_K:
203 case BPF_ALU | BPF_SUB | BPF_X:
204 case BPF_ALU | BPF_MUL | BPF_K:
205 case BPF_ALU | BPF_MUL | BPF_X:
206 case BPF_ALU | BPF_DIV | BPF_K:
207 case BPF_ALU | BPF_DIV | BPF_X:
208 case BPF_ALU | BPF_AND | BPF_K:
209 case BPF_ALU | BPF_AND | BPF_X:
210 case BPF_ALU | BPF_OR | BPF_K:
211 case BPF_ALU | BPF_OR | BPF_X:
212 case BPF_ALU | BPF_XOR | BPF_K:
213 case BPF_ALU | BPF_XOR | BPF_X:
214 case BPF_ALU | BPF_LSH | BPF_K:
215 case BPF_ALU | BPF_LSH | BPF_X:
216 case BPF_ALU | BPF_RSH | BPF_K:
217 case BPF_ALU | BPF_RSH | BPF_X:
218 case BPF_ALU | BPF_NEG:
219 case BPF_LD | BPF_IMM:
220 case BPF_LDX | BPF_IMM:
221 case BPF_MISC | BPF_TAX:
222 case BPF_MISC | BPF_TXA:
223 case BPF_LD | BPF_MEM:
224 case BPF_LDX | BPF_MEM:
225 case BPF_ST:
226 case BPF_STX:
227 case BPF_JMP | BPF_JA:
228 case BPF_JMP | BPF_JEQ | BPF_K:
229 case BPF_JMP | BPF_JEQ | BPF_X:
230 case BPF_JMP | BPF_JGE | BPF_K:
231 case BPF_JMP | BPF_JGE | BPF_X:
232 case BPF_JMP | BPF_JGT | BPF_K:
233 case BPF_JMP | BPF_JGT | BPF_X:
234 case BPF_JMP | BPF_JSET | BPF_K:
235 case BPF_JMP | BPF_JSET | BPF_X:
236 continue;
237 default:
238 return -EINVAL;
241 return 0;
245 * seccomp_run_filters - evaluates all seccomp filters against @sd
246 * @sd: optional seccomp data to be passed to filters
247 * @match: stores struct seccomp_filter that resulted in the return value,
248 * unless filter returned SECCOMP_RET_ALLOW, in which case it will
249 * be unchanged.
251 * Returns valid seccomp BPF response codes.
253 #define ACTION_ONLY(ret) ((s32)((ret) & (SECCOMP_RET_ACTION_FULL)))
254 static u32 seccomp_run_filters(const struct seccomp_data *sd,
255 struct seccomp_filter **match)
257 u32 ret = SECCOMP_RET_ALLOW;
258 /* Make sure cross-thread synced filter points somewhere sane. */
259 struct seccomp_filter *f =
260 READ_ONCE(current->seccomp.filter);
262 /* Ensure unexpected behavior doesn't result in failing open. */
263 if (WARN_ON(f == NULL))
264 return SECCOMP_RET_KILL_PROCESS;
267 * All filters in the list are evaluated and the lowest BPF return
268 * value always takes priority (ignoring the DATA).
270 for (; f; f = f->prev) {
271 u32 cur_ret = BPF_PROG_RUN(f->prog, sd);
273 if (ACTION_ONLY(cur_ret) < ACTION_ONLY(ret)) {
274 ret = cur_ret;
275 *match = f;
278 return ret;
280 #endif /* CONFIG_SECCOMP_FILTER */
282 static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
284 assert_spin_locked(&current->sighand->siglock);
286 if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
287 return false;
289 return true;
292 void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
294 static inline void seccomp_assign_mode(struct task_struct *task,
295 unsigned long seccomp_mode,
296 unsigned long flags)
298 assert_spin_locked(&task->sighand->siglock);
300 task->seccomp.mode = seccomp_mode;
302 * Make sure TIF_SECCOMP cannot be set before the mode (and
303 * filter) is set.
305 smp_mb__before_atomic();
306 /* Assume default seccomp processes want spec flaw mitigation. */
307 if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
308 arch_seccomp_spec_mitigate(task);
309 set_tsk_thread_flag(task, TIF_SECCOMP);
312 #ifdef CONFIG_SECCOMP_FILTER
313 /* Returns 1 if the parent is an ancestor of the child. */
314 static int is_ancestor(struct seccomp_filter *parent,
315 struct seccomp_filter *child)
317 /* NULL is the root ancestor. */
318 if (parent == NULL)
319 return 1;
320 for (; child; child = child->prev)
321 if (child == parent)
322 return 1;
323 return 0;
327 * seccomp_can_sync_threads: checks if all threads can be synchronized
329 * Expects sighand and cred_guard_mutex locks to be held.
331 * Returns 0 on success, -ve on error, or the pid of a thread which was
332 * either not in the correct seccomp mode or it did not have an ancestral
333 * seccomp filter.
335 static inline pid_t seccomp_can_sync_threads(void)
337 struct task_struct *thread, *caller;
339 BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
340 assert_spin_locked(&current->sighand->siglock);
342 /* Validate all threads being eligible for synchronization. */
343 caller = current;
344 for_each_thread(caller, thread) {
345 pid_t failed;
347 /* Skip current, since it is initiating the sync. */
348 if (thread == caller)
349 continue;
351 if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
352 (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
353 is_ancestor(thread->seccomp.filter,
354 caller->seccomp.filter)))
355 continue;
357 /* Return the first thread that cannot be synchronized. */
358 failed = task_pid_vnr(thread);
359 /* If the pid cannot be resolved, then return -ESRCH */
360 if (WARN_ON(failed == 0))
361 failed = -ESRCH;
362 return failed;
365 return 0;
369 * seccomp_sync_threads: sets all threads to use current's filter
371 * Expects sighand and cred_guard_mutex locks to be held, and for
372 * seccomp_can_sync_threads() to have returned success already
373 * without dropping the locks.
376 static inline void seccomp_sync_threads(unsigned long flags)
378 struct task_struct *thread, *caller;
380 BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
381 assert_spin_locked(&current->sighand->siglock);
383 /* Synchronize all threads. */
384 caller = current;
385 for_each_thread(caller, thread) {
386 /* Skip current, since it needs no changes. */
387 if (thread == caller)
388 continue;
390 /* Get a task reference for the new leaf node. */
391 get_seccomp_filter(caller);
393 * Drop the task reference to the shared ancestor since
394 * current's path will hold a reference. (This also
395 * allows a put before the assignment.)
397 put_seccomp_filter(thread);
398 smp_store_release(&thread->seccomp.filter,
399 caller->seccomp.filter);
402 * Don't let an unprivileged task work around
403 * the no_new_privs restriction by creating
404 * a thread that sets it up, enters seccomp,
405 * then dies.
407 if (task_no_new_privs(caller))
408 task_set_no_new_privs(thread);
411 * Opt the other thread into seccomp if needed.
412 * As threads are considered to be trust-realm
413 * equivalent (see ptrace_may_access), it is safe to
414 * allow one thread to transition the other.
416 if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
417 seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
418 flags);
423 * seccomp_prepare_filter: Prepares a seccomp filter for use.
424 * @fprog: BPF program to install
426 * Returns filter on success or an ERR_PTR on failure.
428 static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
430 struct seccomp_filter *sfilter;
431 int ret;
432 const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);
434 if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
435 return ERR_PTR(-EINVAL);
437 BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));
440 * Installing a seccomp filter requires that the task has
441 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
442 * This avoids scenarios where unprivileged tasks can affect the
443 * behavior of privileged children.
445 if (!task_no_new_privs(current) &&
446 security_capable_noaudit(current_cred(), current_user_ns(),
447 CAP_SYS_ADMIN) != 0)
448 return ERR_PTR(-EACCES);
450 /* Allocate a new seccomp_filter */
451 sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
452 if (!sfilter)
453 return ERR_PTR(-ENOMEM);
455 mutex_init(&sfilter->notify_lock);
456 ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
457 seccomp_check_filter, save_orig);
458 if (ret < 0) {
459 kfree(sfilter);
460 return ERR_PTR(ret);
463 refcount_set(&sfilter->usage, 1);
465 return sfilter;
469 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
470 * @user_filter: pointer to the user data containing a sock_fprog.
472 * Returns the prepared filter on success, or an ERR_PTR on failure.
474 static struct seccomp_filter *
475 seccomp_prepare_user_filter(const char __user *user_filter)
477 struct sock_fprog fprog;
478 struct seccomp_filter *filter = ERR_PTR(-EFAULT);
480 #ifdef CONFIG_COMPAT
481 if (in_compat_syscall()) {
482 struct compat_sock_fprog fprog32;
483 if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
484 goto out;
485 fprog.len = fprog32.len;
486 fprog.filter = compat_ptr(fprog32.filter);
487 } else /* falls through to the if below. */
488 #endif
489 if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
490 goto out;
491 filter = seccomp_prepare_filter(&fprog);
492 out:
493 return filter;
497 * seccomp_attach_filter: validate and attach filter
498 * @flags: flags to change filter behavior
499 * @filter: seccomp filter to add to the current process
501 * Caller must be holding current->sighand->siglock lock.
503 * Returns 0 on success, -ve on error.
505 static long seccomp_attach_filter(unsigned int flags,
506 struct seccomp_filter *filter)
508 unsigned long total_insns;
509 struct seccomp_filter *walker;
511 assert_spin_locked(&current->sighand->siglock);
513 /* Validate resulting filter length. */
514 total_insns = filter->prog->len;
515 for (walker = current->seccomp.filter; walker; walker = walker->prev)
516 total_insns += walker->prog->len + 4; /* 4 instr penalty */
517 if (total_insns > MAX_INSNS_PER_PATH)
518 return -ENOMEM;
520 /* If thread sync has been requested, check that it is possible. */
521 if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
522 int ret;
524 ret = seccomp_can_sync_threads();
525 if (ret)
526 return ret;
529 /* Set log flag, if present. */
530 if (flags & SECCOMP_FILTER_FLAG_LOG)
531 filter->log = true;
534 * If there is an existing filter, make it the prev and don't drop its
535 * task reference.
537 filter->prev = current->seccomp.filter;
538 current->seccomp.filter = filter;
540 /* Now that the new filter is in place, synchronize to all threads. */
541 if (flags & SECCOMP_FILTER_FLAG_TSYNC)
542 seccomp_sync_threads(flags);
544 return 0;
547 static void __get_seccomp_filter(struct seccomp_filter *filter)
549 refcount_inc(&filter->usage);
552 /* get_seccomp_filter - increments the reference count of the filter on @tsk */
553 void get_seccomp_filter(struct task_struct *tsk)
555 struct seccomp_filter *orig = tsk->seccomp.filter;
556 if (!orig)
557 return;
558 __get_seccomp_filter(orig);
561 static inline void seccomp_filter_free(struct seccomp_filter *filter)
563 if (filter) {
564 bpf_prog_destroy(filter->prog);
565 kfree(filter);
569 static void __put_seccomp_filter(struct seccomp_filter *orig)
571 /* Clean up single-reference branches iteratively. */
572 while (orig && refcount_dec_and_test(&orig->usage)) {
573 struct seccomp_filter *freeme = orig;
574 orig = orig->prev;
575 seccomp_filter_free(freeme);
579 /* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
580 void put_seccomp_filter(struct task_struct *tsk)
582 __put_seccomp_filter(tsk->seccomp.filter);
585 static void seccomp_init_siginfo(kernel_siginfo_t *info, int syscall, int reason)
587 clear_siginfo(info);
588 info->si_signo = SIGSYS;
589 info->si_code = SYS_SECCOMP;
590 info->si_call_addr = (void __user *)KSTK_EIP(current);
591 info->si_errno = reason;
592 info->si_arch = syscall_get_arch();
593 info->si_syscall = syscall;
/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct kernel_siginfo info;

	seccomp_init_siginfo(&info, syscall, reason);
	force_sig_info(SIGSYS, &info, current);
}
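/*
 * Illustrative sketch (not part of this file): a userspace SIGSYS handler
 * that consumes the siginfo populated by seccomp_init_siginfo() above.
 * Hedged example; field names follow the siginfo_t exposed by recent glibc.
 *
 *	#include <signal.h>
 *
 *	static void sigsys_handler(int sig, siginfo_t *info, void *ucontext)
 *	{
 *		if (info->si_code != SYS_SECCOMP)
 *			return;
 *		// info->si_syscall is the trapped syscall number,
 *		// info->si_errno carries the filter's 16 bits of RET_DATA,
 *		// info->si_arch identifies the syscall ABI.
 *	}
 *
 *	// Install with sigaction() and SA_SIGINFO set in sa_flags.
 */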
609 #endif /* CONFIG_SECCOMP_FILTER */
611 /* For use with seccomp_actions_logged */
612 #define SECCOMP_LOG_KILL_PROCESS (1 << 0)
613 #define SECCOMP_LOG_KILL_THREAD (1 << 1)
614 #define SECCOMP_LOG_TRAP (1 << 2)
615 #define SECCOMP_LOG_ERRNO (1 << 3)
616 #define SECCOMP_LOG_TRACE (1 << 4)
617 #define SECCOMP_LOG_LOG (1 << 5)
618 #define SECCOMP_LOG_ALLOW (1 << 6)
619 #define SECCOMP_LOG_USER_NOTIF (1 << 7)
621 static u32 seccomp_actions_logged = SECCOMP_LOG_KILL_PROCESS |
622 SECCOMP_LOG_KILL_THREAD |
623 SECCOMP_LOG_TRAP |
624 SECCOMP_LOG_ERRNO |
625 SECCOMP_LOG_USER_NOTIF |
626 SECCOMP_LOG_TRACE |
627 SECCOMP_LOG_LOG;
629 static inline void seccomp_log(unsigned long syscall, long signr, u32 action,
630 bool requested)
632 bool log = false;
634 switch (action) {
635 case SECCOMP_RET_ALLOW:
636 break;
637 case SECCOMP_RET_TRAP:
638 log = requested && seccomp_actions_logged & SECCOMP_LOG_TRAP;
639 break;
640 case SECCOMP_RET_ERRNO:
641 log = requested && seccomp_actions_logged & SECCOMP_LOG_ERRNO;
642 break;
643 case SECCOMP_RET_TRACE:
644 log = requested && seccomp_actions_logged & SECCOMP_LOG_TRACE;
645 break;
646 case SECCOMP_RET_USER_NOTIF:
647 log = requested && seccomp_actions_logged & SECCOMP_LOG_USER_NOTIF;
648 break;
649 case SECCOMP_RET_LOG:
650 log = seccomp_actions_logged & SECCOMP_LOG_LOG;
651 break;
652 case SECCOMP_RET_KILL_THREAD:
653 log = seccomp_actions_logged & SECCOMP_LOG_KILL_THREAD;
654 break;
655 case SECCOMP_RET_KILL_PROCESS:
656 default:
657 log = seccomp_actions_logged & SECCOMP_LOG_KILL_PROCESS;
661 * Emit an audit message when the action is RET_KILL_*, RET_LOG, or the
662 * FILTER_FLAG_LOG bit was set. The admin has the ability to silence
663 * any action from being logged by removing the action name from the
664 * seccomp_actions_logged sysctl.
666 if (!log)
667 return;
669 audit_seccomp(syscall, signr, action);
/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};
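/*
 * Illustrative sketch (not part of this file): strict mode is entered from
 * userspace with a single prctl() call, after which only the syscalls listed
 * above remain usable. Hedged example; it assumes <sys/prctl.h> and
 * <linux/seccomp.h> provide PR_SET_SECCOMP and SECCOMP_MODE_STRICT.
 *
 *	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0))
 *		return -1;
 */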
682 static void __secure_computing_strict(int this_syscall)
684 const int *syscall_whitelist = mode1_syscalls;
685 #ifdef CONFIG_COMPAT
686 if (in_compat_syscall())
687 syscall_whitelist = get_compat_mode1_syscalls();
688 #endif
689 do {
690 if (*syscall_whitelist == this_syscall)
691 return;
692 } while (*++syscall_whitelist);
694 #ifdef SECCOMP_DEBUG
695 dump_stack();
696 #endif
697 seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
698 do_exit(SIGKILL);
701 #ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
702 void secure_computing_strict(int this_syscall)
704 int mode = current->seccomp.mode;
706 if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
707 unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
708 return;
710 if (mode == SECCOMP_MODE_DISABLED)
711 return;
712 else if (mode == SECCOMP_MODE_STRICT)
713 __secure_computing_strict(this_syscall);
714 else
715 BUG();
717 #else
719 #ifdef CONFIG_SECCOMP_FILTER
720 static u64 seccomp_next_notify_id(struct seccomp_filter *filter)
723 * Note: overflow is ok here, the id just needs to be unique per
724 * filter.
726 lockdep_assert_held(&filter->notify_lock);
727 return filter->notif->next_id++;
730 static void seccomp_do_user_notification(int this_syscall,
731 struct seccomp_filter *match,
732 const struct seccomp_data *sd)
734 int err;
735 long ret = 0;
736 struct seccomp_knotif n = {};
738 mutex_lock(&match->notify_lock);
739 err = -ENOSYS;
740 if (!match->notif)
741 goto out;
743 n.task = current;
744 n.state = SECCOMP_NOTIFY_INIT;
745 n.data = sd;
746 n.id = seccomp_next_notify_id(match);
747 init_completion(&n.ready);
748 list_add(&n.list, &match->notif->notifications);
750 up(&match->notif->request);
751 wake_up_poll(&match->notif->wqh, EPOLLIN | EPOLLRDNORM);
752 mutex_unlock(&match->notify_lock);
755 * This is where we wait for a reply from userspace.
757 err = wait_for_completion_interruptible(&n.ready);
758 mutex_lock(&match->notify_lock);
759 if (err == 0) {
760 ret = n.val;
761 err = n.error;
765 * Note that it's possible the listener died in between the time when
766 * we were notified of a response (or a signal) and when we were able to
767 * re-acquire the lock, so only delete from the list if the
768 * notification actually exists.
770 * Also note that this test is only valid because there's no way to
771 * *reattach* to a notifier right now. If one is added, we'll need to
772 * keep track of the notif itself and make sure they match here.
774 if (match->notif)
775 list_del(&n.list);
776 out:
777 mutex_unlock(&match->notify_lock);
778 syscall_set_return_value(current, task_pt_regs(current),
779 err, ret);
782 static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
783 const bool recheck_after_trace)
785 u32 filter_ret, action;
786 struct seccomp_filter *match = NULL;
787 int data;
788 struct seccomp_data sd_local;
791 * Make sure that any changes to mode from another thread have
792 * been seen after TIF_SECCOMP was seen.
794 rmb();
796 if (!sd) {
797 populate_seccomp_data(&sd_local);
798 sd = &sd_local;
801 filter_ret = seccomp_run_filters(sd, &match);
802 data = filter_ret & SECCOMP_RET_DATA;
803 action = filter_ret & SECCOMP_RET_ACTION_FULL;
805 switch (action) {
806 case SECCOMP_RET_ERRNO:
807 /* Set low-order bits as an errno, capped at MAX_ERRNO. */
808 if (data > MAX_ERRNO)
809 data = MAX_ERRNO;
810 syscall_set_return_value(current, task_pt_regs(current),
811 -data, 0);
812 goto skip;
814 case SECCOMP_RET_TRAP:
815 /* Show the handler the original registers. */
816 syscall_rollback(current, task_pt_regs(current));
817 /* Let the filter pass back 16 bits of data. */
818 seccomp_send_sigsys(this_syscall, data);
819 goto skip;
821 case SECCOMP_RET_TRACE:
822 /* We've been put in this state by the ptracer already. */
823 if (recheck_after_trace)
824 return 0;
826 /* ENOSYS these calls if there is no tracer attached. */
827 if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
828 syscall_set_return_value(current,
829 task_pt_regs(current),
830 -ENOSYS, 0);
831 goto skip;
834 /* Allow the BPF to provide the event message */
835 ptrace_event(PTRACE_EVENT_SECCOMP, data);
837 * The delivery of a fatal signal during event
838 * notification may silently skip tracer notification,
839 * which could leave us with a potentially unmodified
840 * syscall that the tracer would have liked to have
841 * changed. Since the process is about to die, we just
842 * force the syscall to be skipped and let the signal
843 * kill the process and correctly handle any tracer exit
844 * notifications.
846 if (fatal_signal_pending(current))
847 goto skip;
848 /* Check if the tracer forced the syscall to be skipped. */
849 this_syscall = syscall_get_nr(current, task_pt_regs(current));
850 if (this_syscall < 0)
851 goto skip;
854 * Recheck the syscall, since it may have changed. This
855 * intentionally uses a NULL struct seccomp_data to force
856 * a reload of all registers. This does not goto skip since
857 * a skip would have already been reported.
859 if (__seccomp_filter(this_syscall, NULL, true))
860 return -1;
862 return 0;
864 case SECCOMP_RET_USER_NOTIF:
865 seccomp_do_user_notification(this_syscall, match, sd);
866 goto skip;
868 case SECCOMP_RET_LOG:
869 seccomp_log(this_syscall, 0, action, true);
870 return 0;
872 case SECCOMP_RET_ALLOW:
874 * Note that the "match" filter will always be NULL for
875 * this action since SECCOMP_RET_ALLOW is the starting
876 * state in seccomp_run_filters().
878 return 0;
880 case SECCOMP_RET_KILL_THREAD:
881 case SECCOMP_RET_KILL_PROCESS:
882 default:
883 seccomp_log(this_syscall, SIGSYS, action, true);
884 /* Dump core only if this is the last remaining thread. */
885 if (action == SECCOMP_RET_KILL_PROCESS ||
886 get_nr_threads(current) == 1) {
887 kernel_siginfo_t info;
889 /* Show the original registers in the dump. */
890 syscall_rollback(current, task_pt_regs(current));
891 /* Trigger a manual coredump since do_exit skips it. */
892 seccomp_init_siginfo(&info, this_syscall, data);
893 do_coredump(&info);
895 if (action == SECCOMP_RET_KILL_PROCESS)
896 do_group_exit(SIGSYS);
897 else
898 do_exit(SIGSYS);
901 unreachable();
903 skip:
904 seccomp_log(this_syscall, 0, action, match ? match->log : false);
905 return -1;
907 #else
908 static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
909 const bool recheck_after_trace)
911 BUG();
913 #endif
915 int __secure_computing(const struct seccomp_data *sd)
917 int mode = current->seccomp.mode;
918 int this_syscall;
920 if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
921 unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
922 return 0;
924 this_syscall = sd ? sd->nr :
925 syscall_get_nr(current, task_pt_regs(current));
927 switch (mode) {
928 case SECCOMP_MODE_STRICT:
929 __secure_computing_strict(this_syscall); /* may call do_exit */
930 return 0;
931 case SECCOMP_MODE_FILTER:
932 return __seccomp_filter(this_syscall, sd, false);
933 default:
934 BUG();
937 #endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
939 long prctl_get_seccomp(void)
941 return current->seccomp.mode;
945 * seccomp_set_mode_strict: internal function for setting strict seccomp
947 * Once current->seccomp.mode is non-zero, it may not be changed.
949 * Returns 0 on success or -EINVAL on failure.
951 static long seccomp_set_mode_strict(void)
953 const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
954 long ret = -EINVAL;
956 spin_lock_irq(&current->sighand->siglock);
958 if (!seccomp_may_assign_mode(seccomp_mode))
959 goto out;
961 #ifdef TIF_NOTSC
962 disable_TSC();
963 #endif
964 seccomp_assign_mode(current, seccomp_mode, 0);
965 ret = 0;
967 out:
968 spin_unlock_irq(&current->sighand->siglock);
970 return ret;
973 #ifdef CONFIG_SECCOMP_FILTER
974 static int seccomp_notify_release(struct inode *inode, struct file *file)
976 struct seccomp_filter *filter = file->private_data;
977 struct seccomp_knotif *knotif;
979 mutex_lock(&filter->notify_lock);
982 * If this file is being closed because e.g. the task who owned it
983 * died, let's wake everyone up who was waiting on us.
985 list_for_each_entry(knotif, &filter->notif->notifications, list) {
986 if (knotif->state == SECCOMP_NOTIFY_REPLIED)
987 continue;
989 knotif->state = SECCOMP_NOTIFY_REPLIED;
990 knotif->error = -ENOSYS;
991 knotif->val = 0;
993 complete(&knotif->ready);
996 kfree(filter->notif);
997 filter->notif = NULL;
998 mutex_unlock(&filter->notify_lock);
999 __put_seccomp_filter(filter);
1000 return 0;
1003 static long seccomp_notify_recv(struct seccomp_filter *filter,
1004 void __user *buf)
1006 struct seccomp_knotif *knotif = NULL, *cur;
1007 struct seccomp_notif unotif;
1008 ssize_t ret;
1010 memset(&unotif, 0, sizeof(unotif));
1012 ret = down_interruptible(&filter->notif->request);
1013 if (ret < 0)
1014 return ret;
1016 mutex_lock(&filter->notify_lock);
1017 list_for_each_entry(cur, &filter->notif->notifications, list) {
1018 if (cur->state == SECCOMP_NOTIFY_INIT) {
1019 knotif = cur;
1020 break;
1025 * If we didn't find a notification, it could be that the task was
1026 * interrupted by a fatal signal between the time we were woken and
1027 * when we were able to acquire the rw lock.
1029 if (!knotif) {
1030 ret = -ENOENT;
1031 goto out;
1034 unotif.id = knotif->id;
1035 unotif.pid = task_pid_vnr(knotif->task);
1036 unotif.data = *(knotif->data);
1038 knotif->state = SECCOMP_NOTIFY_SENT;
1039 wake_up_poll(&filter->notif->wqh, EPOLLOUT | EPOLLWRNORM);
1040 ret = 0;
1041 out:
1042 mutex_unlock(&filter->notify_lock);
1044 if (ret == 0 && copy_to_user(buf, &unotif, sizeof(unotif))) {
1045 ret = -EFAULT;
1048 * Userspace screwed up. To make sure that we keep this
1049 * notification alive, let's reset it back to INIT. It
1050 * may have died when we released the lock, so we need to make
1051 * sure it's still around.
1053 knotif = NULL;
1054 mutex_lock(&filter->notify_lock);
1055 list_for_each_entry(cur, &filter->notif->notifications, list) {
1056 if (cur->id == unotif.id) {
1057 knotif = cur;
1058 break;
1062 if (knotif) {
1063 knotif->state = SECCOMP_NOTIFY_INIT;
1064 up(&filter->notif->request);
1066 mutex_unlock(&filter->notify_lock);
1069 return ret;
1072 static long seccomp_notify_send(struct seccomp_filter *filter,
1073 void __user *buf)
1075 struct seccomp_notif_resp resp = {};
1076 struct seccomp_knotif *knotif = NULL, *cur;
1077 long ret;
1079 if (copy_from_user(&resp, buf, sizeof(resp)))
1080 return -EFAULT;
1082 if (resp.flags)
1083 return -EINVAL;
1085 ret = mutex_lock_interruptible(&filter->notify_lock);
1086 if (ret < 0)
1087 return ret;
1089 list_for_each_entry(cur, &filter->notif->notifications, list) {
1090 if (cur->id == resp.id) {
1091 knotif = cur;
1092 break;
1096 if (!knotif) {
1097 ret = -ENOENT;
1098 goto out;
1101 /* Allow exactly one reply. */
1102 if (knotif->state != SECCOMP_NOTIFY_SENT) {
1103 ret = -EINPROGRESS;
1104 goto out;
1107 ret = 0;
1108 knotif->state = SECCOMP_NOTIFY_REPLIED;
1109 knotif->error = resp.error;
1110 knotif->val = resp.val;
1111 complete(&knotif->ready);
1112 out:
1113 mutex_unlock(&filter->notify_lock);
1114 return ret;
1117 static long seccomp_notify_id_valid(struct seccomp_filter *filter,
1118 void __user *buf)
1120 struct seccomp_knotif *knotif = NULL;
1121 u64 id;
1122 long ret;
1124 if (copy_from_user(&id, buf, sizeof(id)))
1125 return -EFAULT;
1127 ret = mutex_lock_interruptible(&filter->notify_lock);
1128 if (ret < 0)
1129 return ret;
1131 ret = -ENOENT;
1132 list_for_each_entry(knotif, &filter->notif->notifications, list) {
1133 if (knotif->id == id) {
1134 if (knotif->state == SECCOMP_NOTIFY_SENT)
1135 ret = 0;
1136 goto out;
1140 out:
1141 mutex_unlock(&filter->notify_lock);
1142 return ret;
1145 static long seccomp_notify_ioctl(struct file *file, unsigned int cmd,
1146 unsigned long arg)
1148 struct seccomp_filter *filter = file->private_data;
1149 void __user *buf = (void __user *)arg;
1151 switch (cmd) {
1152 case SECCOMP_IOCTL_NOTIF_RECV:
1153 return seccomp_notify_recv(filter, buf);
1154 case SECCOMP_IOCTL_NOTIF_SEND:
1155 return seccomp_notify_send(filter, buf);
1156 case SECCOMP_IOCTL_NOTIF_ID_VALID:
1157 return seccomp_notify_id_valid(filter, buf);
1158 default:
1159 return -EINVAL;
1163 static __poll_t seccomp_notify_poll(struct file *file,
1164 struct poll_table_struct *poll_tab)
1166 struct seccomp_filter *filter = file->private_data;
1167 __poll_t ret = 0;
1168 struct seccomp_knotif *cur;
1170 poll_wait(file, &filter->notif->wqh, poll_tab);
1172 if (mutex_lock_interruptible(&filter->notify_lock) < 0)
1173 return EPOLLERR;
1175 list_for_each_entry(cur, &filter->notif->notifications, list) {
1176 if (cur->state == SECCOMP_NOTIFY_INIT)
1177 ret |= EPOLLIN | EPOLLRDNORM;
1178 if (cur->state == SECCOMP_NOTIFY_SENT)
1179 ret |= EPOLLOUT | EPOLLWRNORM;
1180 if ((ret & EPOLLIN) && (ret & EPOLLOUT))
1181 break;
1184 mutex_unlock(&filter->notify_lock);
1186 return ret;
1189 static const struct file_operations seccomp_notify_ops = {
1190 .poll = seccomp_notify_poll,
1191 .release = seccomp_notify_release,
1192 .unlocked_ioctl = seccomp_notify_ioctl,
1195 static struct file *init_listener(struct seccomp_filter *filter)
1197 struct file *ret = ERR_PTR(-EBUSY);
1198 struct seccomp_filter *cur;
1200 for (cur = current->seccomp.filter; cur; cur = cur->prev) {
1201 if (cur->notif)
1202 goto out;
1205 ret = ERR_PTR(-ENOMEM);
1206 filter->notif = kzalloc(sizeof(*(filter->notif)), GFP_KERNEL);
1207 if (!filter->notif)
1208 goto out;
1210 sema_init(&filter->notif->request, 0);
1211 filter->notif->next_id = get_random_u64();
1212 INIT_LIST_HEAD(&filter->notif->notifications);
1213 init_waitqueue_head(&filter->notif->wqh);
1215 ret = anon_inode_getfile("seccomp notify", &seccomp_notify_ops,
1216 filter, O_RDWR);
1217 if (IS_ERR(ret))
1218 goto out_notif;
1220 /* The file has a reference to it now */
1221 __get_seccomp_filter(filter);
1223 out_notif:
1224 if (IS_ERR(ret))
1225 kfree(filter->notif);
1226 out:
1227 return ret;
1231 * seccomp_set_mode_filter: internal function for setting seccomp filter
1232 * @flags: flags to change filter behavior
1233 * @filter: struct sock_fprog containing filter
1235 * This function may be called repeatedly to install additional filters.
1236 * Every filter successfully installed will be evaluated (in reverse order)
1237 * for each system call the task makes.
1239 * Once current->seccomp.mode is non-zero, it may not be changed.
1241 * Returns 0 on success or -EINVAL on failure.
1243 static long seccomp_set_mode_filter(unsigned int flags,
1244 const char __user *filter)
1246 const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
1247 struct seccomp_filter *prepared = NULL;
1248 long ret = -EINVAL;
1249 int listener = -1;
1250 struct file *listener_f = NULL;
1252 /* Validate flags. */
1253 if (flags & ~SECCOMP_FILTER_FLAG_MASK)
1254 return -EINVAL;
1256 /* Prepare the new filter before holding any locks. */
1257 prepared = seccomp_prepare_user_filter(filter);
1258 if (IS_ERR(prepared))
1259 return PTR_ERR(prepared);
1261 if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
1262 listener = get_unused_fd_flags(O_CLOEXEC);
1263 if (listener < 0) {
1264 ret = listener;
1265 goto out_free;
1268 listener_f = init_listener(prepared);
1269 if (IS_ERR(listener_f)) {
1270 put_unused_fd(listener);
1271 ret = PTR_ERR(listener_f);
1272 goto out_free;
1277 * Make sure we cannot change seccomp or nnp state via TSYNC
1278 * while another thread is in the middle of calling exec.
1280 if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
1281 mutex_lock_killable(&current->signal->cred_guard_mutex))
1282 goto out_put_fd;
1284 spin_lock_irq(&current->sighand->siglock);
1286 if (!seccomp_may_assign_mode(seccomp_mode))
1287 goto out;
1289 ret = seccomp_attach_filter(flags, prepared);
1290 if (ret)
1291 goto out;
1292 /* Do not free the successfully attached filter. */
1293 prepared = NULL;
1295 seccomp_assign_mode(current, seccomp_mode, flags);
1296 out:
1297 spin_unlock_irq(&current->sighand->siglock);
1298 if (flags & SECCOMP_FILTER_FLAG_TSYNC)
1299 mutex_unlock(&current->signal->cred_guard_mutex);
1300 out_put_fd:
1301 if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
1302 if (ret < 0) {
1303 fput(listener_f);
1304 put_unused_fd(listener);
1305 } else {
1306 fd_install(listener, listener_f);
1307 ret = listener;
1310 out_free:
1311 seccomp_filter_free(prepared);
1312 return ret;
1314 #else
1315 static inline long seccomp_set_mode_filter(unsigned int flags,
1316 const char __user *filter)
1318 return -EINVAL;
1320 #endif
1322 static long seccomp_get_action_avail(const char __user *uaction)
1324 u32 action;
1326 if (copy_from_user(&action, uaction, sizeof(action)))
1327 return -EFAULT;
1329 switch (action) {
1330 case SECCOMP_RET_KILL_PROCESS:
1331 case SECCOMP_RET_KILL_THREAD:
1332 case SECCOMP_RET_TRAP:
1333 case SECCOMP_RET_ERRNO:
1334 case SECCOMP_RET_USER_NOTIF:
1335 case SECCOMP_RET_TRACE:
1336 case SECCOMP_RET_LOG:
1337 case SECCOMP_RET_ALLOW:
1338 break;
1339 default:
1340 return -EOPNOTSUPP;
1343 return 0;
1346 static long seccomp_get_notif_sizes(void __user *usizes)
1348 struct seccomp_notif_sizes sizes = {
1349 .seccomp_notif = sizeof(struct seccomp_notif),
1350 .seccomp_notif_resp = sizeof(struct seccomp_notif_resp),
1351 .seccomp_data = sizeof(struct seccomp_data),
1354 if (copy_to_user(usizes, &sizes, sizeof(sizes)))
1355 return -EFAULT;
1357 return 0;
1360 /* Common entry point for both prctl and syscall. */
1361 static long do_seccomp(unsigned int op, unsigned int flags,
1362 void __user *uargs)
1364 switch (op) {
1365 case SECCOMP_SET_MODE_STRICT:
1366 if (flags != 0 || uargs != NULL)
1367 return -EINVAL;
1368 return seccomp_set_mode_strict();
1369 case SECCOMP_SET_MODE_FILTER:
1370 return seccomp_set_mode_filter(flags, uargs);
1371 case SECCOMP_GET_ACTION_AVAIL:
1372 if (flags != 0)
1373 return -EINVAL;
1375 return seccomp_get_action_avail(uargs);
1376 case SECCOMP_GET_NOTIF_SIZES:
1377 if (flags != 0)
1378 return -EINVAL;
1380 return seccomp_get_notif_sizes(uargs);
1381 default:
1382 return -EINVAL;
1386 SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
1387 void __user *, uargs)
1389 return do_seccomp(op, flags, uargs);
1393 * prctl_set_seccomp: configures current->seccomp.mode
1394 * @seccomp_mode: requested mode to use
1395 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
1397 * Returns 0 on success or -EINVAL on failure.
1399 long prctl_set_seccomp(unsigned long seccomp_mode, void __user *filter)
1401 unsigned int op;
1402 void __user *uargs;
1404 switch (seccomp_mode) {
1405 case SECCOMP_MODE_STRICT:
1406 op = SECCOMP_SET_MODE_STRICT;
1408 * Setting strict mode through prctl always ignored filter,
1409 * so make sure it is always NULL here to pass the internal
1410 * check in do_seccomp().
1412 uargs = NULL;
1413 break;
1414 case SECCOMP_MODE_FILTER:
1415 op = SECCOMP_SET_MODE_FILTER;
1416 uargs = filter;
1417 break;
1418 default:
1419 return -EINVAL;
1422 /* prctl interface doesn't have flags, so they are always zero. */
1423 return do_seccomp(op, 0, uargs);
1426 #if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
1427 static struct seccomp_filter *get_nth_filter(struct task_struct *task,
1428 unsigned long filter_off)
1430 struct seccomp_filter *orig, *filter;
1431 unsigned long count;
1434 * Note: this is only correct because the caller should be the (ptrace)
1435 * tracer of the task, otherwise lock_task_sighand is needed.
1437 spin_lock_irq(&task->sighand->siglock);
1439 if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
1440 spin_unlock_irq(&task->sighand->siglock);
1441 return ERR_PTR(-EINVAL);
1444 orig = task->seccomp.filter;
1445 __get_seccomp_filter(orig);
1446 spin_unlock_irq(&task->sighand->siglock);
1448 count = 0;
1449 for (filter = orig; filter; filter = filter->prev)
1450 count++;
1452 if (filter_off >= count) {
1453 filter = ERR_PTR(-ENOENT);
1454 goto out;
1457 count -= filter_off;
1458 for (filter = orig; filter && count > 1; filter = filter->prev)
1459 count--;
1461 if (WARN_ON(count != 1 || !filter)) {
1462 filter = ERR_PTR(-ENOENT);
1463 goto out;
1466 __get_seccomp_filter(filter);
1468 out:
1469 __put_seccomp_filter(orig);
1470 return filter;
1473 long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
1474 void __user *data)
1476 struct seccomp_filter *filter;
1477 struct sock_fprog_kern *fprog;
1478 long ret;
1480 if (!capable(CAP_SYS_ADMIN) ||
1481 current->seccomp.mode != SECCOMP_MODE_DISABLED) {
1482 return -EACCES;
1485 filter = get_nth_filter(task, filter_off);
1486 if (IS_ERR(filter))
1487 return PTR_ERR(filter);
1489 fprog = filter->prog->orig_prog;
1490 if (!fprog) {
1491 /* This must be a new non-cBPF filter, since we save
1492 * every cBPF filter's orig_prog above when
1493 * CONFIG_CHECKPOINT_RESTORE is enabled.
1495 ret = -EMEDIUMTYPE;
1496 goto out;
1499 ret = fprog->len;
1500 if (!data)
1501 goto out;
1503 if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
1504 ret = -EFAULT;
1506 out:
1507 __put_seccomp_filter(filter);
1508 return ret;
1511 long seccomp_get_metadata(struct task_struct *task,
1512 unsigned long size, void __user *data)
1514 long ret;
1515 struct seccomp_filter *filter;
1516 struct seccomp_metadata kmd = {};
1518 if (!capable(CAP_SYS_ADMIN) ||
1519 current->seccomp.mode != SECCOMP_MODE_DISABLED) {
1520 return -EACCES;
1523 size = min_t(unsigned long, size, sizeof(kmd));
1525 if (size < sizeof(kmd.filter_off))
1526 return -EINVAL;
1528 if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
1529 return -EFAULT;
1531 filter = get_nth_filter(task, kmd.filter_off);
1532 if (IS_ERR(filter))
1533 return PTR_ERR(filter);
1535 if (filter->log)
1536 kmd.flags |= SECCOMP_FILTER_FLAG_LOG;
1538 ret = size;
1539 if (copy_to_user(data, &kmd, size))
1540 ret = -EFAULT;
1542 __put_seccomp_filter(filter);
1543 return ret;
1545 #endif
1547 #ifdef CONFIG_SYSCTL
1549 /* Human readable action names for friendly sysctl interaction */
1550 #define SECCOMP_RET_KILL_PROCESS_NAME "kill_process"
1551 #define SECCOMP_RET_KILL_THREAD_NAME "kill_thread"
1552 #define SECCOMP_RET_TRAP_NAME "trap"
1553 #define SECCOMP_RET_ERRNO_NAME "errno"
1554 #define SECCOMP_RET_USER_NOTIF_NAME "user_notif"
1555 #define SECCOMP_RET_TRACE_NAME "trace"
1556 #define SECCOMP_RET_LOG_NAME "log"
1557 #define SECCOMP_RET_ALLOW_NAME "allow"
1559 static const char seccomp_actions_avail[] =
1560 SECCOMP_RET_KILL_PROCESS_NAME " "
1561 SECCOMP_RET_KILL_THREAD_NAME " "
1562 SECCOMP_RET_TRAP_NAME " "
1563 SECCOMP_RET_ERRNO_NAME " "
1564 SECCOMP_RET_USER_NOTIF_NAME " "
1565 SECCOMP_RET_TRACE_NAME " "
1566 SECCOMP_RET_LOG_NAME " "
1567 SECCOMP_RET_ALLOW_NAME;
1569 struct seccomp_log_name {
1570 u32 log;
1571 const char *name;
1574 static const struct seccomp_log_name seccomp_log_names[] = {
1575 { SECCOMP_LOG_KILL_PROCESS, SECCOMP_RET_KILL_PROCESS_NAME },
1576 { SECCOMP_LOG_KILL_THREAD, SECCOMP_RET_KILL_THREAD_NAME },
1577 { SECCOMP_LOG_TRAP, SECCOMP_RET_TRAP_NAME },
1578 { SECCOMP_LOG_ERRNO, SECCOMP_RET_ERRNO_NAME },
1579 { SECCOMP_LOG_USER_NOTIF, SECCOMP_RET_USER_NOTIF_NAME },
1580 { SECCOMP_LOG_TRACE, SECCOMP_RET_TRACE_NAME },
1581 { SECCOMP_LOG_LOG, SECCOMP_RET_LOG_NAME },
1582 { SECCOMP_LOG_ALLOW, SECCOMP_RET_ALLOW_NAME },
1586 static bool seccomp_names_from_actions_logged(char *names, size_t size,
1587 u32 actions_logged,
1588 const char *sep)
1590 const struct seccomp_log_name *cur;
1591 bool append_sep = false;
1593 for (cur = seccomp_log_names; cur->name && size; cur++) {
1594 ssize_t ret;
1596 if (!(actions_logged & cur->log))
1597 continue;
1599 if (append_sep) {
1600 ret = strscpy(names, sep, size);
1601 if (ret < 0)
1602 return false;
1604 names += ret;
1605 size -= ret;
1606 } else
1607 append_sep = true;
1609 ret = strscpy(names, cur->name, size);
1610 if (ret < 0)
1611 return false;
1613 names += ret;
1614 size -= ret;
1617 return true;
1620 static bool seccomp_action_logged_from_name(u32 *action_logged,
1621 const char *name)
1623 const struct seccomp_log_name *cur;
1625 for (cur = seccomp_log_names; cur->name; cur++) {
1626 if (!strcmp(cur->name, name)) {
1627 *action_logged = cur->log;
1628 return true;
1632 return false;
1635 static bool seccomp_actions_logged_from_names(u32 *actions_logged, char *names)
1637 char *name;
1639 *actions_logged = 0;
1640 while ((name = strsep(&names, " ")) && *name) {
1641 u32 action_logged = 0;
1643 if (!seccomp_action_logged_from_name(&action_logged, name))
1644 return false;
1646 *actions_logged |= action_logged;
1649 return true;
1652 static int read_actions_logged(struct ctl_table *ro_table, void __user *buffer,
1653 size_t *lenp, loff_t *ppos)
1655 char names[sizeof(seccomp_actions_avail)];
1656 struct ctl_table table;
1658 memset(names, 0, sizeof(names));
1660 if (!seccomp_names_from_actions_logged(names, sizeof(names),
1661 seccomp_actions_logged, " "))
1662 return -EINVAL;
1664 table = *ro_table;
1665 table.data = names;
1666 table.maxlen = sizeof(names);
1667 return proc_dostring(&table, 0, buffer, lenp, ppos);
1670 static int write_actions_logged(struct ctl_table *ro_table, void __user *buffer,
1671 size_t *lenp, loff_t *ppos, u32 *actions_logged)
1673 char names[sizeof(seccomp_actions_avail)];
1674 struct ctl_table table;
1675 int ret;
1677 if (!capable(CAP_SYS_ADMIN))
1678 return -EPERM;
1680 memset(names, 0, sizeof(names));
1682 table = *ro_table;
1683 table.data = names;
1684 table.maxlen = sizeof(names);
1685 ret = proc_dostring(&table, 1, buffer, lenp, ppos);
1686 if (ret)
1687 return ret;
1689 if (!seccomp_actions_logged_from_names(actions_logged, table.data))
1690 return -EINVAL;
1692 if (*actions_logged & SECCOMP_LOG_ALLOW)
1693 return -EINVAL;
1695 seccomp_actions_logged = *actions_logged;
1696 return 0;
1699 static void audit_actions_logged(u32 actions_logged, u32 old_actions_logged,
1700 int ret)
1702 char names[sizeof(seccomp_actions_avail)];
1703 char old_names[sizeof(seccomp_actions_avail)];
1704 const char *new = names;
1705 const char *old = old_names;
1707 if (!audit_enabled)
1708 return;
1710 memset(names, 0, sizeof(names));
1711 memset(old_names, 0, sizeof(old_names));
1713 if (ret)
1714 new = "?";
1715 else if (!actions_logged)
1716 new = "(none)";
1717 else if (!seccomp_names_from_actions_logged(names, sizeof(names),
1718 actions_logged, ","))
1719 new = "?";
1721 if (!old_actions_logged)
1722 old = "(none)";
1723 else if (!seccomp_names_from_actions_logged(old_names,
1724 sizeof(old_names),
1725 old_actions_logged, ","))
1726 old = "?";
1728 return audit_seccomp_actions_logged(new, old, !ret);
1731 static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
1732 void __user *buffer, size_t *lenp,
1733 loff_t *ppos)
1735 int ret;
1737 if (write) {
1738 u32 actions_logged = 0;
1739 u32 old_actions_logged = seccomp_actions_logged;
1741 ret = write_actions_logged(ro_table, buffer, lenp, ppos,
1742 &actions_logged);
1743 audit_actions_logged(actions_logged, old_actions_logged, ret);
1744 } else
1745 ret = read_actions_logged(ro_table, buffer, lenp, ppos);
1747 return ret;
1750 static struct ctl_path seccomp_sysctl_path[] = {
1751 { .procname = "kernel", },
1752 { .procname = "seccomp", },
1756 static struct ctl_table seccomp_sysctl_table[] = {
1758 .procname = "actions_avail",
1759 .data = (void *) &seccomp_actions_avail,
1760 .maxlen = sizeof(seccomp_actions_avail),
1761 .mode = 0444,
1762 .proc_handler = proc_dostring,
1765 .procname = "actions_logged",
1766 .mode = 0644,
1767 .proc_handler = seccomp_actions_logged_handler,
1772 static int __init seccomp_sysctl_init(void)
1774 struct ctl_table_header *hdr;
1776 hdr = register_sysctl_paths(seccomp_sysctl_path, seccomp_sysctl_table);
1777 if (!hdr)
1778 pr_warn("seccomp: sysctl registration failed\n");
1779 else
1780 kmemleak_not_leak(hdr);
1782 return 0;
1785 device_initcall(seccomp_sysctl_init)
1787 #endif /* CONFIG_SYSCTL */