/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 * of Berkeley Packet Filters/Linux Socket Filters.
 */
#include <linux/atomic.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif

#ifdef CONFIG_SECCOMP_FILTER
#include <linux/filter.h>
#include <linux/pid.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>
/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *	   get/put helpers should be used when accessing an instance
 *	   outside of a lifetime-guarded section.  In general, this
 *	   is only needed for handling filters shared across tasks.
 * @prev: points to a previously installed, or inherited, filter
 * @prog: the BPF program to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer.  For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory.  This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	atomic_t usage;
	struct seccomp_filter *prev;
	struct bpf_prog *prog;
};
/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
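/* With the fixed 8-byte struct sock_filter, that is 32768 instructions. */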
/*
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static void populate_seccomp_data(struct seccomp_data *sd)
{
	struct task_struct *task = current;
	struct pt_regs *regs = task_pt_regs(task);
	unsigned long args[6];

	sd->nr = syscall_get_nr(task, regs);
	sd->arch = syscall_get_arch();
	syscall_get_arguments(task, regs, 0, 6, args);
	sd->args[0] = args[0];
	sd->args[1] = args[1];
	sd->args[2] = args[2];
	sd->args[3] = args[3];
	sd->args[4] = args[4];
	sd->args[5] = args[5];
	sd->instruction_pointer = KSTK_EIP(task);
}
/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by bpf_check_classic) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load.  It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;

	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_LD | BPF_W | BPF_ABS:
			ftest->code = BPF_LDX | BPF_W | BPF_ABS;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_LD | BPF_W | BPF_LEN:
			ftest->code = BPF_LD | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_LDX | BPF_W | BPF_LEN:
			ftest->code = BPF_LDX | BPF_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_RET | BPF_K:
		case BPF_RET | BPF_A:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
		case BPF_MISC | BPF_TAX:
		case BPF_MISC | BPF_TXA:
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
		case BPF_JMP | BPF_JA:
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
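/*
 * For illustration (a sketch, not part of the kernel source): a minimal
 * classic BPF program that seccomp_check_filter() accepts.  The
 * BPF_LD|BPF_W|BPF_ABS load of seccomp_data->nr is exactly the kind of
 * instruction the checker rewrites above:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 *
 * A load at an unaligned offset, or at or beyond
 * sizeof(struct seccomp_data), would instead fail with -EINVAL.
 */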
/**
 * seccomp_run_filters - evaluates all seccomp filters against @sd
 * @sd: optional seccomp data to be passed to filters
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(const struct seccomp_data *sd)
{
	struct seccomp_data sd_local;
	u32 ret = SECCOMP_RET_ALLOW;
	/* Make sure cross-thread synced filter points somewhere sane. */
	struct seccomp_filter *f =
			lockless_dereference(current->seccomp.filter);

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (unlikely(WARN_ON(f == NULL)))
		return SECCOMP_RET_KILL;

	if (!sd) {
		populate_seccomp_data(&sd_local);
		sd = &sd_local;
	}

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (; f; f = f->prev) {
		u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)sd);

		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
			ret = cur_ret;
	}
	return ret;
}
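/*
 * For illustration: the "lowest value wins" rule falls out of the numeric
 * action values in <uapi/linux/seccomp.h>:
 *
 *	SECCOMP_RET_KILL	0x00000000U	(strongest)
 *	SECCOMP_RET_TRAP	0x00030000U
 *	SECCOMP_RET_ERRNO	0x00050000U
 *	SECCOMP_RET_TRACE	0x7ff00000U
 *	SECCOMP_RET_ALLOW	0x7fff0000U	(weakest)
 *
 * So if one stacked filter returns ERRNO and another returns KILL for the
 * same syscall, the task is killed.
 */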
#endif /* CONFIG_SECCOMP_FILTER */
static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
	assert_spin_locked(&current->sighand->siglock);

	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
		return false;

	return true;
}
static inline void seccomp_assign_mode(struct task_struct *task,
				       unsigned long seccomp_mode)
{
	assert_spin_locked(&task->sighand->siglock);

	task->seccomp.mode = seccomp_mode;
	/*
	 * Make sure TIF_SECCOMP cannot be set before the mode (and
	 * filter) is set.
	 */
	smp_mb__before_atomic();
	set_tsk_thread_flag(task, TIF_SECCOMP);
}
#ifdef CONFIG_SECCOMP_FILTER
/* Returns 1 if the parent is an ancestor of the child. */
static int is_ancestor(struct seccomp_filter *parent,
		       struct seccomp_filter *child)
{
	/* NULL is the root ancestor. */
	if (parent == NULL)
		return 1;
	for (; child; child = child->prev)
		if (child == parent)
			return 1;
	return 0;
}
/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Validate all threads being eligible for synchronization. */
	caller = current;
	for_each_thread(caller, thread) {
		pid_t failed;

		/* Skip current, since it is initiating the sync. */
		if (thread == caller)
			continue;

		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
		    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
		     is_ancestor(thread->seccomp.filter,
				 caller->seccomp.filter)))
			continue;

		/* Return the first thread that cannot be synchronized. */
		failed = task_pid_vnr(thread);
		/* If the pid cannot be resolved, then return -ESRCH. */
		if (unlikely(WARN_ON(failed == 0)))
			failed = -ESRCH;
		return failed;
	}

	return 0;
}
/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 */
static inline void seccomp_sync_threads(void)
{
	struct task_struct *thread, *caller;

	BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
	assert_spin_locked(&current->sighand->siglock);

	/* Synchronize all threads. */
	caller = current;
	for_each_thread(caller, thread) {
		/* Skip current, since it needs no changes. */
		if (thread == caller)
			continue;

		/* Get a task reference for the new leaf node. */
		get_seccomp_filter(caller);
		/*
		 * Drop the task reference to the shared ancestor since
		 * current's path will hold a reference.  (This also
		 * allows a put before the assignment.)
		 */
		put_seccomp_filter(thread);
		smp_store_release(&thread->seccomp.filter,
				  caller->seccomp.filter);

		/*
		 * Don't let an unprivileged task work around
		 * the no_new_privs restriction by creating
		 * a thread that sets it up, enters seccomp,
		 * then dies.
		 */
		if (task_no_new_privs(caller))
			task_set_no_new_privs(thread);

		/*
		 * Opt the other thread into seccomp if needed.
		 * As threads are considered to be trust-realm
		 * equivalent (see ptrace_may_access), it is safe to
		 * allow one thread to transition the other.
		 */
		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
	}
}
/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *sfilter;
	int ret;
	const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return ERR_PTR(-EINVAL);

	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

	/*
	 * Installing a seccomp filter requires that the task has
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!task_no_new_privs(current) &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return ERR_PTR(-EACCES);

	/* Allocate a new seccomp_filter */
	sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
	if (!sfilter)
		return ERR_PTR(-ENOMEM);

	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
					seccomp_check_filter, save_orig);
	if (ret < 0) {
		kfree(sfilter);
		return ERR_PTR(ret);
	}

	atomic_set(&sfilter->usage, 1);

	return sfilter;
}
/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns the prepared filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
	struct sock_fprog fprog;
	struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
	if (in_compat_syscall()) {
		struct compat_sock_fprog fprog32;

		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	filter = seccomp_prepare_filter(&fprog);
out:
	return filter;
}
/**
 * seccomp_attach_filter: validate and attach filter
 * @flags: flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error.
 */
static long seccomp_attach_filter(unsigned int flags,
				  struct seccomp_filter *filter)
{
	unsigned long total_insns;
	struct seccomp_filter *walker;

	assert_spin_locked(&current->sighand->siglock);

	/* Validate resulting filter length. */
	total_insns = filter->prog->len;
	for (walker = current->seccomp.filter; walker; walker = walker->prev)
		total_insns += walker->prog->len + 4; /* 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/* If thread sync has been requested, check that it is possible. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
		int ret;

		ret = seccomp_can_sync_threads();
		if (ret)
			return ret;
	}

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;

	/* Now that the new filter is in place, synchronize to all threads. */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		seccomp_sync_threads();

	return 0;
}
/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;

	if (!orig)
		return;
	/* Reference count is bounded by the number of total processes. */
	atomic_inc(&orig->usage);
}
static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
	if (filter) {
		bpf_prog_destroy(filter->prog);
		kfree(filter);
	}
}
/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;

	/* Clean up single-reference branches iteratively. */
	while (orig && atomic_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;

		orig = orig->prev;
		seccomp_filter_free(freeme);
	}
}
/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch();
	info.si_syscall = syscall;
	force_sig_info(SIGSYS, &info, current);
}
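/*
 * For illustration (a userspace sketch; the siginfo field names below are
 * what glibc exposes, and availability varies by libc version): a handler
 * installed with sigaction() and SA_SIGINFO before loading the filter can
 * inspect the values filled in above:
 *
 *	static void sigsys_handler(int sig, siginfo_t *info, void *ctx)
 *	{
 *		if (info->si_code != SYS_SECCOMP)
 *			return;
 *		... info->si_syscall is the trapped syscall number,
 *		... info->si_errno is the 16-bit filter reason code.
 *	}
 */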
#endif /* CONFIG_SECCOMP_FILTER */
/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static const int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};
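/*
 * For illustration: userspace enters this mode with
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT);
 *
 * after which any syscall outside the list above kills the task.
 */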
static void __secure_computing_strict(int this_syscall)
{
	const int *syscall_whitelist = mode1_syscalls;

#ifdef CONFIG_COMPAT
	if (in_compat_syscall())
		syscall_whitelist = get_compat_mode1_syscalls();
#endif
	do {
		if (*syscall_whitelist == this_syscall)
			return;
	} while (*++syscall_whitelist);

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	audit_seccomp(this_syscall, SIGKILL, SECCOMP_RET_KILL);
	do_exit(SIGKILL);
}
#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
	int mode = current->seccomp.mode;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return;

	if (mode == SECCOMP_MODE_DISABLED)
		return;
	else if (mode == SECCOMP_MODE_STRICT)
		__secure_computing_strict(this_syscall);
	else
		BUG();
}
#else

#ifdef CONFIG_SECCOMP_FILTER
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	u32 filter_ret, action;
	int data;

	/*
	 * Make sure that any changes to mode from another thread have
	 * been seen after TIF_SECCOMP was seen.
	 */
	rmb();

	filter_ret = seccomp_run_filters(sd);
	data = filter_ret & SECCOMP_RET_DATA;
	action = filter_ret & SECCOMP_RET_ACTION;

	switch (action) {
	case SECCOMP_RET_ERRNO:
		/* Set low-order bits as an errno, capped at MAX_ERRNO. */
		if (data > MAX_ERRNO)
			data = MAX_ERRNO;
		syscall_set_return_value(current, task_pt_regs(current),
					 -data, 0);
		goto skip;

	case SECCOMP_RET_TRAP:
		/* Show the handler the original registers. */
		syscall_rollback(current, task_pt_regs(current));
		/* Let the filter pass back 16 bits of data. */
		seccomp_send_sigsys(this_syscall, data);
		goto skip;

	case SECCOMP_RET_TRACE:
		/* We've been put in this state by the ptracer already. */
		if (recheck_after_trace)
			return 0;

		/* ENOSYS these calls if there is no tracer attached. */
		if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
			syscall_set_return_value(current,
						 task_pt_regs(current),
						 -ENOSYS, 0);
			goto skip;
		}

		/* Allow the BPF to provide the event message */
		ptrace_event(PTRACE_EVENT_SECCOMP, data);
		/*
		 * The delivery of a fatal signal during event
		 * notification may silently skip tracer notification,
		 * which could leave us with a potentially unmodified
		 * syscall that the tracer would have liked to have
		 * changed. Since the process is about to die, we just
		 * force the syscall to be skipped and let the signal
		 * kill the process and correctly handle any tracer exit
		 * notifications.
		 */
		if (fatal_signal_pending(current))
			goto skip;
		/* Check if the tracer forced the syscall to be skipped. */
		this_syscall = syscall_get_nr(current, task_pt_regs(current));
		if (this_syscall < 0)
			goto skip;

		/*
		 * Recheck the syscall, since it may have changed. This
		 * intentionally uses a NULL struct seccomp_data to force
		 * a reload of all registers. This does not goto skip since
		 * a skip would have already been reported.
		 */
		if (__seccomp_filter(this_syscall, NULL, true))
			return -1;

		return 0;

	case SECCOMP_RET_ALLOW:
		return 0;

	case SECCOMP_RET_KILL:
	default:
		audit_seccomp(this_syscall, SIGSYS, action);
		do_exit(SIGSYS);
	}

	unreachable();

skip:
	audit_seccomp(this_syscall, 0, action);
	return -1;
}
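/*
 * For illustration (a sketch, not part of the kernel source): a filter
 * exercising the SECCOMP_RET_ERRNO path above, failing openat(2) with
 * EPERM while allowing everything else.  A production filter would also
 * check seccomp_data->arch first:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 offsetof(struct seccomp_data, nr)),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_openat, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
 *		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
 *	};
 */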
#else
static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
			    const bool recheck_after_trace)
{
	BUG();
}
#endif
int __secure_computing(const struct seccomp_data *sd)
{
	int mode = current->seccomp.mode;
	int this_syscall;

	if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
	    unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
		return 0;

	this_syscall = sd ? sd->nr :
		syscall_get_nr(current, task_pt_regs(current));

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		__secure_computing_strict(this_syscall); /* may call do_exit */
		return 0;
	case SECCOMP_MODE_FILTER:
		return __seccomp_filter(this_syscall, sd, false);
	default:
		BUG();
	}
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */
long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}
/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
	long ret = -EINVAL;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

#ifdef TIF_NOTSC
	disable_TSC();
#endif
	seccomp_assign_mode(current, seccomp_mode);
	ret = 0;

out:
	spin_unlock_irq(&current->sighand->siglock);

	return ret;
}
#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags: flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
				    const char __user *filter)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
	struct seccomp_filter *prepared = NULL;
	long ret = -EINVAL;

	/* Validate flags. */
	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
		return -EINVAL;

	/* Prepare the new filter before holding any locks. */
	prepared = seccomp_prepare_user_filter(filter);
	if (IS_ERR(prepared))
		return PTR_ERR(prepared);

	/*
	 * Make sure we cannot change seccomp or nnp state via TSYNC
	 * while another thread is in the middle of calling exec.
	 */
	if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
	    mutex_lock_killable(&current->signal->cred_guard_mutex))
		goto out_free;

	spin_lock_irq(&current->sighand->siglock);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

	ret = seccomp_attach_filter(flags, prepared);
	if (ret)
		goto out;
	/* Do not free the successfully attached filter. */
	prepared = NULL;

	seccomp_assign_mode(current, seccomp_mode);
out:
	spin_unlock_irq(&current->sighand->siglock);
	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
		mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
	seccomp_filter_free(prepared);
	return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
					   const char __user *filter)
{
	return -EINVAL;
}
#endif
/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
		       const char __user *uargs)
{
	switch (op) {
	case SECCOMP_SET_MODE_STRICT:
		if (flags != 0 || uargs != NULL)
			return -EINVAL;
		return seccomp_set_mode_strict();
	case SECCOMP_SET_MODE_FILTER:
		return seccomp_set_mode_filter(flags, uargs);
	default:
		return -EINVAL;
	}
}
SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
		const char __user *, uargs)
{
	return do_seccomp(op, flags, uargs);
}
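/*
 * For illustration: userspace typically reaches this entry point via the
 * raw syscall, since glibc provides no seccomp() wrapper here, e.g.:
 *
 *	struct sock_fprog prog = { .len = n, .filter = insns };
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *		SECCOMP_FILTER_FLAG_TSYNC, &prog);
 *
 * The TSYNC flag is only reachable through this syscall, not via prctl().
 */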
/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	unsigned int op;
	char __user *uargs;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		op = SECCOMP_SET_MODE_STRICT;
		/*
		 * Setting strict mode through prctl has always ignored the
		 * filter, so make sure it is always NULL here to pass the
		 * internal check in do_seccomp().
		 */
		uargs = NULL;
		break;
	case SECCOMP_MODE_FILTER:
		op = SECCOMP_SET_MODE_FILTER;
		uargs = filter;
		break;
	default:
		return -EINVAL;
	}

	/* prctl interface doesn't have flags, so they are always zero. */
	return do_seccomp(op, 0, uargs);
}
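/*
 * For illustration: the equivalent prctl() form, which ends up in
 * do_seccomp(SECCOMP_SET_MODE_FILTER, 0, filter):
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 */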
#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
			void __user *data)
{
	struct seccomp_filter *filter;
	struct sock_fprog_kern *fprog;
	long ret;
	unsigned long count = 0;

	if (!capable(CAP_SYS_ADMIN) ||
	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
		return -EACCES;
	}

	spin_lock_irq(&task->sighand->siglock);
	if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
		ret = -EINVAL;
		goto out;
	}

	filter = task->seccomp.filter;
	while (filter) {
		filter = filter->prev;
		count++;
	}

	if (filter_off >= count) {
		ret = -ENOENT;
		goto out;
	}
	count -= filter_off;

	filter = task->seccomp.filter;
	while (filter && count > 1) {
		filter = filter->prev;
		count--;
	}

	if (WARN_ON(count != 1 || !filter)) {
		/* The filter tree shouldn't shrink while we're using it. */
		ret = -ENOENT;
		goto out;
	}

	fprog = filter->prog->orig_prog;
	if (!fprog) {
		/* This must be a new non-cBPF filter, since we save
		 * every cBPF filter's orig_prog above when
		 * CONFIG_CHECKPOINT_RESTORE is enabled.
		 */
		ret = -EMEDIUMTYPE;
		goto out;
	}

	ret = fprog->len;
	if (!data)
		goto out;

	get_seccomp_filter(task);
	spin_unlock_irq(&task->sighand->siglock);

	if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
		ret = -EFAULT;

	put_seccomp_filter(task);
	return ret;

out:
	spin_unlock_irq(&task->sighand->siglock);
	return ret;
}
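/*
 * For illustration: this helper backs ptrace(PTRACE_SECCOMP_GET_FILTER).
 * A checkpointing tool might use it roughly as follows (sketch):
 *
 *	long len = ptrace(PTRACE_SECCOMP_GET_FILTER, pid, 0, NULL);
 *	struct sock_filter *insns = calloc(len, sizeof(*insns));
 *
 *	ptrace(PTRACE_SECCOMP_GET_FILTER, pid, 0, insns);
 *
 * Per the code above, a NULL data pointer just returns the instruction
 * count for the requested filter slot.
 */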
#endif