/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
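
/*
 * Example (illustrative sketch, not code from this file): the usual
 * calling convention is that anyone who changes ->blocked does so under
 * ->siglock and then re-evaluates TIF_SIGPENDING:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	sigaddset(&current->blocked, SIGUSR1);
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 */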
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
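
/*
 * Example (illustrative, not from this file): because the first word is
 * filtered through SYNCHRONOUS_MASK, a fault-style signal is dequeued
 * ahead of an asynchronous one pending at the same time ("empty" below
 * stands for an all-clear sigset_t):
 *
 *	sigaddset(&pending->signal, SIGUSR1);
 *	sigaddset(&pending->signal, SIGSEGV);
 *	sig = next_signal(pending, &empty);	// returns SIGSEGV
 */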
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
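
/*
 * Example (illustrative sketch; the callback and context struct below
 * are hypothetical, not part of this file): a driver deferring signal
 * delivery around a critical region would pair the two calls like so:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		return ctx->allow_signals;	// returning 0 blocks the signal
 *	}
 *
 *	block_all_signals(my_notifier, ctx, &mask);
 *	... critical region ...
 *	unblock_all_signals();
 */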
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
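
/*
 * Example (illustrative sketch of the calling convention, not code from
 * this file): the delivery loop dequeues with the siglock held, using
 * the caller's current blocked set as the mask:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	if (signr)
 *		... act on info for signr ...
 */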
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid) ||
	    uid_eq(cred->uid, tcred->suid) ||
	    uid_eq(cred->uid, tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and the
 * ptracer is listening for events, the tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, the STOP trap will
 * eventually be taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (signal->flags & SIGNAL_GROUP_COREDUMP)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
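
/*
 * Example (illustrative, not from this file): legacy (non-realtime)
 * signals coalesce while realtime ones queue. The legacy_queue() check
 * in __send_signal() gives:
 *
 *	send_sig(SIGUSR1, p, 0);
 *	send_sig(SIGUSR1, p, 0);	// coalesced: delivered once
 *	send_sig(SIGRTMIN, p, 0);
 *	send_sig(SIGRTMIN, p, 0);	// queued: delivered twice
 */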
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk(KERN_INFO "code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk(KERN_CONT "%02x ", insn);
		}
	}
	printk(KERN_CONT "\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
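
/*
 * Example (illustrative sketch, not code from this file): callers use
 * the lock_task_sighand()/unlock_task_sighand() wrappers around the
 * helper above and must tolerate failure, which means the task is
 * already dead and its ->sighand is gone:
 *
 *	unsigned long flags;
 *
 *	if (!lock_task_sighand(p, &flags))
 *		return -ESRCH;
 *	... p->sighand and p->pending are stable here ...
 *	unlock_task_sighand(p, &flags);
 */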
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			  const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
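
/*
 * Example (illustrative, not from this file): the priv flag selects the
 * siginfo template used by __send_signal(), which determines how the
 * delivered info identifies the sender:
 *
 *	send_sig(SIGTERM, p, 0);	// as if from a user (SI_USER)
 *	send_sig(SIGKILL, p, 1);	// as from the kernel (SI_KERNEL)
 */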
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
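
/*
 * Example (illustrative sketch): a driver holding a struct pid
 * reference, e.g. one obtained earlier via get_task_pid(), can signal
 * the process without ever touching a raw pid_t:
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *	kill_pid(pid, SIGTERM, 1);	// 1 == privileged sender
 *	put_pid(pid);
 */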
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
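
/*
 * Example (illustrative sketch of the lifecycle described above; the
 * surrounding timer code is simplified): POSIX timers preallocate the
 * sigqueue at creation so that expiry can never fail with -EAGAIN:
 *
 *	q = sigqueue_alloc();			// at timer_create(), may fail
 *	if (!q)
 *		return -EAGAIN;
 *	...
 *	send_sigqueue(q, target, group);	// at each timer expiry
 *	...
 *	sigqueue_free(q);			// at timer_delete()
 */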
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
1976 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1977 * @signr: signr causing group stop if initiating
1979 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1980 * and participate in it. If already set, participate in the existing
1981 * group stop. If participated in a group stop (and thus slept), %true is
1982 * returned with siglock released.
1984 * If ptraced, this function doesn't handle stop itself. Instead,
1985 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
1986 * untouched. The caller must ensure that INTERRUPT trap handling takes
1987 * places afterwards.
1989 * CONTEXT:
1990 * Must be called with @current->sighand->siglock held, which is released
1991 * on %true return.
1993 * RETURNS:
1994 * %false if group stop is already cancelled or ptrace trap is scheduled.
1995 * %true if participated in group stop.
1997 static bool do_signal_stop(int signr)
1998 __releases(&current->sighand->siglock)
2000 struct signal_struct *sig = current->signal;
2002 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2003 unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2004 struct task_struct *t;
2006 /* signr will be recorded in task->jobctl for retries */
2007 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2009 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2010 unlikely(signal_group_exit(sig)))
2011 return false;
2013 * There is no group stop already in progress. We must
2014 * initiate one now.
2016 * While ptraced, a task may be resumed while group stop is
2017 * still in effect and then receive a stop signal and
2018 * initiate another group stop. This deviates from the
2019 * usual behavior as two consecutive stop signals can't
2020 * cause two group stops when !ptraced. That is why we
2021 * also check !task_is_stopped(t) below.
2023 * The condition can be distinguished by testing whether
2024 * SIGNAL_STOP_STOPPED is already set. Don't generate
2025 * group_exit_code in such case.
2027 * This is not necessary for SIGNAL_STOP_CONTINUED because
2028 * an intervening stop signal is required to cause two
2029 * continued events regardless of ptrace.
2031 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2032 sig->group_exit_code = signr;
2034 sig->group_stop_count = 0;
2036 if (task_set_jobctl_pending(current, signr | gstop))
2037 sig->group_stop_count++;
2039 t = current;
2040 while_each_thread(current, t) {
2042 * Setting state to TASK_STOPPED for a group
2043 * stop is always done with the siglock held,
2044 * so this check has no races.
2046 if (!task_is_stopped(t) &&
2047 task_set_jobctl_pending(t, signr | gstop)) {
2048 sig->group_stop_count++;
2049 if (likely(!(t->ptrace & PT_SEIZED)))
2050 signal_wake_up(t, 0);
2051 else
2052 ptrace_trap_notify(t);
2057 if (likely(!current->ptrace)) {
2058 int notify = 0;
2061 * If there are no other threads in the group, or if there
2062 * is a group stop in progress and we are the last to stop,
2063 * report to the parent.
2065 if (task_participate_group_stop(current))
2066 notify = CLD_STOPPED;
2068 __set_current_state(TASK_STOPPED);
2069 spin_unlock_irq(&current->sighand->siglock);
2072 * Notify the parent of the group stop completion. Because
2073 * we're not holding either the siglock or tasklist_lock
2074 * here, the ptracer may attach in between; however, this is for
2075 * group stop and should always be delivered to the real
2076 * parent of the group leader. The new ptracer will get
2077 * its notification when this task transitions into
2078 * TASK_TRACED.
2080 if (notify) {
2081 read_lock(&tasklist_lock);
2082 do_notify_parent_cldstop(current, false, notify);
2083 read_unlock(&tasklist_lock);
2086 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2087 freezable_schedule();
2088 return true;
2089 } else {
2091 * While ptraced, group stop is handled by STOP trap.
2092 * Schedule it and let the caller deal with it.
2094 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2095 return false;
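/*
 * Illustrative userspace sketch (not part of this file): a parent
 * driving and observing the group stop implemented above through the
 * standard POSIX job-control interfaces.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0)			/* child: wait to be signalled */
		for (;;)
			pause();
	kill(pid, SIGSTOP);
	waitpid(pid, &status, WUNTRACED);	/* group stop completed */
	if (WIFSTOPPED(status))
		printf("stopped by signal %d\n", WSTOPSIG(status));
	kill(pid, SIGCONT);
	waitpid(pid, &status, WCONTINUED);	/* SIGNAL_CLD_CONTINUED */
	if (WIFCONTINUED(status))
		printf("continued\n");
	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);
	return 0;
}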
2100 * do_jobctl_trap - take care of ptrace jobctl traps
2102 * When PT_SEIZED, it's used for both group stop and explicit
2103 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2104 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2105 * the stop signal; otherwise, %SIGTRAP.
2107 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2108 * number as exit_code and no siginfo.
2110 * CONTEXT:
2111 * Must be called with @current->sighand->siglock held, which may be
2112 * released and re-acquired before returning with intervening sleep.
2114 static void do_jobctl_trap(void)
2116 struct signal_struct *signal = current->signal;
2117 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2119 if (current->ptrace & PT_SEIZED) {
2120 if (!signal->group_stop_count &&
2121 !(signal->flags & SIGNAL_STOP_STOPPED))
2122 signr = SIGTRAP;
2123 WARN_ON_ONCE(!signr);
2124 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2125 CLD_STOPPED);
2126 } else {
2127 WARN_ON_ONCE(!signr);
2128 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2129 current->exit_code = 0;
2133 static int ptrace_signal(int signr, siginfo_t *info)
2135 ptrace_signal_deliver();
2137 * We do not check sig_kernel_stop(signr) but set this marker
2138 * unconditionally because we do not know whether the debugger will
2139 * change signr. This flag has no meaning unless we are going
2140 * to stop after return from ptrace_stop(). In this case it will
2141 * be checked in do_signal_stop(), we should only stop if it was
2142 * not cleared by SIGCONT while we were sleeping. See also the
2143 * comment in dequeue_signal().
2145 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2146 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2148 /* We're back. Did the debugger cancel the sig? */
2149 signr = current->exit_code;
2150 if (signr == 0)
2151 return signr;
2153 current->exit_code = 0;
2156 * Update the siginfo structure if the signal has
2157 * changed. If the debugger wanted something
2158 * specific in the siginfo structure then it should
2159 * have updated *info via PTRACE_SETSIGINFO.
2161 if (signr != info->si_signo) {
2162 info->si_signo = signr;
2163 info->si_errno = 0;
2164 info->si_code = SI_USER;
2165 rcu_read_lock();
2166 info->si_pid = task_pid_vnr(current->parent);
2167 info->si_uid = from_kuid_munged(current_user_ns(),
2168 task_uid(current->parent));
2169 rcu_read_unlock();
2172 /* If the (new) signal is now blocked, requeue it. */
2173 if (sigismember(&current->blocked, signr)) {
2174 specific_send_sig_info(signr, info, current);
2175 signr = 0;
2178 return signr;
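/*
 * Illustrative tracer-side sketch (not part of this file): after a
 * signal-delivery stop, the fourth ptrace() argument becomes the
 * exit_code examined above: 0 cancels the signal, any other value is
 * delivered in its place. The helper name is hypothetical.
 */
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void resume_swallowing_sigusr1(pid_t pid, int status)
{
	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGUSR1)
		ptrace(PTRACE_CONT, pid, 0, 0);	/* cancel the signal */
	else
		ptrace(PTRACE_CONT, pid, 0, WSTOPSIG(status));
}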
2181 int get_signal(struct ksignal *ksig)
2183 struct sighand_struct *sighand = current->sighand;
2184 struct signal_struct *signal = current->signal;
2185 int signr;
2187 if (unlikely(current->task_works))
2188 task_work_run();
2190 if (unlikely(uprobe_deny_signal()))
2191 return 0;
2194 * Do this once; we can't return to user mode if freezing() == T.
2195 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2196 * thus do not need another check after return.
2198 try_to_freeze();
2200 relock:
2201 spin_lock_irq(&sighand->siglock);
2203 * Every stopped thread goes here after wakeup. Check to see if
2204 * we should notify the parent, prepare_signal(SIGCONT) encodes
2205 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2207 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2208 int why;
2210 if (signal->flags & SIGNAL_CLD_CONTINUED)
2211 why = CLD_CONTINUED;
2212 else
2213 why = CLD_STOPPED;
2215 signal->flags &= ~SIGNAL_CLD_MASK;
2217 spin_unlock_irq(&sighand->siglock);
2220 * Notify the parent that we're continuing. This event is
2221 * always per-process and doesn't make a whole lot of sense
2222 * for ptracers, who shouldn't consume the state via
2223 * wait(2) either; but, for backward compatibility, notify
2224 * the ptracer of the group leader too unless that would be
2225 * a duplicate.
2227 read_lock(&tasklist_lock);
2228 do_notify_parent_cldstop(current, false, why);
2230 if (ptrace_reparented(current->group_leader))
2231 do_notify_parent_cldstop(current->group_leader,
2232 true, why);
2233 read_unlock(&tasklist_lock);
2235 goto relock;
2238 for (;;) {
2239 struct k_sigaction *ka;
2241 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2242 do_signal_stop(0))
2243 goto relock;
2245 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2246 do_jobctl_trap();
2247 spin_unlock_irq(&sighand->siglock);
2248 goto relock;
2251 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2253 if (!signr)
2254 break; /* will return 0 */
2256 if (unlikely(current->ptrace) && signr != SIGKILL) {
2257 signr = ptrace_signal(signr, &ksig->info);
2258 if (!signr)
2259 continue;
2262 ka = &sighand->action[signr-1];
2264 /* Trace actually delivered signals. */
2265 trace_signal_deliver(signr, &ksig->info, ka);
2267 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2268 continue;
2269 if (ka->sa.sa_handler != SIG_DFL) {
2270 /* Run the handler. */
2271 ksig->ka = *ka;
2273 if (ka->sa.sa_flags & SA_ONESHOT)
2274 ka->sa.sa_handler = SIG_DFL;
2276 break; /* will return non-zero "signr" value */
2280 * Now we are doing the default action for this signal.
2282 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2283 continue;
2286 * Global init gets no signals it doesn't want.
2287 * Container-init gets no signals it doesn't want from the same
2288 * container.
2290 * Note that if global/container-init sees a sig_kernel_only()
2291 * signal here, the signal must have been generated internally
2292 * or must have come from an ancestor namespace. In either
2293 * case, the signal cannot be dropped.
2295 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2296 !sig_kernel_only(signr))
2297 continue;
2299 if (sig_kernel_stop(signr)) {
2301 * The default action is to stop all threads in
2302 * the thread group. The job control signals
2303 * do nothing in an orphaned pgrp, but SIGSTOP
2304 * always works. Note that siglock needs to be
2305 * dropped during the call to is_orphaned_pgrp()
2306 * because of lock ordering with tasklist_lock.
2307 * This allows an intervening SIGCONT to be posted.
2308 * We need to check for that and bail out if necessary.
2310 if (signr != SIGSTOP) {
2311 spin_unlock_irq(&sighand->siglock);
2313 /* signals can be posted during this window */
2315 if (is_current_pgrp_orphaned())
2316 goto relock;
2318 spin_lock_irq(&sighand->siglock);
2321 if (likely(do_signal_stop(ksig->info.si_signo))) {
2322 /* It released the siglock. */
2323 goto relock;
2327 * We didn't actually stop, due to a race
2328 * with SIGCONT or something like that.
2330 continue;
2333 spin_unlock_irq(&sighand->siglock);
2336 * Anything else is fatal, maybe with a core dump.
2338 current->flags |= PF_SIGNALED;
2340 if (sig_kernel_coredump(signr)) {
2341 if (print_fatal_signals)
2342 print_fatal_signal(ksig->info.si_signo);
2343 proc_coredump_connector(current);
2345 * If it was able to dump core, this kills all
2346 * other threads in the group and synchronizes with
2347 * their demise. If we lost the race with another
2348 * thread getting here, it set group_exit_code
2349 * first and our do_group_exit call below will use
2350 * that value and ignore the one we pass it.
2352 do_coredump(&ksig->info);
2356 * Death signals, no core dump.
2358 do_group_exit(ksig->info.si_signo);
2359 /* NOTREACHED */
2361 spin_unlock_irq(&sighand->siglock);
2363 ksig->sig = signr;
2364 return ksig->sig > 0;
2368 * signal_delivered - update state after successful signal delivery
2369 * @ksig: kernel signal struct
2370 * @stepping: nonzero if debugger single-step or block-step in use
2372 * This function should be called when a signal has successfully been
2373 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2374 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2375 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2377 static void signal_delivered(struct ksignal *ksig, int stepping)
2379 sigset_t blocked;
2381 /* A signal was successfully delivered, and the saved
2382 * sigmask was stored on the signal frame and will be
2383 * restored by sigreturn. So we can simply clear the
2384 * restore sigmask flag. */
2385 clear_restore_sigmask();
2387 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2388 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2389 sigaddset(&blocked, ksig->sig);
2390 set_current_blocked(&blocked);
2391 tracehook_signal_handler(stepping);
2394 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2396 if (failed)
2397 force_sigsegv(ksig->sig, current);
2398 else
2399 signal_delivered(ksig, stepping);
2403 * It could be that complete_signal() picked us to notify about the
2404 * group-wide signal. Other threads should be notified now to take
2405 * the shared signals in @which since we will not.
2407 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2409 sigset_t retarget;
2410 struct task_struct *t;
2412 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2413 if (sigisemptyset(&retarget))
2414 return;
2416 t = tsk;
2417 while_each_thread(tsk, t) {
2418 if (t->flags & PF_EXITING)
2419 continue;
2421 if (!has_pending_signals(&retarget, &t->blocked))
2422 continue;
2423 /* Remove the signals this thread can handle. */
2424 sigandsets(&retarget, &retarget, &t->blocked);
2426 if (!signal_pending(t))
2427 signal_wake_up(t, 0);
2429 if (sigisemptyset(&retarget))
2430 break;
2434 void exit_signals(struct task_struct *tsk)
2436 int group_stop = 0;
2437 sigset_t unblocked;
2440 * @tsk is about to have PF_EXITING set - lock out users which
2441 * expect stable threadgroup.
2443 threadgroup_change_begin(tsk);
2445 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2446 tsk->flags |= PF_EXITING;
2447 threadgroup_change_end(tsk);
2448 return;
2451 spin_lock_irq(&tsk->sighand->siglock);
2453 * From now this task is not visible for group-wide signals,
2454 * see wants_signal(), do_signal_stop().
2456 tsk->flags |= PF_EXITING;
2458 threadgroup_change_end(tsk);
2460 if (!signal_pending(tsk))
2461 goto out;
2463 unblocked = tsk->blocked;
2464 signotset(&unblocked);
2465 retarget_shared_pending(tsk, &unblocked);
2467 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2468 task_participate_group_stop(tsk))
2469 group_stop = CLD_STOPPED;
2470 out:
2471 spin_unlock_irq(&tsk->sighand->siglock);
2474 * If group stop has completed, deliver the notification. This
2475 * should always go to the real parent of the group leader.
2477 if (unlikely(group_stop)) {
2478 read_lock(&tasklist_lock);
2479 do_notify_parent_cldstop(tsk, false, group_stop);
2480 read_unlock(&tasklist_lock);
2484 EXPORT_SYMBOL(recalc_sigpending);
2485 EXPORT_SYMBOL_GPL(dequeue_signal);
2486 EXPORT_SYMBOL(flush_signals);
2487 EXPORT_SYMBOL(force_sig);
2488 EXPORT_SYMBOL(send_sig);
2489 EXPORT_SYMBOL(send_sig_info);
2490 EXPORT_SYMBOL(sigprocmask);
2491 EXPORT_SYMBOL(block_all_signals);
2492 EXPORT_SYMBOL(unblock_all_signals);
2496 * System call entry points.
2500 * sys_restart_syscall - restart a system call
2502 SYSCALL_DEFINE0(restart_syscall)
2504 struct restart_block *restart = &current->restart_block;
2505 return restart->fn(restart);
2508 long do_no_restart_syscall(struct restart_block *param)
2510 return -EINTR;
2513 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2515 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2516 sigset_t newblocked;
2517 /* A set of now blocked but previously unblocked signals. */
2518 sigandnsets(&newblocked, newset, &current->blocked);
2519 retarget_shared_pending(tsk, &newblocked);
2521 tsk->blocked = *newset;
2522 recalc_sigpending();
2526 * set_current_blocked - change current->blocked mask
2527 * @newset: new mask
2529 * It is wrong to change ->blocked directly; this helper should be used
2530 * to ensure the process can't miss a shared signal we are going to block.
2532 void set_current_blocked(sigset_t *newset)
2534 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2535 __set_current_blocked(newset);
2538 void __set_current_blocked(const sigset_t *newset)
2540 struct task_struct *tsk = current;
2542 spin_lock_irq(&tsk->sighand->siglock);
2543 __set_task_blocked(tsk, newset);
2544 spin_unlock_irq(&tsk->sighand->siglock);
2548 * This is also useful for kernel threads that want to temporarily
2549 * (or permanently) block certain signals.
2551 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2552 * interface happily blocks "unblockable" signals like SIGKILL
2553 * and friends.
2555 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2557 struct task_struct *tsk = current;
2558 sigset_t newset;
2560 /* Lockless, only current can change ->blocked, never from irq */
2561 if (oldset)
2562 *oldset = tsk->blocked;
2564 switch (how) {
2565 case SIG_BLOCK:
2566 sigorsets(&newset, &tsk->blocked, set);
2567 break;
2568 case SIG_UNBLOCK:
2569 sigandnsets(&newset, &tsk->blocked, set);
2570 break;
2571 case SIG_SETMASK:
2572 newset = *set;
2573 break;
2574 default:
2575 return -EINVAL;
2578 __set_current_blocked(&newset);
2579 return 0;
2583 * sys_rt_sigprocmask - change the list of currently blocked signals
2584 * @how: whether to add, remove, or set signals
2585 * @nset: new set of blocked signals (if non-null)
2586 * @oset: previous value of signal mask if non-null
2587 * @sigsetsize: size of sigset_t type
2589 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2590 sigset_t __user *, oset, size_t, sigsetsize)
2592 sigset_t old_set, new_set;
2593 int error;
2595 /* XXX: Don't preclude handling different sized sigset_t's. */
2596 if (sigsetsize != sizeof(sigset_t))
2597 return -EINVAL;
2599 old_set = current->blocked;
2601 if (nset) {
2602 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2603 return -EFAULT;
2604 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2606 error = sigprocmask(how, &new_set, NULL);
2607 if (error)
2608 return error;
2611 if (oset) {
2612 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2613 return -EFAULT;
2616 return 0;
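/*
 * Illustrative userspace sketch (not part of this file): the usual
 * way this syscall is reached, via glibc's sigprocmask() wrapper.
 */
#include <signal.h>

int main(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	if (sigprocmask(SIG_BLOCK, &block, &old) == -1)
		return 1;
	/* critical region: SIGINT stays pending, not delivered */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore old mask */
	return 0;
}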
2619 #ifdef CONFIG_COMPAT
2620 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2621 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2623 #ifdef __BIG_ENDIAN
2624 sigset_t old_set = current->blocked;
2626 /* XXX: Don't preclude handling different sized sigset_t's. */
2627 if (sigsetsize != sizeof(sigset_t))
2628 return -EINVAL;
2630 if (nset) {
2631 compat_sigset_t new32;
2632 sigset_t new_set;
2633 int error;
2634 if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2635 return -EFAULT;
2637 sigset_from_compat(&new_set, &new32);
2638 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2640 error = sigprocmask(how, &new_set, NULL);
2641 if (error)
2642 return error;
2644 if (oset) {
2645 compat_sigset_t old32;
2646 sigset_to_compat(&old32, &old_set);
2647 if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2648 return -EFAULT;
2650 return 0;
2651 #else
2652 return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2653 (sigset_t __user *)oset, sigsetsize);
2654 #endif
2656 #endif
2658 static int do_sigpending(void *set, unsigned long sigsetsize)
2660 if (sigsetsize > sizeof(sigset_t))
2661 return -EINVAL;
2663 spin_lock_irq(&current->sighand->siglock);
2664 sigorsets(set, &current->pending.signal,
2665 &current->signal->shared_pending.signal);
2666 spin_unlock_irq(&current->sighand->siglock);
2668 /* Outside the lock because only this thread touches it. */
2669 sigandsets(set, &current->blocked, set);
2670 return 0;
2674 * sys_rt_sigpending - examine a pending signal that has been raised
2675 * while blocked
2676 * @uset: where the set of pending signals is returned
2677 * @sigsetsize: size of sigset_t type or smaller
2679 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2681 sigset_t set;
2682 int err = do_sigpending(&set, sigsetsize);
2683 if (!err && copy_to_user(uset, &set, sigsetsize))
2684 err = -EFAULT;
2685 return err;
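/*
 * Illustrative userspace sketch (not part of this file): raising a
 * blocked signal and then observing it through sigpending().
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pend;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);
	raise(SIGUSR1);			/* stays pending while blocked */
	sigpending(&pend);
	printf("SIGUSR1 pending: %d\n", sigismember(&pend, SIGUSR1));
	return 0;
}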
2688 #ifdef CONFIG_COMPAT
2689 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2690 compat_size_t, sigsetsize)
2692 #ifdef __BIG_ENDIAN
2693 sigset_t set;
2694 int err = do_sigpending(&set, sigsetsize);
2695 if (!err) {
2696 compat_sigset_t set32;
2697 sigset_to_compat(&set32, &set);
2698 /* we can get here only if sigsetsize <= sizeof(set) */
2699 if (copy_to_user(uset, &set32, sigsetsize))
2700 err = -EFAULT;
2702 return err;
2703 #else
2704 return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2705 #endif
2707 #endif
2709 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2711 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2713 int err;
2715 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2716 return -EFAULT;
2717 if (from->si_code < 0)
2718 return __copy_to_user(to, from, sizeof(siginfo_t))
2719 ? -EFAULT : 0;
2721 * If you change siginfo_t structure, please be sure
2722 * this code is fixed accordingly.
2723 * Please remember to update the signalfd_copyinfo() function
2724 * inside fs/signalfd.c too, in case siginfo_t changes.
2725 * It should never copy any pad contained in the structure
2726 * to avoid security leaks, but must copy the generic
2727 * 3 ints plus the relevant union member.
2729 err = __put_user(from->si_signo, &to->si_signo);
2730 err |= __put_user(from->si_errno, &to->si_errno);
2731 err |= __put_user((short)from->si_code, &to->si_code);
2732 switch (from->si_code & __SI_MASK) {
2733 case __SI_KILL:
2734 err |= __put_user(from->si_pid, &to->si_pid);
2735 err |= __put_user(from->si_uid, &to->si_uid);
2736 break;
2737 case __SI_TIMER:
2738 err |= __put_user(from->si_tid, &to->si_tid);
2739 err |= __put_user(from->si_overrun, &to->si_overrun);
2740 err |= __put_user(from->si_ptr, &to->si_ptr);
2741 break;
2742 case __SI_POLL:
2743 err |= __put_user(from->si_band, &to->si_band);
2744 err |= __put_user(from->si_fd, &to->si_fd);
2745 break;
2746 case __SI_FAULT:
2747 err |= __put_user(from->si_addr, &to->si_addr);
2748 #ifdef __ARCH_SI_TRAPNO
2749 err |= __put_user(from->si_trapno, &to->si_trapno);
2750 #endif
2751 #ifdef BUS_MCEERR_AO
2753 * Other callers might not initialize the si_lsb field,
2754 * so check explicitly for the right codes here.
2756 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2757 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2758 #endif
2759 #ifdef SEGV_BNDERR
2760 err |= __put_user(from->si_lower, &to->si_lower);
2761 err |= __put_user(from->si_upper, &to->si_upper);
2762 #endif
2763 break;
2764 case __SI_CHLD:
2765 err |= __put_user(from->si_pid, &to->si_pid);
2766 err |= __put_user(from->si_uid, &to->si_uid);
2767 err |= __put_user(from->si_status, &to->si_status);
2768 err |= __put_user(from->si_utime, &to->si_utime);
2769 err |= __put_user(from->si_stime, &to->si_stime);
2770 break;
2771 case __SI_RT: /* This is not generated by the kernel as of now. */
2772 case __SI_MESGQ: /* But this is */
2773 err |= __put_user(from->si_pid, &to->si_pid);
2774 err |= __put_user(from->si_uid, &to->si_uid);
2775 err |= __put_user(from->si_ptr, &to->si_ptr);
2776 break;
2777 #ifdef __ARCH_SIGSYS
2778 case __SI_SYS:
2779 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2780 err |= __put_user(from->si_syscall, &to->si_syscall);
2781 err |= __put_user(from->si_arch, &to->si_arch);
2782 break;
2783 #endif
2784 default: /* this is just in case for now ... */
2785 err |= __put_user(from->si_pid, &to->si_pid);
2786 err |= __put_user(from->si_uid, &to->si_uid);
2787 break;
2789 return err;
2792 #endif
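/*
 * Illustrative userspace sketch (not part of this file): an
 * SA_SIGINFO handler receiving the fields copied out above for the
 * __SI_KILL case. printf() in a handler is not async-signal-safe;
 * it is used here only to keep the example short.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_sig(int sig, siginfo_t *si, void *uctx)
{
	printf("sig %d from pid %ld uid %ld\n", sig,
	       (long)si->si_pid, (long)si->si_uid);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_sig;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);
	kill(getpid(), SIGUSR1);
	return 0;
}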
2795 * do_sigtimedwait - wait for queued signals specified in @which
2796 * @which: queued signals to wait for
2797 * @info: if non-null, the signal's siginfo is returned here
2798 * @ts: upper bound on process time suspension
2800 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2801 const struct timespec *ts)
2803 struct task_struct *tsk = current;
2804 long timeout = MAX_SCHEDULE_TIMEOUT;
2805 sigset_t mask = *which;
2806 int sig;
2808 if (ts) {
2809 if (!timespec_valid(ts))
2810 return -EINVAL;
2811 timeout = timespec_to_jiffies(ts);
2813 * We can be close to the next tick; add another one
2814 * to ensure we will wait at least the time asked for.
2816 if (ts->tv_sec || ts->tv_nsec)
2817 timeout++;
2821 * Invert the set of allowed signals to get those we want to block.
2823 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2824 signotset(&mask);
2826 spin_lock_irq(&tsk->sighand->siglock);
2827 sig = dequeue_signal(tsk, &mask, info);
2828 if (!sig && timeout) {
2830 * None ready; temporarily unblock those we're interested in
2831 * while we are sleeping, so that we'll be awakened when they
2832 * arrive. Unblocking is always fine; we can avoid
2833 * set_current_blocked().
2835 tsk->real_blocked = tsk->blocked;
2836 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2837 recalc_sigpending();
2838 spin_unlock_irq(&tsk->sighand->siglock);
2840 timeout = freezable_schedule_timeout_interruptible(timeout);
2842 spin_lock_irq(&tsk->sighand->siglock);
2843 __set_task_blocked(tsk, &tsk->real_blocked);
2844 sigemptyset(&tsk->real_blocked);
2845 sig = dequeue_signal(tsk, &mask, info);
2847 spin_unlock_irq(&tsk->sighand->siglock);
2849 if (sig)
2850 return sig;
2851 return timeout ? -EINTR : -EAGAIN;
2855 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2856 * in @uthese
2857 * @uthese: queued signals to wait for
2858 * @uinfo: if non-null, the signal's siginfo is returned here
2859 * @uts: upper bound on process time suspension
2860 * @sigsetsize: size of sigset_t type
2862 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2863 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2864 size_t, sigsetsize)
2866 sigset_t these;
2867 struct timespec ts;
2868 siginfo_t info;
2869 int ret;
2871 /* XXX: Don't preclude handling different sized sigset_t's. */
2872 if (sigsetsize != sizeof(sigset_t))
2873 return -EINVAL;
2875 if (copy_from_user(&these, uthese, sizeof(these)))
2876 return -EFAULT;
2878 if (uts) {
2879 if (copy_from_user(&ts, uts, sizeof(ts)))
2880 return -EFAULT;
2883 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2885 if (ret > 0 && uinfo) {
2886 if (copy_siginfo_to_user(uinfo, &info))
2887 ret = -EFAULT;
2890 return ret;
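/*
 * Illustrative userspace sketch (not part of this file): synchronous
 * reception through sigtimedwait(3); the -EAGAIN returned by
 * do_sigtimedwait() above surfaces as errno EAGAIN on timeout.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* must be blocked first */
	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
		printf("SIGUSR1 from pid %ld\n", (long)info.si_pid);
	else
		perror("sigtimedwait");		/* EAGAIN on timeout */
	return 0;
}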
2894 * sys_kill - send a signal to a process
2895 * @pid: the PID of the process
2896 * @sig: signal to be sent
2898 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2900 struct siginfo info;
2902 info.si_signo = sig;
2903 info.si_errno = 0;
2904 info.si_code = SI_USER;
2905 info.si_pid = task_tgid_vnr(current);
2906 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2908 return kill_something_info(sig, &info, pid);
2911 static int
2912 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2914 struct task_struct *p;
2915 int error = -ESRCH;
2917 rcu_read_lock();
2918 p = find_task_by_vpid(pid);
2919 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2920 error = check_kill_permission(sig, info, p);
2922 * The null signal is a permissions and process existence
2923 * probe. No signal is actually delivered.
2925 if (!error && sig) {
2926 error = do_send_sig_info(sig, info, p, false);
2928 * If lock_task_sighand() failed we pretend the task
2929 * dies after receiving the signal. The window is tiny,
2930 * and the signal is private anyway.
2932 if (unlikely(error == -ESRCH))
2933 error = 0;
2936 rcu_read_unlock();
2938 return error;
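/*
 * Illustrative userspace sketch (not part of this file): the null
 * signal probe described above, as commonly used to test for process
 * existence without delivering anything. The helper name is
 * hypothetical.
 */
#include <errno.h>
#include <signal.h>
#include <sys/types.h>

static int process_exists(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;		/* exists, and we may signal it */
	return errno == EPERM;		/* exists, but not ours */
}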
2941 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2943 struct siginfo info = {};
2945 info.si_signo = sig;
2946 info.si_errno = 0;
2947 info.si_code = SI_TKILL;
2948 info.si_pid = task_tgid_vnr(current);
2949 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2951 return do_send_specific(tgid, pid, sig, &info);
2955 * sys_tgkill - send signal to one specific thread
2956 * @tgid: the thread group ID of the thread
2957 * @pid: the PID of the thread
2958 * @sig: signal to be sent
2960 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2961 * exists but no longer belongs to the target thread group. This
2962 * method solves the problem of threads exiting and PIDs getting reused.
2964 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2966 /* This is only valid for single tasks */
2967 if (pid <= 0 || tgid <= 0)
2968 return -EINVAL;
2970 return do_tkill(tgid, pid, sig);
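/*
 * Illustrative userspace sketch (not part of this file): older glibc
 * versions ship no tgkill() wrapper, so the raw syscall is typically
 * used to target one specific thread race-free.
 */
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int tgkill_raw(pid_t tgid, pid_t tid, int sig)
{
	return syscall(SYS_tgkill, tgid, tid, sig);
}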
2974 * sys_tkill - send signal to one specific task
2975 * @pid: the PID of the task
2976 * @sig: signal to be sent
2978 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2980 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2982 /* This is only valid for single tasks */
2983 if (pid <= 0)
2984 return -EINVAL;
2986 return do_tkill(0, pid, sig);
2989 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
2991 /* Not even root can pretend to send signals from the kernel.
2992 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2994 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
2995 (task_pid_vnr(current) != pid))
2996 return -EPERM;
2998 info->si_signo = sig;
3000 /* POSIX.1b doesn't mention process groups. */
3001 return kill_proc_info(sig, info, pid);
3005 * sys_rt_sigqueueinfo - queue a signal and accompanying siginfo to a process
3006 * @pid: the PID of the process
3007 * @sig: signal to be sent
3008 * @uinfo: signal info to be sent
3010 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3011 siginfo_t __user *, uinfo)
3013 siginfo_t info;
3014 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3015 return -EFAULT;
3016 return do_rt_sigqueueinfo(pid, sig, &info);
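/*
 * Illustrative userspace sketch (not part of this file): sigqueue(3)
 * reaches this syscall with si_code == SI_QUEUE, a negative value, so
 * the si_code >= 0 restriction above does not reject it.
 */
#include <signal.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	union sigval v = { .sival_int = 42 };

	/* queues SIGUSR1 with accompanying data via rt_sigqueueinfo */
	return sigqueue(getpid(), SIGUSR1, v);
}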
3019 #ifdef CONFIG_COMPAT
3020 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3021 compat_pid_t, pid,
3022 int, sig,
3023 struct compat_siginfo __user *, uinfo)
3025 siginfo_t info;
3026 int ret = copy_siginfo_from_user32(&info, uinfo);
3027 if (unlikely(ret))
3028 return ret;
3029 return do_rt_sigqueueinfo(pid, sig, &info);
3031 #endif
3033 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3035 /* This is only valid for single tasks */
3036 if (pid <= 0 || tgid <= 0)
3037 return -EINVAL;
3039 /* Not even root can pretend to send signals from the kernel.
3040 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3042 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3043 (task_pid_vnr(current) != pid))
3044 return -EPERM;
3046 info->si_signo = sig;
3048 return do_send_specific(tgid, pid, sig, info);
3051 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3052 siginfo_t __user *, uinfo)
3054 siginfo_t info;
3056 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3057 return -EFAULT;
3059 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3062 #ifdef CONFIG_COMPAT
3063 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3064 compat_pid_t, tgid,
3065 compat_pid_t, pid,
3066 int, sig,
3067 struct compat_siginfo __user *, uinfo)
3069 siginfo_t info;
3071 if (copy_siginfo_from_user32(&info, uinfo))
3072 return -EFAULT;
3073 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3075 #endif
3078 * For kthreads only; must not be used if cloned with CLONE_SIGHAND
3080 void kernel_sigaction(int sig, __sighandler_t action)
3082 spin_lock_irq(&current->sighand->siglock);
3083 current->sighand->action[sig - 1].sa.sa_handler = action;
3084 if (action == SIG_IGN) {
3085 sigset_t mask;
3087 sigemptyset(&mask);
3088 sigaddset(&mask, sig);
3090 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3091 flush_sigqueue_mask(&mask, &current->pending);
3092 recalc_sigpending();
3094 spin_unlock_irq(&current->sighand->siglock);
3096 EXPORT_SYMBOL(kernel_sigaction);
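/*
 * Hedged kernel-side sketch (not part of this file): in this kernel,
 * allow_signal()/disallow_signal() in <linux/signal.h> are thin
 * wrappers around kernel_sigaction(). A kthread might use them as
 * follows; demo_thread is a hypothetical example.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/signal.h>

static int demo_thread(void *unused)
{
	allow_signal(SIGTERM);		/* wraps kernel_sigaction() */
	while (!kthread_should_stop()) {
		if (signal_pending(current)) {
			flush_signals(current);
			break;
		}
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}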
3098 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3100 struct task_struct *p = current, *t;
3101 struct k_sigaction *k;
3102 sigset_t mask;
3104 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3105 return -EINVAL;
3107 k = &p->sighand->action[sig-1];
3109 spin_lock_irq(&p->sighand->siglock);
3110 if (oact)
3111 *oact = *k;
3113 if (act) {
3114 sigdelsetmask(&act->sa.sa_mask,
3115 sigmask(SIGKILL) | sigmask(SIGSTOP));
3116 *k = *act;
3118 * POSIX 3.3.1.3:
3119 * "Setting a signal action to SIG_IGN for a signal that is
3120 * pending shall cause the pending signal to be discarded,
3121 * whether or not it is blocked."
3123 * "Setting a signal action to SIG_DFL for a signal that is
3124 * pending and whose default action is to ignore the signal
3125 * (for example, SIGCHLD), shall cause the pending signal to
3126 * be discarded, whether or not it is blocked"
3128 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3129 sigemptyset(&mask);
3130 sigaddset(&mask, sig);
3131 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3132 for_each_thread(p, t)
3133 flush_sigqueue_mask(&mask, &t->pending);
3137 spin_unlock_irq(&p->sighand->siglock);
3138 return 0;
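/*
 * Illustrative userspace sketch (not part of this file),
 * demonstrating the POSIX 3.3.1.3 rule quoted above: installing
 * SIG_IGN discards a pending instance even while it is blocked.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pend;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGUSR1);			/* now pending and blocked */
	signal(SIGUSR1, SIG_IGN);	/* flushes the pending instance */
	sigpending(&pend);
	printf("still pending: %d\n", sigismember(&pend, SIGUSR1));
	return 0;
}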
3141 static int
3142 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3144 stack_t oss;
3145 int error;
3147 oss.ss_sp = (void __user *) current->sas_ss_sp;
3148 oss.ss_size = current->sas_ss_size;
3149 oss.ss_flags = sas_ss_flags(sp);
3151 if (uss) {
3152 void __user *ss_sp;
3153 size_t ss_size;
3154 int ss_flags;
3156 error = -EFAULT;
3157 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3158 goto out;
3159 error = __get_user(ss_sp, &uss->ss_sp) |
3160 __get_user(ss_flags, &uss->ss_flags) |
3161 __get_user(ss_size, &uss->ss_size);
3162 if (error)
3163 goto out;
3165 error = -EPERM;
3166 if (on_sig_stack(sp))
3167 goto out;
3169 error = -EINVAL;
3171 * Note - this code used to test ss_flags incorrectly:
3172 * old code may have been written using ss_flags==0
3173 * to mean ss_flags==SS_ONSTACK (as this was the only
3174 * way that worked) - this fix preserves that older
3175 * mechanism.
3177 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3178 goto out;
3180 if (ss_flags == SS_DISABLE) {
3181 ss_size = 0;
3182 ss_sp = NULL;
3183 } else {
3184 error = -ENOMEM;
3185 if (ss_size < MINSIGSTKSZ)
3186 goto out;
3189 current->sas_ss_sp = (unsigned long) ss_sp;
3190 current->sas_ss_size = ss_size;
3193 error = 0;
3194 if (uoss) {
3195 error = -EFAULT;
3196 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3197 goto out;
3198 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3199 __put_user(oss.ss_size, &uoss->ss_size) |
3200 __put_user(oss.ss_flags, &uoss->ss_flags);
3203 out:
3204 return error;
3206 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3208 return do_sigaltstack(uss, uoss, current_user_stack_pointer());
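/*
 * Illustrative userspace sketch (not part of this file): installing
 * an alternate stack plus an SA_ONSTACK handler, the usual way to
 * survive a stack-overflow SIGSEGV.
 */
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void on_segv(int sig)
{
	write(2, "handled on the alternate stack\n", 31);
	_exit(0);
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	memset(&ss, 0, sizeof(ss));
	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	if (sigaltstack(&ss, NULL) == -1)
		return 1;
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;
	sigaction(SIGSEGV, &sa, NULL);
	raise(SIGSEGV);
	return 0;
}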
3211 int restore_altstack(const stack_t __user *uss)
3213 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3214 /* squash all but -EFAULT for now */
3215 return err == -EFAULT ? err : 0;
3218 int __save_altstack(stack_t __user *uss, unsigned long sp)
3220 struct task_struct *t = current;
3221 return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3222 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3223 __put_user(t->sas_ss_size, &uss->ss_size);
3226 #ifdef CONFIG_COMPAT
3227 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3228 const compat_stack_t __user *, uss_ptr,
3229 compat_stack_t __user *, uoss_ptr)
3231 stack_t uss, uoss;
3232 int ret;
3233 mm_segment_t seg;
3235 if (uss_ptr) {
3236 compat_stack_t uss32;
3238 memset(&uss, 0, sizeof(stack_t));
3239 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3240 return -EFAULT;
3241 uss.ss_sp = compat_ptr(uss32.ss_sp);
3242 uss.ss_flags = uss32.ss_flags;
3243 uss.ss_size = uss32.ss_size;
3245 seg = get_fs();
3246 set_fs(KERNEL_DS);
3247 ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3248 (stack_t __force __user *) &uoss,
3249 compat_user_stack_pointer());
3250 set_fs(seg);
3251 if (ret >= 0 && uoss_ptr) {
3252 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3253 __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3254 __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3255 __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3256 ret = -EFAULT;
3258 return ret;
3261 int compat_restore_altstack(const compat_stack_t __user *uss)
3263 int err = compat_sys_sigaltstack(uss, NULL);
3264 /* squash all but -EFAULT for now */
3265 return err == -EFAULT ? err : 0;
3268 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3270 struct task_struct *t = current;
3271 return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
3272 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3273 __put_user(t->sas_ss_size, &uss->ss_size);
3275 #endif
3277 #ifdef __ARCH_WANT_SYS_SIGPENDING
3280 * sys_sigpending - examine pending signals
3281 * @set: where the mask of pending signals is returned
3283 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3285 return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3288 #endif
3290 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3292 * sys_sigprocmask - examine and change blocked signals
3293 * @how: whether to add, remove, or set signals
3294 * @nset: signals to add or remove (if non-null)
3295 * @oset: previous value of signal mask if non-null
3297 * Some platforms have their own version with special arguments;
3298 * others support only sys_rt_sigprocmask.
3301 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3302 old_sigset_t __user *, oset)
3304 old_sigset_t old_set, new_set;
3305 sigset_t new_blocked;
3307 old_set = current->blocked.sig[0];
3309 if (nset) {
3310 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3311 return -EFAULT;
3313 new_blocked = current->blocked;
3315 switch (how) {
3316 case SIG_BLOCK:
3317 sigaddsetmask(&new_blocked, new_set);
3318 break;
3319 case SIG_UNBLOCK:
3320 sigdelsetmask(&new_blocked, new_set);
3321 break;
3322 case SIG_SETMASK:
3323 new_blocked.sig[0] = new_set;
3324 break;
3325 default:
3326 return -EINVAL;
3329 set_current_blocked(&new_blocked);
3332 if (oset) {
3333 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3334 return -EFAULT;
3337 return 0;
3339 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3341 #ifndef CONFIG_ODD_RT_SIGACTION
3343 * sys_rt_sigaction - alter an action taken by a process
3344 * @sig: signal to be sent
3345 * @act: new sigaction
3346 * @oact: used to save the previous sigaction
3347 * @sigsetsize: size of sigset_t type
3349 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3350 const struct sigaction __user *, act,
3351 struct sigaction __user *, oact,
3352 size_t, sigsetsize)
3354 struct k_sigaction new_sa, old_sa;
3355 int ret = -EINVAL;
3357 /* XXX: Don't preclude handling different sized sigset_t's. */
3358 if (sigsetsize != sizeof(sigset_t))
3359 goto out;
3361 if (act) {
3362 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3363 return -EFAULT;
3366 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3368 if (!ret && oact) {
3369 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3370 return -EFAULT;
3372 out:
3373 return ret;
3375 #ifdef CONFIG_COMPAT
3376 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3377 const struct compat_sigaction __user *, act,
3378 struct compat_sigaction __user *, oact,
3379 compat_size_t, sigsetsize)
3381 struct k_sigaction new_ka, old_ka;
3382 compat_sigset_t mask;
3383 #ifdef __ARCH_HAS_SA_RESTORER
3384 compat_uptr_t restorer;
3385 #endif
3386 int ret;
3388 /* XXX: Don't preclude handling different sized sigset_t's. */
3389 if (sigsetsize != sizeof(compat_sigset_t))
3390 return -EINVAL;
3392 if (act) {
3393 compat_uptr_t handler;
3394 ret = get_user(handler, &act->sa_handler);
3395 new_ka.sa.sa_handler = compat_ptr(handler);
3396 #ifdef __ARCH_HAS_SA_RESTORER
3397 ret |= get_user(restorer, &act->sa_restorer);
3398 new_ka.sa.sa_restorer = compat_ptr(restorer);
3399 #endif
3400 ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3401 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3402 if (ret)
3403 return -EFAULT;
3404 sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3407 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3408 if (!ret && oact) {
3409 sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3410 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3411 &oact->sa_handler);
3412 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3413 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3414 #ifdef __ARCH_HAS_SA_RESTORER
3415 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3416 &oact->sa_restorer);
3417 #endif
3419 return ret;
3421 #endif
3422 #endif /* !CONFIG_ODD_RT_SIGACTION */
3424 #ifdef CONFIG_OLD_SIGACTION
3425 SYSCALL_DEFINE3(sigaction, int, sig,
3426 const struct old_sigaction __user *, act,
3427 struct old_sigaction __user *, oact)
3429 struct k_sigaction new_ka, old_ka;
3430 int ret;
3432 if (act) {
3433 old_sigset_t mask;
3434 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3435 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3436 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3437 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3438 __get_user(mask, &act->sa_mask))
3439 return -EFAULT;
3440 #ifdef __ARCH_HAS_KA_RESTORER
3441 new_ka.ka_restorer = NULL;
3442 #endif
3443 siginitset(&new_ka.sa.sa_mask, mask);
3446 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3448 if (!ret && oact) {
3449 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3450 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3451 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3452 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3453 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3454 return -EFAULT;
3457 return ret;
3459 #endif
3460 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3461 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3462 const struct compat_old_sigaction __user *, act,
3463 struct compat_old_sigaction __user *, oact)
3465 struct k_sigaction new_ka, old_ka;
3466 int ret;
3467 compat_old_sigset_t mask;
3468 compat_uptr_t handler, restorer;
3470 if (act) {
3471 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3472 __get_user(handler, &act->sa_handler) ||
3473 __get_user(restorer, &act->sa_restorer) ||
3474 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3475 __get_user(mask, &act->sa_mask))
3476 return -EFAULT;
3478 #ifdef __ARCH_HAS_KA_RESTORER
3479 new_ka.ka_restorer = NULL;
3480 #endif
3481 new_ka.sa.sa_handler = compat_ptr(handler);
3482 new_ka.sa.sa_restorer = compat_ptr(restorer);
3483 siginitset(&new_ka.sa.sa_mask, mask);
3486 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3488 if (!ret && oact) {
3489 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3490 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3491 &oact->sa_handler) ||
3492 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3493 &oact->sa_restorer) ||
3494 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3495 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3496 return -EFAULT;
3498 return ret;
3500 #endif
3502 #ifdef CONFIG_SGETMASK_SYSCALL
3505 * For backwards compatibility. Functionality superseded by sigprocmask.
3507 SYSCALL_DEFINE0(sgetmask)
3509 /* SMP safe */
3510 return current->blocked.sig[0];
3513 SYSCALL_DEFINE1(ssetmask, int, newmask)
3515 int old = current->blocked.sig[0];
3516 sigset_t newset;
3518 siginitset(&newset, newmask);
3519 set_current_blocked(&newset);
3521 return old;
3523 #endif /* CONFIG_SGETMASK_SYSCALL */
3525 #ifdef __ARCH_WANT_SYS_SIGNAL
3527 * For backwards compatibility. Functionality superseded by sigaction.
3529 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3531 struct k_sigaction new_sa, old_sa;
3532 int ret;
3534 new_sa.sa.sa_handler = handler;
3535 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3536 sigemptyset(&new_sa.sa.sa_mask);
3538 ret = do_sigaction(sig, &new_sa, &old_sa);
3540 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3542 #endif /* __ARCH_WANT_SYS_SIGNAL */
3544 #ifdef __ARCH_WANT_SYS_PAUSE
3546 SYSCALL_DEFINE0(pause)
3548 while (!signal_pending(current)) {
3549 __set_current_state(TASK_INTERRUPTIBLE);
3550 schedule();
3552 return -ERESTARTNOHAND;
3555 #endif
3557 int sigsuspend(sigset_t *set)
3559 current->saved_sigmask = current->blocked;
3560 set_current_blocked(set);
3562 __set_current_state(TASK_INTERRUPTIBLE);
3563 schedule();
3564 set_restore_sigmask();
3565 return -ERESTARTNOHAND;
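/*
 * Illustrative userspace sketch (not part of this file): the classic
 * race-free wait built on the atomic mask swap performed above; the
 * signal cannot slip in between the flag check and the sleep.
 */
#include <signal.h>
#include <string.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
	got_usr1 = 1;
}

int main(void)
{
	sigset_t block, old;
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_usr1;
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);	/* close the race window */
	while (!got_usr1)
		sigsuspend(&old);	/* atomically unblock and sleep */
	sigprocmask(SIG_SETMASK, &old, NULL);
	return 0;
}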
3569 * sys_rt_sigsuspend - replace the signal mask with the @unewset
3570 * value until a signal is received
3571 * @unewset: new signal mask value
3572 * @sigsetsize: size of sigset_t type
3574 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3576 sigset_t newset;
3578 /* XXX: Don't preclude handling different sized sigset_t's. */
3579 if (sigsetsize != sizeof(sigset_t))
3580 return -EINVAL;
3582 if (copy_from_user(&newset, unewset, sizeof(newset)))
3583 return -EFAULT;
3584 return sigsuspend(&newset);
3587 #ifdef CONFIG_COMPAT
3588 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3590 #ifdef __BIG_ENDIAN
3591 sigset_t newset;
3592 compat_sigset_t newset32;
3594 /* XXX: Don't preclude handling different sized sigset_t's. */
3595 if (sigsetsize != sizeof(sigset_t))
3596 return -EINVAL;
3598 if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3599 return -EFAULT;
3600 sigset_from_compat(&newset, &newset32);
3601 return sigsuspend(&newset);
3602 #else
3603 /* on little-endian, bitmaps don't care about granularity */
3604 return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3605 #endif
3607 #endif
3609 #ifdef CONFIG_OLD_SIGSUSPEND
3610 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3612 sigset_t blocked;
3613 siginitset(&blocked, mask);
3614 return sigsuspend(&blocked);
3616 #endif
3617 #ifdef CONFIG_OLD_SIGSUSPEND3
3618 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3620 sigset_t blocked;
3621 siginitset(&blocked, mask);
3622 return sigsuspend(&blocked);
3624 #endif
3626 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3628 return NULL;
3631 void __init signals_init(void)
3633 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3636 #ifdef CONFIG_KGDB_KDB
3637 #include <linux/kdb.h>
3639 * kdb_send_sig_info - Allows kdb to send signals without exposing
3640 * signal internals. This function checks if the required locks are
3641 * available before calling the main signal code, to avoid kdb
3642 * deadlocks.
3644 void
3645 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3647 static struct task_struct *kdb_prev_t;
3648 int sig, new_t;
3649 if (!spin_trylock(&t->sighand->siglock)) {
3650 kdb_printf("Can't do kill command now.\n"
3651 "The sigmask lock is held somewhere else in "
3652 "kernel, try again later\n");
3653 return;
3655 spin_unlock(&t->sighand->siglock);
3656 new_t = kdb_prev_t != t;
3657 kdb_prev_t = t;
3658 if (t->state != TASK_RUNNING && new_t) {
3659 kdb_printf("Process is not RUNNING, sending a signal from "
3660 "kdb risks deadlock\n"
3661 "on the run queue locks. "
3662 "The signal has _not_ been sent.\n"
3663 "Reissue the kill command if you want to risk "
3664 "the deadlock.\n");
3665 return;
3667 sig = info->si_signo;
3668 if (send_sig_info(sig, info, t))
3669 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3670 sig, t->pid);
3671 else
3672 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3674 #endif /* CONFIG_KGDB_KDB */