1 /*
2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/coredump.h>
21 #include <linux/security.h>
22 #include <linux/syscalls.h>
23 #include <linux/ptrace.h>
24 #include <linux/signal.h>
25 #include <linux/signalfd.h>
26 #include <linux/ratelimit.h>
27 #include <linux/tracehook.h>
28 #include <linux/capability.h>
29 #include <linux/freezer.h>
30 #include <linux/pid_namespace.h>
31 #include <linux/nsproxy.h>
32 #include <linux/user_namespace.h>
33 #include <linux/uprobes.h>
34 #include <linux/compat.h>
35 #include <linux/cn_proc.h>
36 #include <linux/compiler.h>
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/signal.h>
41 #include <asm/param.h>
42 #include <asm/uaccess.h>
43 #include <asm/unistd.h>
44 #include <asm/siginfo.h>
45 #include <asm/cacheflush.h>
46 #include "audit.h" /* audit_signal_info() */
49 * SLAB caches for signal bits.
52 static struct kmem_cache *sigqueue_cachep;
54 int print_fatal_signals __read_mostly;
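/*
 * Return the handler (SIG_DFL, SIG_IGN or a userspace pointer) currently
 * installed for @sig in @t's shared sighand_struct.
 */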
56 static void __user *sig_handler(struct task_struct *t, int sig)
58 return t->sighand->action[sig - 1].sa.sa_handler;
61 static int sig_handler_ignored(void __user *handler, int sig)
63 /* Is it explicitly or implicitly ignored? */
64 return handler == SIG_IGN ||
65 (handler == SIG_DFL && sig_kernel_ignore(sig));
68 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
70 void __user *handler;
72 handler = sig_handler(t, sig);
74 /* SIGKILL and SIGSTOP may not be sent to the global init */
75 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
76 return true;
78 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
79 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
80 return 1;
82 /* Only allow kernel generated signals to this kthread */
83 if (unlikely((t->flags & PF_KTHREAD) &&
84 (handler == SIG_KTHREAD_KERNEL) && !force))
85 return true;
87 return sig_handler_ignored(handler, sig);
90 static int sig_ignored(struct task_struct *t, int sig, bool force)
93 * Blocked signals are never ignored, since the
94 * signal handler may change by the time it is
95 * unblocked.
97 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
98 return 0;
 101  * Tracers may want to know about even ignored signals, unless it
 102  * is SIGKILL, which can't be reported anyway but can be ignored
 103  * by a SIGNAL_UNKILLABLE task.
105 if (t->ptrace && sig != SIGKILL)
106 return 0;
108 return sig_task_ignored(t, sig, force);
112 * Re-calculate pending state from the set of locally pending
113 * signals, globally pending signals, and blocked signals.
115 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
117 unsigned long ready;
118 long i;
120 switch (_NSIG_WORDS) {
121 default:
122 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
123 ready |= signal->sig[i] &~ blocked->sig[i];
124 break;
126 case 4: ready = signal->sig[3] &~ blocked->sig[3];
127 ready |= signal->sig[2] &~ blocked->sig[2];
128 ready |= signal->sig[1] &~ blocked->sig[1];
129 ready |= signal->sig[0] &~ blocked->sig[0];
130 break;
132 case 2: ready = signal->sig[1] &~ blocked->sig[1];
133 ready |= signal->sig[0] &~ blocked->sig[0];
134 break;
136 case 1: ready = signal->sig[0] &~ blocked->sig[0];
138 return ready != 0;
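/*
 * Convenience wrapper: check a struct sigpending against a blocked set
 * using has_pending_signals() above.
 */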
141 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
143 static int recalc_sigpending_tsk(struct task_struct *t)
145 if ((t->jobctl & JOBCTL_PENDING_MASK) ||
146 PENDING(&t->pending, &t->blocked) ||
147 PENDING(&t->signal->shared_pending, &t->blocked)) {
148 set_tsk_thread_flag(t, TIF_SIGPENDING);
149 return 1;
152 * We must never clear the flag in another thread, or in current
153 * when it's possible the current syscall is returning -ERESTART*.
 154  * So we don't clear it here; only callers who know it is safe should do so.
156 return 0;
160 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
161 * This is superfluous when called on current, the wakeup is a harmless no-op.
163 void recalc_sigpending_and_wake(struct task_struct *t)
165 if (recalc_sigpending_tsk(t))
166 signal_wake_up(t, 0);
169 void recalc_sigpending(void)
171 if (!recalc_sigpending_tsk(current) && !freezing(current))
172 clear_thread_flag(TIF_SIGPENDING);
176 /* Given the mask, find the first available signal that should be serviced. */
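/*
 * Synchronous, fault-generated signals (SIGSEGV, SIGBUS, ...) are dequeued
 * before any other pending signal so the faulting context is handled first.
 */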
178 #define SYNCHRONOUS_MASK \
179 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
180 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
182 int next_signal(struct sigpending *pending, sigset_t *mask)
184 unsigned long i, *s, *m, x;
185 int sig = 0;
187 s = pending->signal.sig;
188 m = mask->sig;
191 * Handle the first word specially: it contains the
192 * synchronous signals that need to be dequeued first.
194 x = *s &~ *m;
195 if (x) {
196 if (x & SYNCHRONOUS_MASK)
197 x &= SYNCHRONOUS_MASK;
198 sig = ffz(~x) + 1;
199 return sig;
202 switch (_NSIG_WORDS) {
203 default:
204 for (i = 1; i < _NSIG_WORDS; ++i) {
205 x = *++s &~ *++m;
206 if (!x)
207 continue;
208 sig = ffz(~x) + i*_NSIG_BPW + 1;
209 break;
211 break;
213 case 2:
214 x = s[1] &~ m[1];
215 if (!x)
216 break;
217 sig = ffz(~x) + _NSIG_BPW + 1;
218 break;
220 case 1:
221 /* Nothing to do */
222 break;
225 return sig;
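/*
 * Rate-limited diagnostic printed when a signal is dropped because the
 * sender hit RLIMIT_SIGPENDING; only emitted if print_fatal_signals is set.
 */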
228 static inline void print_dropped_signal(int sig)
230 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
232 if (!print_fatal_signals)
233 return;
235 if (!__ratelimit(&ratelimit_state))
236 return;
238 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
239 current->comm, current->pid, sig);
243 * task_set_jobctl_pending - set jobctl pending bits
244 * @task: target task
245 * @mask: pending bits to set
 247  * Set @mask in @task->jobctl. @mask must be a subset of
248 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
249 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
250 * cleared. If @task is already being killed or exiting, this function
251 * becomes noop.
253 * CONTEXT:
254 * Must be called with @task->sighand->siglock held.
256 * RETURNS:
257 * %true if @mask is set, %false if made noop because @task was dying.
259 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
261 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
262 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
263 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
265 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
266 return false;
268 if (mask & JOBCTL_STOP_SIGMASK)
269 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
271 task->jobctl |= mask;
272 return true;
276 * task_clear_jobctl_trapping - clear jobctl trapping bit
277 * @task: target task
279 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
280 * Clear it and wake up the ptracer. Note that we don't need any further
281 * locking. @task->siglock guarantees that @task->parent points to the
282 * ptracer.
284 * CONTEXT:
285 * Must be called with @task->sighand->siglock held.
287 void task_clear_jobctl_trapping(struct task_struct *task)
289 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
290 task->jobctl &= ~JOBCTL_TRAPPING;
291 smp_mb(); /* advised by wake_up_bit() */
292 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
297 * task_clear_jobctl_pending - clear jobctl pending bits
298 * @task: target task
299 * @mask: pending bits to clear
301 * Clear @mask from @task->jobctl. @mask must be subset of
302 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
303 * STOP bits are cleared together.
305 * If clearing of @mask leaves no stop or trap pending, this function calls
306 * task_clear_jobctl_trapping().
308 * CONTEXT:
309 * Must be called with @task->sighand->siglock held.
311 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
313 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
315 if (mask & JOBCTL_STOP_PENDING)
316 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
318 task->jobctl &= ~mask;
320 if (!(task->jobctl & JOBCTL_PENDING_MASK))
321 task_clear_jobctl_trapping(task);
325 * task_participate_group_stop - participate in a group stop
326 * @task: task participating in a group stop
328 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
329 * Group stop states are cleared and the group stop count is consumed if
330 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
331 * stop, the appropriate %SIGNAL_* flags are set.
333 * CONTEXT:
334 * Must be called with @task->sighand->siglock held.
336 * RETURNS:
337 * %true if group stop completion should be notified to the parent, %false
338 * otherwise.
340 static bool task_participate_group_stop(struct task_struct *task)
342 struct signal_struct *sig = task->signal;
343 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
345 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
347 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
349 if (!consume)
350 return false;
352 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
353 sig->group_stop_count--;
356 * Tell the caller to notify completion iff we are entering into a
357 * fresh group stop. Read comment in do_signal_stop() for details.
359 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
360 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
361 return true;
363 return false;
367 * allocate a new signal queue record
368 * - this may be called without locks if and only if t == current, otherwise an
369 * appropriate lock must be held to stop the target task from exiting
371 static struct sigqueue *
372 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
374 struct sigqueue *q = NULL;
375 struct user_struct *user;
376 int sigpending;
379 * Protect access to @t credentials. This can go away when all
380 * callers hold rcu read lock.
382 * NOTE! A pending signal will hold on to the user refcount,
383 * and we get/put the refcount only when the sigpending count
384 * changes from/to zero.
386 rcu_read_lock();
387 user = __task_cred(t)->user;
388 sigpending = atomic_inc_return(&user->sigpending);
389 if (sigpending == 1)
390 get_uid(user);
391 rcu_read_unlock();
393 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
394 q = kmem_cache_alloc(sigqueue_cachep, flags);
395 } else {
396 print_dropped_signal(sig);
399 if (unlikely(q == NULL)) {
400 if (atomic_dec_and_test(&user->sigpending))
401 free_uid(user);
402 } else {
403 INIT_LIST_HEAD(&q->list);
404 q->flags = 0;
405 q->user = user;
408 return q;
411 static void __sigqueue_free(struct sigqueue *q)
413 if (q->flags & SIGQUEUE_PREALLOC)
414 return;
415 if (atomic_dec_and_test(&q->user->sigpending))
416 free_uid(q->user);
417 kmem_cache_free(sigqueue_cachep, q);
420 void flush_sigqueue(struct sigpending *queue)
422 struct sigqueue *q;
424 sigemptyset(&queue->signal);
425 while (!list_empty(&queue->list)) {
426 q = list_entry(queue->list.next, struct sigqueue , list);
427 list_del_init(&q->list);
428 __sigqueue_free(q);
433 * Flush all pending signals for this kthread.
435 void flush_signals(struct task_struct *t)
437 unsigned long flags;
439 spin_lock_irqsave(&t->sighand->siglock, flags);
440 clear_tsk_thread_flag(t, TIF_SIGPENDING);
441 flush_sigqueue(&t->pending);
442 flush_sigqueue(&t->signal->shared_pending);
443 spin_unlock_irqrestore(&t->sighand->siglock, flags);
446 static void __flush_itimer_signals(struct sigpending *pending)
448 sigset_t signal, retain;
449 struct sigqueue *q, *n;
451 signal = pending->signal;
452 sigemptyset(&retain);
454 list_for_each_entry_safe(q, n, &pending->list, list) {
455 int sig = q->info.si_signo;
457 if (likely(q->info.si_code != SI_TIMER)) {
458 sigaddset(&retain, sig);
459 } else {
460 sigdelset(&signal, sig);
461 list_del_init(&q->list);
462 __sigqueue_free(q);
466 sigorsets(&pending->signal, &signal, &retain);
469 void flush_itimer_signals(void)
471 struct task_struct *tsk = current;
472 unsigned long flags;
474 spin_lock_irqsave(&tsk->sighand->siglock, flags);
475 __flush_itimer_signals(&tsk->pending);
476 __flush_itimer_signals(&tsk->signal->shared_pending);
477 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
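/*
 * Set every signal handler of @t to SIG_IGN and discard anything already
 * pending; typically used to make kernel threads ignore all signals.
 */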
480 void ignore_signals(struct task_struct *t)
482 int i;
484 for (i = 0; i < _NSIG; ++i)
485 t->sighand->action[i].sa.sa_handler = SIG_IGN;
487 flush_signals(t);
491 * Flush all handlers for a task.
494 void
495 flush_signal_handlers(struct task_struct *t, int force_default)
497 int i;
498 struct k_sigaction *ka = &t->sighand->action[0];
499 for (i = _NSIG ; i != 0 ; i--) {
500 if (force_default || ka->sa.sa_handler != SIG_IGN)
501 ka->sa.sa_handler = SIG_DFL;
502 ka->sa.sa_flags = 0;
503 #ifdef __ARCH_HAS_SA_RESTORER
504 ka->sa.sa_restorer = NULL;
505 #endif
506 sigemptyset(&ka->sa.sa_mask);
507 ka++;
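/*
 * Return nonzero if nothing will handle @sig for @tsk: the task is global
 * init, or the handler is SIG_DFL/SIG_IGN and no ptracer is attached.
 */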
511 int unhandled_signal(struct task_struct *tsk, int sig)
513 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
514 if (is_global_init(tsk))
515 return 1;
516 if (handler != SIG_IGN && handler != SIG_DFL)
517 return 0;
518 /* if ptraced, let the tracer determine */
519 return !tsk->ptrace;
522 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
523 bool *resched_timer)
525 struct sigqueue *q, *first = NULL;
528 * Collect the siginfo appropriate to this signal. Check if
529 * there is another siginfo for the same signal.
531 list_for_each_entry(q, &list->list, list) {
532 if (q->info.si_signo == sig) {
533 if (first)
534 goto still_pending;
535 first = q;
539 sigdelset(&list->signal, sig);
541 if (first) {
542 still_pending:
543 list_del_init(&first->list);
544 copy_siginfo(info, &first->info);
546 *resched_timer =
547 (first->flags & SIGQUEUE_PREALLOC) &&
548 (info->si_code == SI_TIMER) &&
549 (info->si_sys_private);
551 __sigqueue_free(first);
552 } else {
554 * Ok, it wasn't in the queue. This must be
555 * a fast-pathed signal or we must have been
556 * out of queue space. So zero out the info.
558 info->si_signo = sig;
559 info->si_errno = 0;
560 info->si_code = SI_USER;
561 info->si_pid = 0;
562 info->si_uid = 0;
566 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
567 siginfo_t *info, bool *resched_timer)
569 int sig = next_signal(pending, mask);
571 if (sig)
572 collect_signal(sig, pending, info, resched_timer);
573 return sig;
577 * Dequeue a signal and return the element to the caller, which is
578 * expected to free it.
580 * All callers have to hold the siglock.
582 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
584 bool resched_timer = false;
585 int signr;
587 /* We only dequeue private signals from ourselves, we don't let
588 * signalfd steal them
590 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
591 if (!signr) {
592 signr = __dequeue_signal(&tsk->signal->shared_pending,
593 mask, info, &resched_timer);
595 * itimer signal ?
597 * itimers are process shared and we restart periodic
598 * itimers in the signal delivery path to prevent DoS
599 * attacks in the high resolution timer case. This is
600 * compliant with the old way of self-restarting
601 * itimers, as the SIGALRM is a legacy signal and only
602 * queued once. Changing the restart behaviour to
603 * restart the timer in the signal dequeue path is
604 * reducing the timer noise on heavy loaded !highres
605 * systems too.
607 if (unlikely(signr == SIGALRM)) {
608 struct hrtimer *tmr = &tsk->signal->real_timer;
610 if (!hrtimer_is_queued(tmr) &&
611 tsk->signal->it_real_incr.tv64 != 0) {
612 hrtimer_forward(tmr, tmr->base->get_time(),
613 tsk->signal->it_real_incr);
614 hrtimer_restart(tmr);
619 recalc_sigpending();
620 if (!signr)
621 return 0;
623 if (unlikely(sig_kernel_stop(signr))) {
625 * Set a marker that we have dequeued a stop signal. Our
626 * caller might release the siglock and then the pending
627 * stop signal it is about to process is no longer in the
628 * pending bitmasks, but must still be cleared by a SIGCONT
629 * (and overruled by a SIGKILL). So those cases clear this
630 * shared flag after we've set it. Note that this flag may
631 * remain set after the signal we return is ignored or
632 * handled. That doesn't matter because its only purpose
633 * is to alert stop-signal processing code when another
634 * processor has come along and cleared the flag.
636 current->jobctl |= JOBCTL_STOP_DEQUEUED;
638 if (resched_timer) {
640 * Release the siglock to ensure proper locking order
641 * of timer locks outside of siglocks. Note, we leave
642 * irqs disabled here, since the posix-timers code is
643 * about to disable them again anyway.
645 spin_unlock(&tsk->sighand->siglock);
646 do_schedule_next_timer(info);
647 spin_lock(&tsk->sighand->siglock);
649 return signr;
653 * Tell a process that it has a new active signal..
655 * NOTE! we rely on the previous spin_lock to
656 * lock interrupts for us! We can only be called with
657 * "siglock" held, and the local interrupt must
658 * have been disabled when that got acquired!
660 * No need to set need_resched since signal event passing
661 * goes through ->blocked
663 void signal_wake_up_state(struct task_struct *t, unsigned int state)
665 set_tsk_thread_flag(t, TIF_SIGPENDING);
667 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
668 * case. We don't check t->state here because there is a race with it
 669  * executing on another processor and just now entering stopped state.
670 * By using wake_up_state, we ensure the process will wake up and
671 * handle its death signal.
673 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
674 kick_process(t);
678 * Remove signals in mask from the pending set and queue.
679 * Returns 1 if any signals were found.
681 * All callers must be holding the siglock.
683 static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
685 struct sigqueue *q, *n;
686 sigset_t m;
688 sigandsets(&m, mask, &s->signal);
689 if (sigisemptyset(&m))
690 return 0;
692 sigandnsets(&s->signal, &s->signal, mask);
693 list_for_each_entry_safe(q, n, &s->list, list) {
694 if (sigismember(mask, q->info.si_signo)) {
695 list_del_init(&q->list);
696 __sigqueue_free(q);
699 return 1;
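/*
 * The SEND_SIG_NOINFO/SEND_SIG_PRIV/SEND_SIG_FORCED "info" arguments are
 * small magic constants, not real siginfo pointers; is_si_special() spots
 * them and si_fromuser() tells whether the signal originated in userspace.
 */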
702 static inline int is_si_special(const struct siginfo *info)
704 return info <= SEND_SIG_FORCED;
707 static inline bool si_fromuser(const struct siginfo *info)
709 return info == SEND_SIG_NOINFO ||
710 (!is_si_special(info) && SI_FROMUSER(info));
713 static int dequeue_synchronous_signal(siginfo_t *info)
715 struct task_struct *tsk = current;
716 struct sigpending *pending = &tsk->pending;
717 struct sigqueue *q, *sync = NULL;
720 * Might a synchronous signal be in the queue?
722 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
723 return 0;
726 * Return the first synchronous signal in the queue.
728 list_for_each_entry(q, &pending->list, list) {
 729                 /* Synchronous signals have a positive si_code */
730 if ((q->info.si_code > SI_USER) &&
731 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
732 sync = q;
733 goto next;
736 return 0;
737 next:
739 * Check if there is another siginfo for the same signal.
741 list_for_each_entry_continue(q, &pending->list, list) {
742 if (q->info.si_signo == sync->info.si_signo)
743 goto still_pending;
746 sigdelset(&pending->signal, sync->info.si_signo);
747 recalc_sigpending();
748 still_pending:
749 list_del_init(&sync->list);
750 copy_siginfo(info, &sync->info);
751 __sigqueue_free(sync);
752 return info->si_signo;
756 * called with RCU read lock from check_kill_permission()
758 static int kill_ok_by_cred(struct task_struct *t)
760 const struct cred *cred = current_cred();
761 const struct cred *tcred = __task_cred(t);
763 if (uid_eq(cred->euid, tcred->suid) ||
764 uid_eq(cred->euid, tcred->uid) ||
765 uid_eq(cred->uid, tcred->suid) ||
766 uid_eq(cred->uid, tcred->uid))
767 return 1;
769 if (ns_capable(tcred->user_ns, CAP_KILL))
770 return 1;
772 return 0;
776 * Bad permissions for sending the signal
777 * - the caller must hold the RCU read lock
779 static int check_kill_permission(int sig, struct siginfo *info,
780 struct task_struct *t)
782 struct pid *sid;
783 int error;
785 if (!valid_signal(sig))
786 return -EINVAL;
788 if (!si_fromuser(info))
789 return 0;
791 error = audit_signal_info(sig, t); /* Let audit system see the signal */
792 if (error)
793 return error;
795 if (!same_thread_group(current, t) &&
796 !kill_ok_by_cred(t)) {
797 switch (sig) {
798 case SIGCONT:
799 sid = task_session(t);
801 * We don't return the error if sid == NULL. The
802 * task was unhashed, the caller must notice this.
804 if (!sid || sid == task_session(current))
805 break;
806 default:
807 return -EPERM;
811 return security_task_kill(t, info, sig, 0);
815 * ptrace_trap_notify - schedule trap to notify ptracer
816 * @t: tracee wanting to notify tracer
818 * This function schedules sticky ptrace trap which is cleared on the next
819 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
820 * ptracer.
822 * If @t is running, STOP trap will be taken. If trapped for STOP and
823 * ptracer is listening for events, tracee is woken up so that it can
824 * re-trap for the new event. If trapped otherwise, STOP trap will be
825 * eventually taken without returning to userland after the existing traps
826 * are finished by PTRACE_CONT.
828 * CONTEXT:
829 * Must be called with @task->sighand->siglock held.
831 static void ptrace_trap_notify(struct task_struct *t)
833 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
834 assert_spin_locked(&t->sighand->siglock);
836 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
837 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
841 * Handle magic process-wide effects of stop/continue signals. Unlike
842 * the signal actions, these happen immediately at signal-generation
843 * time regardless of blocking, ignoring, or handling. This does the
844 * actual continuing for SIGCONT, but not the actual stopping for stop
845 * signals. The process stop is done as a signal action for SIG_DFL.
847 * Returns true if the signal should be actually delivered, otherwise
848 * it should be dropped.
850 static bool prepare_signal(int sig, struct task_struct *p, bool force)
852 struct signal_struct *signal = p->signal;
853 struct task_struct *t;
854 sigset_t flush;
856 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
857 if (!(signal->flags & SIGNAL_GROUP_EXIT))
858 return sig == SIGKILL;
860 * The process is in the middle of dying, nothing to do.
862 } else if (sig_kernel_stop(sig)) {
864 * This is a stop signal. Remove SIGCONT from all queues.
866 siginitset(&flush, sigmask(SIGCONT));
867 flush_sigqueue_mask(&flush, &signal->shared_pending);
868 for_each_thread(p, t)
869 flush_sigqueue_mask(&flush, &t->pending);
870 } else if (sig == SIGCONT) {
871 unsigned int why;
873 * Remove all stop signals from all queues, wake all threads.
875 siginitset(&flush, SIG_KERNEL_STOP_MASK);
876 flush_sigqueue_mask(&flush, &signal->shared_pending);
877 for_each_thread(p, t) {
878 flush_sigqueue_mask(&flush, &t->pending);
879 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
880 if (likely(!(t->ptrace & PT_SEIZED)))
881 wake_up_state(t, __TASK_STOPPED);
882 else
883 ptrace_trap_notify(t);
887 * Notify the parent with CLD_CONTINUED if we were stopped.
889 * If we were in the middle of a group stop, we pretend it
890 * was already finished, and then continued. Since SIGCHLD
891 * doesn't queue we report only CLD_STOPPED, as if the next
892 * CLD_CONTINUED was dropped.
894 why = 0;
895 if (signal->flags & SIGNAL_STOP_STOPPED)
896 why |= SIGNAL_CLD_CONTINUED;
897 else if (signal->group_stop_count)
898 why |= SIGNAL_CLD_STOPPED;
900 if (why) {
902 * The first thread which returns from do_signal_stop()
903 * will take ->siglock, notice SIGNAL_CLD_MASK, and
904 * notify its parent. See get_signal_to_deliver().
906 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
907 signal->group_stop_count = 0;
908 signal->group_exit_code = 0;
912 return !sig_ignored(p, sig, force);
916 * Test if P wants to take SIG. After we've checked all threads with this,
917 * it's equivalent to finding no threads not blocking SIG. Any threads not
918 * blocking SIG were ruled out because they are not running and already
919 * have pending signals. Such threads will dequeue from the shared queue
920 * as soon as they're available, so putting the signal on the shared queue
921 * will be equivalent to sending it to one such thread.
923 static inline int wants_signal(int sig, struct task_struct *p)
925 if (sigismember(&p->blocked, sig))
926 return 0;
927 if (p->flags & PF_EXITING)
928 return 0;
929 if (sig == SIGKILL)
930 return 1;
931 if (task_is_stopped_or_traced(p))
932 return 0;
933 return task_curr(p) || !signal_pending(p);
936 static void complete_signal(int sig, struct task_struct *p, int group)
938 struct signal_struct *signal = p->signal;
939 struct task_struct *t;
942 * Now find a thread we can wake up to take the signal off the queue.
944 * If the main thread wants the signal, it gets first crack.
945 * Probably the least surprising to the average bear.
947 if (wants_signal(sig, p))
948 t = p;
949 else if (!group || thread_group_empty(p))
951 * There is just one thread and it does not need to be woken.
952 * It will dequeue unblocked signals before it runs again.
954 return;
955 else {
957 * Otherwise try to find a suitable thread.
959 t = signal->curr_target;
960 while (!wants_signal(sig, t)) {
961 t = next_thread(t);
962 if (t == signal->curr_target)
964 * No thread needs to be woken.
965 * Any eligible threads will see
966 * the signal in the queue soon.
968 return;
970 signal->curr_target = t;
974 * Found a killable thread. If the signal will be fatal,
975 * then start taking the whole group down immediately.
977 if (sig_fatal(p, sig) &&
978 !(signal->flags & SIGNAL_GROUP_EXIT) &&
979 !sigismember(&t->real_blocked, sig) &&
980 (sig == SIGKILL || !p->ptrace)) {
982 * This signal will be fatal to the whole group.
984 if (!sig_kernel_coredump(sig)) {
986 * Start a group exit and wake everybody up.
987 * This way we don't have other threads
988 * running and doing things after a slower
989 * thread has the fatal signal pending.
991 signal->flags = SIGNAL_GROUP_EXIT;
992 signal->group_exit_code = sig;
993 signal->group_stop_count = 0;
994 t = p;
995 do {
996 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
997 sigaddset(&t->pending.signal, SIGKILL);
998 signal_wake_up(t, 1);
999 } while_each_thread(p, t);
1000 return;
1005 * The signal is already in the shared-pending queue.
1006 * Tell the chosen thread to wake up and dequeue it.
1008 signal_wake_up(t, sig == SIGKILL);
1009 return;
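/*
 * Legacy (non-realtime) signals are never queued more than once, so a new
 * instance is dropped if the signal is already pending.
 */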
1012 static inline int legacy_queue(struct sigpending *signals, int sig)
1014 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1017 #ifdef CONFIG_USER_NS
1018 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1020 if (current_user_ns() == task_cred_xxx(t, user_ns))
1021 return;
1023 if (SI_FROMKERNEL(info))
1024 return;
1026 rcu_read_lock();
1027 info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1028 make_kuid(current_user_ns(), info->si_uid));
1029 rcu_read_unlock();
1031 #else
1032 static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1034 return;
1036 #endif
1038 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1039 int group, int from_ancestor_ns)
1041 struct sigpending *pending;
1042 struct sigqueue *q;
1043 int override_rlimit;
1044 int ret = 0, result;
1046 assert_spin_locked(&t->sighand->siglock);
1048 result = TRACE_SIGNAL_IGNORED;
1049 if (!prepare_signal(sig, t,
1050 from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
1051 goto ret;
1053 pending = group ? &t->signal->shared_pending : &t->pending;
1055 * Short-circuit ignored signals and support queuing
1056 * exactly one non-rt signal, so that we can get more
1057 * detailed information about the cause of the signal.
1059 result = TRACE_SIGNAL_ALREADY_PENDING;
1060 if (legacy_queue(pending, sig))
1061 goto ret;
1063 result = TRACE_SIGNAL_DELIVERED;
1065 * fast-pathed signals for kernel-internal things like SIGSTOP
1066 * or SIGKILL.
1068 if (info == SEND_SIG_FORCED)
1069 goto out_set;
1072 * Real-time signals must be queued if sent by sigqueue, or
1073 * some other real-time mechanism. It is implementation
1074 * defined whether kill() does so. We attempt to do so, on
1075 * the principle of least surprise, but since kill is not
1076 * allowed to fail with EAGAIN when low on memory we just
1077 * make sure at least one signal gets delivered and don't
1078 * pass on the info struct.
1080 if (sig < SIGRTMIN)
1081 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1082 else
1083 override_rlimit = 0;
1085 q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1086 override_rlimit);
1087 if (q) {
1088 list_add_tail(&q->list, &pending->list);
1089 switch ((unsigned long) info) {
1090 case (unsigned long) SEND_SIG_NOINFO:
1091 q->info.si_signo = sig;
1092 q->info.si_errno = 0;
1093 q->info.si_code = SI_USER;
1094 q->info.si_pid = task_tgid_nr_ns(current,
1095 task_active_pid_ns(t));
1096 q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1097 break;
1098 case (unsigned long) SEND_SIG_PRIV:
1099 q->info.si_signo = sig;
1100 q->info.si_errno = 0;
1101 q->info.si_code = SI_KERNEL;
1102 q->info.si_pid = 0;
1103 q->info.si_uid = 0;
1104 break;
1105 default:
1106 copy_siginfo(&q->info, info);
1107 if (from_ancestor_ns)
1108 q->info.si_pid = 0;
1109 break;
1112 userns_fixup_signal_uid(&q->info, t);
1114 } else if (!is_si_special(info)) {
1115 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1117 * Queue overflow, abort. We may abort if the
1118 * signal was rt and sent by user using something
1119 * other than kill().
1121 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1122 ret = -EAGAIN;
1123 goto ret;
1124 } else {
1126 * This is a silent loss of information. We still
1127 * send the signal, but the *info bits are lost.
1129 result = TRACE_SIGNAL_LOSE_INFO;
1133 out_set:
1134 signalfd_notify(t, sig);
1135 sigaddset(&pending->signal, sig);
1136 complete_signal(sig, t, group);
1137 ret:
1138 trace_signal_generate(sig, info, t, group, result);
1139 return ret;
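/*
 * Wrapper around __send_signal() that works out whether the sender lives in
 * an ancestor pid namespace of @t, so si_pid can be cleared when it would be
 * meaningless in the receiver's namespace.
 */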
1142 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1143 int group)
1145 int from_ancestor_ns = 0;
1147 #ifdef CONFIG_PID_NS
1148 from_ancestor_ns = si_fromuser(info) &&
1149 !task_pid_nr_ns(current, task_active_pid_ns(t));
1150 #endif
1152 return __send_signal(sig, info, t, group, from_ancestor_ns);
1155 static void print_fatal_signal(int signr)
1157 struct pt_regs *regs = signal_pt_regs();
1158 pr_info("potentially unexpected fatal signal %d.\n", signr);
1160 #if defined(__i386__) && !defined(__arch_um__)
1161 pr_info("code at %08lx: ", regs->ip);
1163 int i;
1164 for (i = 0; i < 16; i++) {
1165 unsigned char insn;
1167 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1168 break;
1169 pr_cont("%02x ", insn);
1172 pr_cont("\n");
1173 #endif
1174 preempt_disable();
1175 show_regs(regs);
1176 preempt_enable();
1179 static int __init setup_print_fatal_signals(char *str)
1181 get_option (&str, &print_fatal_signals);
1183 return 1;
1186 __setup("print-fatal-signals=", setup_print_fatal_signals);
1189 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1191 return send_signal(sig, info, p, 1);
1194 static int
1195 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1197 return send_signal(sig, info, t, 0);
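/*
 * Send @sig with @info to @p under the sighand lock; @group selects the
 * shared (process-wide) pending queue instead of the per-thread queue.
 */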
1200 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1201 bool group)
1203 unsigned long flags;
1204 int ret = -ESRCH;
1206 if (lock_task_sighand(p, &flags)) {
1207 ret = send_signal(sig, info, p, group);
1208 unlock_task_sighand(p, &flags);
1211 return ret;
1215 * Force a signal that the process can't ignore: if necessary
1216 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1218 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1219 * since we do not want to have a signal handler that was blocked
1220 * be invoked when user space had explicitly blocked it.
1222 * We don't want to have recursive SIGSEGV's etc, for example,
1223 * that is why we also clear SIGNAL_UNKILLABLE.
1226 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1228 unsigned long int flags;
1229 int ret, blocked, ignored;
1230 struct k_sigaction *action;
1232 spin_lock_irqsave(&t->sighand->siglock, flags);
1233 action = &t->sighand->action[sig-1];
1234 ignored = action->sa.sa_handler == SIG_IGN;
1235 blocked = sigismember(&t->blocked, sig);
1236 if (blocked || ignored) {
1237 action->sa.sa_handler = SIG_DFL;
1238 if (blocked) {
1239 sigdelset(&t->blocked, sig);
1240 recalc_sigpending_and_wake(t);
1243 if (action->sa.sa_handler == SIG_DFL)
1244 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1245 ret = specific_send_sig_info(sig, info, t);
1246 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1248 return ret;
1252 * Nuke all other threads in the group.
1254 int zap_other_threads(struct task_struct *p)
1256 struct task_struct *t = p;
1257 int count = 0;
1259 p->signal->group_stop_count = 0;
1261 while_each_thread(p, t) {
1262 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1263 count++;
1265 /* Don't bother with already dead threads */
1266 if (t->exit_state)
1267 continue;
1268 sigaddset(&t->pending.signal, SIGKILL);
1269 signal_wake_up(t, 1);
1272 return count;
1275 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1276 unsigned long *flags)
1278 struct sighand_struct *sighand;
1280 for (;;) {
1282 * Disable interrupts early to avoid deadlocks.
1283 * See rcu_read_unlock() comment header for details.
1285 local_irq_save(*flags);
1286 rcu_read_lock();
1287 sighand = rcu_dereference(tsk->sighand);
1288 if (unlikely(sighand == NULL)) {
1289 rcu_read_unlock();
1290 local_irq_restore(*flags);
1291 break;
1294 * This sighand can be already freed and even reused, but
1295 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
1296 * initializes ->siglock: this slab can't go away, it has
1297 * the same object type, ->siglock can't be reinitialized.
1299 * We need to ensure that tsk->sighand is still the same
1300 * after we take the lock, we can race with de_thread() or
1301 * __exit_signal(). In the latter case the next iteration
1302 * must see ->sighand == NULL.
1304 spin_lock(&sighand->siglock);
1305 if (likely(sighand == tsk->sighand)) {
1306 rcu_read_unlock();
1307 break;
1309 spin_unlock(&sighand->siglock);
1310 rcu_read_unlock();
1311 local_irq_restore(*flags);
1314 return sighand;
1318 * send signal info to all the members of a group
1320 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1322 int ret;
1324 rcu_read_lock();
1325 ret = check_kill_permission(sig, info, p);
1326 rcu_read_unlock();
1328 if (!ret && sig)
1329 ret = do_send_sig_info(sig, info, p, true);
1331 return ret;
1335 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1336 * control characters do (^C, ^Z etc)
1337 * - the caller must hold at least a readlock on tasklist_lock
1339 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1341 struct task_struct *p = NULL;
1342 int retval, success;
1344 success = 0;
1345 retval = -ESRCH;
1346 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1347 int err = group_send_sig_info(sig, info, p);
1348 success |= !err;
1349 retval = err;
1350 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1351 return success ? 0 : retval;
1354 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1356 int error = -ESRCH;
1357 struct task_struct *p;
1359 for (;;) {
1360 rcu_read_lock();
1361 p = pid_task(pid, PIDTYPE_PID);
1362 if (p)
1363 error = group_send_sig_info(sig, info, p);
1364 rcu_read_unlock();
1365 if (likely(!p || error != -ESRCH))
1366 return error;
1369 * The task was unhashed in between, try again. If it
1370 * is dead, pid_task() will return NULL, if we race with
1371 * de_thread() it will find the new leader.
1376 int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1378 int error;
1379 rcu_read_lock();
1380 error = kill_pid_info(sig, info, find_vpid(pid));
1381 rcu_read_unlock();
1382 return error;
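/*
 * Permission check used by kill_pid_info_as_cred(): the supplied euid/uid
 * must match the target's uid or saved uid, mirroring the usual kill() rules.
 */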
1385 static int kill_as_cred_perm(const struct cred *cred,
1386 struct task_struct *target)
1388 const struct cred *pcred = __task_cred(target);
1389 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1390 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
1391 return 0;
1392 return 1;
1395 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1396 int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1397 const struct cred *cred, u32 secid)
1399 int ret = -EINVAL;
1400 struct task_struct *p;
1401 unsigned long flags;
1403 if (!valid_signal(sig))
1404 return ret;
1406 rcu_read_lock();
1407 p = pid_task(pid, PIDTYPE_PID);
1408 if (!p) {
1409 ret = -ESRCH;
1410 goto out_unlock;
1412 if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1413 ret = -EPERM;
1414 goto out_unlock;
1416 ret = security_task_kill(p, info, sig, secid);
1417 if (ret)
1418 goto out_unlock;
1420 if (sig) {
1421 if (lock_task_sighand(p, &flags)) {
1422 ret = __send_signal(sig, info, p, 1, 0);
1423 unlock_task_sighand(p, &flags);
1424 } else
1425 ret = -ESRCH;
1427 out_unlock:
1428 rcu_read_unlock();
1429 return ret;
1431 EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1434 * kill_something_info() interprets pid in interesting ways just like kill(2).
1436 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1437 * is probably wrong. Should make it like BSD or SYSV.
1440 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1442 int ret;
1444 if (pid > 0) {
1445 rcu_read_lock();
1446 ret = kill_pid_info(sig, info, find_vpid(pid));
1447 rcu_read_unlock();
1448 return ret;
1451 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1452 if (pid == INT_MIN)
1453 return -ESRCH;
1455 read_lock(&tasklist_lock);
1456 if (pid != -1) {
1457 ret = __kill_pgrp_info(sig, info,
1458 pid ? find_vpid(-pid) : task_pgrp(current));
1459 } else {
1460 int retval = 0, count = 0;
1461 struct task_struct * p;
1463 for_each_process(p) {
1464 if (task_pid_vnr(p) > 1 &&
1465 !same_thread_group(p, current)) {
1466 int err = group_send_sig_info(sig, info, p);
1467 ++count;
1468 if (err != -EPERM)
1469 retval = err;
1472 ret = count ? retval : -ESRCH;
1474 read_unlock(&tasklist_lock);
1476 return ret;
1480 * These are for backward compatibility with the rest of the kernel source.
1483 int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1486 * Make sure legacy kernel users don't send in bad values
1487 * (normal paths check this in check_kill_permission).
1489 if (!valid_signal(sig))
1490 return -EINVAL;
1492 return do_send_sig_info(sig, info, p, false);
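/*
 * Map the legacy "priv" flag onto the special info constants: privileged
 * (kernel-internal) senders use SEND_SIG_PRIV, everything else SEND_SIG_NOINFO.
 */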
1495 #define __si_special(priv) \
1496 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1499 send_sig(int sig, struct task_struct *p, int priv)
1501 return send_sig_info(sig, __si_special(priv), p);
1504 void
1505 force_sig(int sig, struct task_struct *p)
1507 force_sig_info(sig, SEND_SIG_PRIV, p);
1511 * When things go south during signal handling, we
1512 * will force a SIGSEGV. And if the signal that caused
1513 * the problem was already a SIGSEGV, we'll want to
1514 * make sure we don't even try to deliver the signal..
1517 force_sigsegv(int sig, struct task_struct *p)
1519 if (sig == SIGSEGV) {
1520 unsigned long flags;
1521 spin_lock_irqsave(&p->sighand->siglock, flags);
1522 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1523 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1525 force_sig(SIGSEGV, p);
1526 return 0;
1529 int kill_pgrp(struct pid *pid, int sig, int priv)
1531 int ret;
1533 read_lock(&tasklist_lock);
1534 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1535 read_unlock(&tasklist_lock);
1537 return ret;
1539 EXPORT_SYMBOL(kill_pgrp);
1541 int kill_pid(struct pid *pid, int sig, int priv)
1543 return kill_pid_info(sig, __si_special(priv), pid);
1545 EXPORT_SYMBOL(kill_pid);
1548 * These functions support sending signals using preallocated sigqueue
1549 * structures. This is needed "because realtime applications cannot
1550 * afford to lose notifications of asynchronous events, like timer
1551 * expirations or I/O completions". In the case of POSIX Timers
 1552  * we allocate the sigqueue structure at timer_create() time. If this
1553 * allocation fails we are able to report the failure to the application
1554 * with an EAGAIN error.
1556 struct sigqueue *sigqueue_alloc(void)
1558 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1560 if (q)
1561 q->flags |= SIGQUEUE_PREALLOC;
1563 return q;
1566 void sigqueue_free(struct sigqueue *q)
1568 unsigned long flags;
1569 spinlock_t *lock = &current->sighand->siglock;
1571 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1573 * We must hold ->siglock while testing q->list
1574 * to serialize with collect_signal() or with
1575 * __exit_signal()->flush_sigqueue().
1577 spin_lock_irqsave(lock, flags);
1578 q->flags &= ~SIGQUEUE_PREALLOC;
1580 * If it is queued it will be freed when dequeued,
1581 * like the "regular" sigqueue.
1583 if (!list_empty(&q->list))
1584 q = NULL;
1585 spin_unlock_irqrestore(lock, flags);
1587 if (q)
1588 __sigqueue_free(q);
1591 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1593 int sig = q->info.si_signo;
1594 struct sigpending *pending;
1595 unsigned long flags;
1596 int ret, result;
1598 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1600 ret = -1;
1601 if (!likely(lock_task_sighand(t, &flags)))
1602 goto ret;
1604 ret = 1; /* the signal is ignored */
1605 result = TRACE_SIGNAL_IGNORED;
1606 if (!prepare_signal(sig, t, false))
1607 goto out;
1609 ret = 0;
1610 if (unlikely(!list_empty(&q->list))) {
 1612                  * If an SI_TIMER entry is already queued, just increment
1613 * the overrun count.
1615 BUG_ON(q->info.si_code != SI_TIMER);
1616 q->info.si_overrun++;
1617 result = TRACE_SIGNAL_ALREADY_PENDING;
1618 goto out;
1620 q->info.si_overrun = 0;
1622 signalfd_notify(t, sig);
1623 pending = group ? &t->signal->shared_pending : &t->pending;
1624 list_add_tail(&q->list, &pending->list);
1625 sigaddset(&pending->signal, sig);
1626 complete_signal(sig, t, group);
1627 result = TRACE_SIGNAL_DELIVERED;
1628 out:
1629 trace_signal_generate(sig, &q->info, t, group, result);
1630 unlock_task_sighand(t, &flags);
1631 ret:
1632 return ret;
1636 * Let a parent know about the death of a child.
1637 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1639 * Returns true if our parent ignored us and so we've switched to
1640 * self-reaping.
1642 bool do_notify_parent(struct task_struct *tsk, int sig)
1644 struct siginfo info;
1645 unsigned long flags;
1646 struct sighand_struct *psig;
1647 bool autoreap = false;
1648 cputime_t utime, stime;
1650 BUG_ON(sig == -1);
1652 /* do_notify_parent_cldstop should have been called instead. */
1653 BUG_ON(task_is_stopped_or_traced(tsk));
1655 BUG_ON(!tsk->ptrace &&
1656 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1658 if (sig != SIGCHLD) {
1660 * This is only possible if parent == real_parent.
1661 * Check if it has changed security domain.
1663 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
1664 sig = SIGCHLD;
1667 info.si_signo = sig;
1668 info.si_errno = 0;
1670 * We are under tasklist_lock here so our parent is tied to
1671 * us and cannot change.
1673 * task_active_pid_ns will always return the same pid namespace
1674 * until a task passes through release_task.
1676 * write_lock() currently calls preempt_disable() which is the
 1677  * same as rcu_read_lock(), but according to Oleg it is not
 1678  * correct to rely on this.
1680 rcu_read_lock();
1681 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1682 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1683 task_uid(tsk));
1684 rcu_read_unlock();
1686 task_cputime(tsk, &utime, &stime);
1687 info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
1688 info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
1690 info.si_status = tsk->exit_code & 0x7f;
1691 if (tsk->exit_code & 0x80)
1692 info.si_code = CLD_DUMPED;
1693 else if (tsk->exit_code & 0x7f)
1694 info.si_code = CLD_KILLED;
1695 else {
1696 info.si_code = CLD_EXITED;
1697 info.si_status = tsk->exit_code >> 8;
1700 psig = tsk->parent->sighand;
1701 spin_lock_irqsave(&psig->siglock, flags);
1702 if (!tsk->ptrace && sig == SIGCHLD &&
1703 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1704 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1706 * We are exiting and our parent doesn't care. POSIX.1
1707 * defines special semantics for setting SIGCHLD to SIG_IGN
1708 * or setting the SA_NOCLDWAIT flag: we should be reaped
1709 * automatically and not left for our parent's wait4 call.
1710 * Rather than having the parent do it as a magic kind of
1711 * signal handler, we just set this to tell do_exit that we
1712 * can be cleaned up without becoming a zombie. Note that
1713 * we still call __wake_up_parent in this case, because a
1714 * blocked sys_wait4 might now return -ECHILD.
1716 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1717 * is implementation-defined: we do (if you don't want
1718 * it, just use SIG_IGN instead).
1720 autoreap = true;
1721 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1722 sig = 0;
1724 if (valid_signal(sig) && sig)
1725 __group_send_sig_info(sig, &info, tsk->parent);
1726 __wake_up_parent(tsk, tsk->parent);
1727 spin_unlock_irqrestore(&psig->siglock, flags);
1729 return autoreap;
1733 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1734 * @tsk: task reporting the state change
1735 * @for_ptracer: the notification is for ptracer
1736 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1738 * Notify @tsk's parent that the stopped/continued state has changed. If
1739 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1740 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1742 * CONTEXT:
1743 * Must be called with tasklist_lock at least read locked.
1745 static void do_notify_parent_cldstop(struct task_struct *tsk,
1746 bool for_ptracer, int why)
1748 struct siginfo info;
1749 unsigned long flags;
1750 struct task_struct *parent;
1751 struct sighand_struct *sighand;
1752 cputime_t utime, stime;
1754 if (for_ptracer) {
1755 parent = tsk->parent;
1756 } else {
1757 tsk = tsk->group_leader;
1758 parent = tsk->real_parent;
1761 info.si_signo = SIGCHLD;
1762 info.si_errno = 0;
1764 * see comment in do_notify_parent() about the following 4 lines
1766 rcu_read_lock();
1767 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1768 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1769 rcu_read_unlock();
1771 task_cputime(tsk, &utime, &stime);
1772 info.si_utime = cputime_to_clock_t(utime);
1773 info.si_stime = cputime_to_clock_t(stime);
1775 info.si_code = why;
1776 switch (why) {
1777 case CLD_CONTINUED:
1778 info.si_status = SIGCONT;
1779 break;
1780 case CLD_STOPPED:
1781 info.si_status = tsk->signal->group_exit_code & 0x7f;
1782 break;
1783 case CLD_TRAPPED:
1784 info.si_status = tsk->exit_code & 0x7f;
1785 break;
1786 default:
1787 BUG();
1790 sighand = parent->sighand;
1791 spin_lock_irqsave(&sighand->siglock, flags);
1792 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1793 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1794 __group_send_sig_info(SIGCHLD, &info, parent);
1796 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1798 __wake_up_parent(tsk, parent);
1799 spin_unlock_irqrestore(&sighand->siglock, flags);
1802 static inline int may_ptrace_stop(void)
1804 if (!likely(current->ptrace))
1805 return 0;
1807 * Are we in the middle of do_coredump?
 1808  * If so, and our tracer is also part of the coredump, stopping
 1809  * is a deadlock situation and pointless because our tracer
 1810  * is dead, so don't allow us to stop.
1811 * If SIGKILL was already sent before the caller unlocked
1812 * ->siglock we must see ->core_state != NULL. Otherwise it
1813 * is safe to enter schedule().
1815 * This is almost outdated, a task with the pending SIGKILL can't
1816 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1817 * after SIGKILL was already dequeued.
1819 if (unlikely(current->mm->core_state) &&
1820 unlikely(current->mm == current->parent->mm))
1821 return 0;
1823 return 1;
1827 * Return non-zero if there is a SIGKILL that should be waking us up.
1828 * Called with the siglock held.
1830 static int sigkill_pending(struct task_struct *tsk)
1832 return sigismember(&tsk->pending.signal, SIGKILL) ||
1833 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1837 * This must be called with current->sighand->siglock held.
1839 * This should be the path for all ptrace stops.
1840 * We always set current->last_siginfo while stopped here.
1841 * That makes it a way to test a stopped process for
1842 * being ptrace-stopped vs being job-control-stopped.
1844 * If we actually decide not to stop at all because the tracer
1845 * is gone, we keep current->exit_code unless clear_code.
1847 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1848 __releases(&current->sighand->siglock)
1849 __acquires(&current->sighand->siglock)
1851 bool gstop_done = false;
1853 if (arch_ptrace_stop_needed(exit_code, info)) {
1855 * The arch code has something special to do before a
1856 * ptrace stop. This is allowed to block, e.g. for faults
1857 * on user stack pages. We can't keep the siglock while
1858 * calling arch_ptrace_stop, so we must release it now.
1859 * To preserve proper semantics, we must do this before
1860 * any signal bookkeeping like checking group_stop_count.
1861 * Meanwhile, a SIGKILL could come in before we retake the
1862 * siglock. That must prevent us from sleeping in TASK_TRACED.
1863 * So after regaining the lock, we must check for SIGKILL.
1865 spin_unlock_irq(&current->sighand->siglock);
1866 arch_ptrace_stop(exit_code, info);
1867 spin_lock_irq(&current->sighand->siglock);
1868 if (sigkill_pending(current))
1869 return;
1873 * We're committing to trapping. TRACED should be visible before
1874 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
1875 * Also, transition to TRACED and updates to ->jobctl should be
1876 * atomic with respect to siglock and should be done after the arch
1877 * hook as siglock is released and regrabbed across it.
1879 set_current_state(TASK_TRACED);
1881 current->last_siginfo = info;
1882 current->exit_code = exit_code;
1885 * If @why is CLD_STOPPED, we're trapping to participate in a group
 1886  * stop. Do the bookkeeping. Note that if SIGCONT was delivered
1887 * across siglock relocks since INTERRUPT was scheduled, PENDING
1888 * could be clear now. We act as if SIGCONT is received after
1889 * TASK_TRACED is entered - ignore it.
1891 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1892 gstop_done = task_participate_group_stop(current);
1894 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
1895 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1896 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1897 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1899 /* entering a trap, clear TRAPPING */
1900 task_clear_jobctl_trapping(current);
1902 spin_unlock_irq(&current->sighand->siglock);
1903 read_lock(&tasklist_lock);
1904 if (may_ptrace_stop()) {
1906 * Notify parents of the stop.
1908 * While ptraced, there are two parents - the ptracer and
1909 * the real_parent of the group_leader. The ptracer should
1910 * know about every stop while the real parent is only
1911 * interested in the completion of group stop. The states
1912 * for the two don't interact with each other. Notify
1913 * separately unless they're gonna be duplicates.
1915 do_notify_parent_cldstop(current, true, why);
1916 if (gstop_done && ptrace_reparented(current))
1917 do_notify_parent_cldstop(current, false, why);
1920 * Don't want to allow preemption here, because
1921 * sys_ptrace() needs this task to be inactive.
1923 * XXX: implement read_unlock_no_resched().
1925 preempt_disable();
1926 read_unlock(&tasklist_lock);
1927 preempt_enable_no_resched();
1928 freezable_schedule();
1929 } else {
1931 * By the time we got the lock, our tracer went away.
1932 * Don't drop the lock yet, another tracer may come.
1934 * If @gstop_done, the ptracer went away between group stop
1935 * completion and here. During detach, it would have set
1936 * JOBCTL_STOP_PENDING on us and we'll re-enter
1937 * TASK_STOPPED in do_signal_stop() on return, so notifying
1938 * the real parent of the group stop completion is enough.
1940 if (gstop_done)
1941 do_notify_parent_cldstop(current, false, why);
1943 /* tasklist protects us from ptrace_freeze_traced() */
1944 __set_current_state(TASK_RUNNING);
1945 if (clear_code)
1946 current->exit_code = 0;
1947 read_unlock(&tasklist_lock);
1951 * We are back. Now reacquire the siglock before touching
1952 * last_siginfo, so that we are sure to have synchronized with
1953 * any signal-sending on another CPU that wants to examine it.
1955 spin_lock_irq(&current->sighand->siglock);
1956 current->last_siginfo = NULL;
1958 /* LISTENING can be set only during STOP traps, clear it */
1959 current->jobctl &= ~JOBCTL_LISTENING;
1962 * Queued signals ignored us while we were stopped for tracing.
1963 * So check for any that we should take before resuming user mode.
1964 * This sets TIF_SIGPENDING, but never clears it.
1966 recalc_sigpending_tsk(current);
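/*
 * Build a minimal siginfo for a trap (signr, exit_code as si_code, sender
 * pid/uid) and enter ptrace_stop() so the tracer is notified; used by
 * ptrace_notify() and the jobctl trap paths.
 */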
1969 static void ptrace_do_notify(int signr, int exit_code, int why)
1971 siginfo_t info;
1973 memset(&info, 0, sizeof info);
1974 info.si_signo = signr;
1975 info.si_code = exit_code;
1976 info.si_pid = task_pid_vnr(current);
1977 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1979 /* Let the debugger run. */
1980 ptrace_stop(exit_code, why, 1, &info);
1983 void ptrace_notify(int exit_code)
1985 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1986 if (unlikely(current->task_works))
1987 task_work_run();
1989 spin_lock_irq(&current->sighand->siglock);
1990 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1991 spin_unlock_irq(&current->sighand->siglock);
1995 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
1996 * @signr: signr causing group stop if initiating
1998 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
1999 * and participate in it. If already set, participate in the existing
2000 * group stop. If participated in a group stop (and thus slept), %true is
2001 * returned with siglock released.
2003 * If ptraced, this function doesn't handle stop itself. Instead,
2004 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2005 * untouched. The caller must ensure that INTERRUPT trap handling takes
 2006  * place afterwards.
2008 * CONTEXT:
2009 * Must be called with @current->sighand->siglock held, which is released
2010 * on %true return.
2012 * RETURNS:
2013 * %false if group stop is already cancelled or ptrace trap is scheduled.
2014 * %true if participated in group stop.
2016 static bool do_signal_stop(int signr)
2017 __releases(&current->sighand->siglock)
2019 struct signal_struct *sig = current->signal;
2021 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2022 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2023 struct task_struct *t;
2025 /* signr will be recorded in task->jobctl for retries */
2026 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2028 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2029 unlikely(signal_group_exit(sig)))
2030 return false;
2032 * There is no group stop already in progress. We must
2033 * initiate one now.
2035 * While ptraced, a task may be resumed while group stop is
2036 * still in effect and then receive a stop signal and
2037 * initiate another group stop. This deviates from the
2038 * usual behavior as two consecutive stop signals can't
2039 * cause two group stops when !ptraced. That is why we
2040 * also check !task_is_stopped(t) below.
2042 * The condition can be distinguished by testing whether
2043 * SIGNAL_STOP_STOPPED is already set. Don't generate
2044 * group_exit_code in such case.
2046 * This is not necessary for SIGNAL_STOP_CONTINUED because
2047 * an intervening stop signal is required to cause two
2048 * continued events regardless of ptrace.
2050 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2051 sig->group_exit_code = signr;
2053 sig->group_stop_count = 0;
2055 if (task_set_jobctl_pending(current, signr | gstop))
2056 sig->group_stop_count++;
2058 t = current;
2059 while_each_thread(current, t) {
2061 * Setting state to TASK_STOPPED for a group
2062 * stop is always done with the siglock held,
2063 * so this check has no races.
2065 if (!task_is_stopped(t) &&
2066 task_set_jobctl_pending(t, signr | gstop)) {
2067 sig->group_stop_count++;
2068 if (likely(!(t->ptrace & PT_SEIZED)))
2069 signal_wake_up(t, 0);
2070 else
2071 ptrace_trap_notify(t);
2076 if (likely(!current->ptrace)) {
2077 int notify = 0;
2080 * If there are no other threads in the group, or if there
2081 * is a group stop in progress and we are the last to stop,
2082 * report to the parent.
2084 if (task_participate_group_stop(current))
2085 notify = CLD_STOPPED;
2087 __set_current_state(TASK_STOPPED);
2088 spin_unlock_irq(&current->sighand->siglock);
2091 * Notify the parent of the group stop completion. Because
2092 * we're not holding either the siglock or tasklist_lock
2093	 * here, a ptracer may attach in between; however, this is for
2094 * group stop and should always be delivered to the real
2095 * parent of the group leader. The new ptracer will get
2096 * its notification when this task transitions into
2097 * TASK_TRACED.
2099 if (notify) {
2100 read_lock(&tasklist_lock);
2101 do_notify_parent_cldstop(current, false, notify);
2102 read_unlock(&tasklist_lock);
2105 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2106 freezable_schedule();
2107 return true;
2108 } else {
2110 * While ptraced, group stop is handled by STOP trap.
2111 * Schedule it and let the caller deal with it.
2113 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2114 return false;
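/*
 * A rough user-space illustration of the group stop described above,
 * assuming the usual glibc wrappers; the child logic is illustrative only.
 * SIGSTOP stops every thread in the target, and the parent observes the
 * stop with waitpid(WUNTRACED) and the resume with waitpid(WCONTINUED).
 */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid < 0)
		return 1;
	if (pid == 0)				/* child: idle until killed */
		for (;;)
			pause();

	kill(pid, SIGSTOP);			/* initiate a group stop */
	waitpid(pid, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);			/* resume the whole group */
	waitpid(pid, &status, WCONTINUED);
	if (WIFCONTINUED(status))
		printf("continued\n");

	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);
	return 0;
}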
2119 * do_jobctl_trap - take care of ptrace jobctl traps
2121 * When PT_SEIZED, it's used for both group stop and explicit
2122 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2123 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2124 * the stop signal; otherwise, %SIGTRAP.
2126 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2127 * number as exit_code and no siginfo.
2129 * CONTEXT:
2130 * Must be called with @current->sighand->siglock held, which may be
2131 * released and re-acquired before returning with intervening sleep.
2133 static void do_jobctl_trap(void)
2135 struct signal_struct *signal = current->signal;
2136 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2138 if (current->ptrace & PT_SEIZED) {
2139 if (!signal->group_stop_count &&
2140 !(signal->flags & SIGNAL_STOP_STOPPED))
2141 signr = SIGTRAP;
2142 WARN_ON_ONCE(!signr);
2143 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2144 CLD_STOPPED);
2145 } else {
2146 WARN_ON_ONCE(!signr);
2147 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2148 current->exit_code = 0;
2152 static int ptrace_signal(int signr, siginfo_t *info)
2154 ptrace_signal_deliver();
2156 * We do not check sig_kernel_stop(signr) but set this marker
2157	 * unconditionally because we do not know whether the debugger will
2158 * change signr. This flag has no meaning unless we are going
2159 * to stop after return from ptrace_stop(). In this case it will
2160 * be checked in do_signal_stop(), we should only stop if it was
2161 * not cleared by SIGCONT while we were sleeping. See also the
2162 * comment in dequeue_signal().
2164 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2165 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2167 /* We're back. Did the debugger cancel the sig? */
2168 signr = current->exit_code;
2169 if (signr == 0)
2170 return signr;
2172 current->exit_code = 0;
2175 * Update the siginfo structure if the signal has
2176 * changed. If the debugger wanted something
2177 * specific in the siginfo structure then it should
2178 * have updated *info via PTRACE_SETSIGINFO.
2180 if (signr != info->si_signo) {
2181 info->si_signo = signr;
2182 info->si_errno = 0;
2183 info->si_code = SI_USER;
2184 rcu_read_lock();
2185 info->si_pid = task_pid_vnr(current->parent);
2186 info->si_uid = from_kuid_munged(current_user_ns(),
2187 task_uid(current->parent));
2188 rcu_read_unlock();
2191 /* If the (new) signal is now blocked, requeue it. */
2192 if (sigismember(&current->blocked, signr)) {
2193 specific_send_sig_info(signr, info, current);
2194 signr = 0;
2197 return signr;
2200 int get_signal(struct ksignal *ksig)
2202 struct sighand_struct *sighand = current->sighand;
2203 struct signal_struct *signal = current->signal;
2204 int signr;
2206 if (unlikely(current->task_works))
2207 task_work_run();
2209 if (unlikely(uprobe_deny_signal()))
2210 return 0;
2213 * Do this once, we can't return to user-mode if freezing() == T.
2214 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2215 * thus do not need another check after return.
2217 try_to_freeze();
2219 relock:
2220 spin_lock_irq(&sighand->siglock);
2222 * Every stopped thread goes here after wakeup. Check to see if
2223	 * we should notify the parent; prepare_signal(SIGCONT) encodes
2224 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2226 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2227 int why;
2229 if (signal->flags & SIGNAL_CLD_CONTINUED)
2230 why = CLD_CONTINUED;
2231 else
2232 why = CLD_STOPPED;
2234 signal->flags &= ~SIGNAL_CLD_MASK;
2236 spin_unlock_irq(&sighand->siglock);
2239 * Notify the parent that we're continuing. This event is
2240	 * always per-process and doesn't make a whole lot of sense
2241 * for ptracers, who shouldn't consume the state via
2242 * wait(2) either, but, for backward compatibility, notify
2243	 * the ptracer of the group leader too unless it's going to be
2244 * a duplicate.
2246 read_lock(&tasklist_lock);
2247 do_notify_parent_cldstop(current, false, why);
2249 if (ptrace_reparented(current->group_leader))
2250 do_notify_parent_cldstop(current->group_leader,
2251 true, why);
2252 read_unlock(&tasklist_lock);
2254 goto relock;
2257 /* Has this task already been marked for death? */
2258 if (signal_group_exit(signal)) {
2259 ksig->info.si_signo = signr = SIGKILL;
2260 sigdelset(&current->pending.signal, SIGKILL);
2261 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2262 &sighand->action[SIGKILL - 1]);
2263 recalc_sigpending();
2264 goto fatal;
2267 for (;;) {
2268 struct k_sigaction *ka;
2270 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2271 do_signal_stop(0))
2272 goto relock;
2274 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2275 do_jobctl_trap();
2276 spin_unlock_irq(&sighand->siglock);
2277 goto relock;
2281 * Signals generated by the execution of an instruction
2282 * need to be delivered before any other pending signals
2283 * so that the instruction pointer in the signal stack
2284 * frame points to the faulting instruction.
2286 signr = dequeue_synchronous_signal(&ksig->info);
2287 if (!signr)
2288 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2290 if (!signr)
2291 break; /* will return 0 */
2293 if (unlikely(current->ptrace) && signr != SIGKILL) {
2294 signr = ptrace_signal(signr, &ksig->info);
2295 if (!signr)
2296 continue;
2299 ka = &sighand->action[signr-1];
2301 /* Trace actually delivered signals. */
2302 trace_signal_deliver(signr, &ksig->info, ka);
2304 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2305 continue;
2306 if (ka->sa.sa_handler != SIG_DFL) {
2307 /* Run the handler. */
2308 ksig->ka = *ka;
2310 if (ka->sa.sa_flags & SA_ONESHOT)
2311 ka->sa.sa_handler = SIG_DFL;
2313 break; /* will return non-zero "signr" value */
2317 * Now we are doing the default action for this signal.
2319 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2320 continue;
2323 * Global init gets no signals it doesn't want.
2324 * Container-init gets no signals it doesn't want from same
2325 * container.
2327 * Note that if global/container-init sees a sig_kernel_only()
2328 * signal here, the signal must have been generated internally
2329 * or must have come from an ancestor namespace. In either
2330 * case, the signal cannot be dropped.
2332 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2333 !sig_kernel_only(signr))
2334 continue;
2336 if (sig_kernel_stop(signr)) {
2338 * The default action is to stop all threads in
2339 * the thread group. The job control signals
2340 * do nothing in an orphaned pgrp, but SIGSTOP
2341 * always works. Note that siglock needs to be
2342 * dropped during the call to is_orphaned_pgrp()
2343 * because of lock ordering with tasklist_lock.
2344 * This allows an intervening SIGCONT to be posted.
2345 * We need to check for that and bail out if necessary.
2347 if (signr != SIGSTOP) {
2348 spin_unlock_irq(&sighand->siglock);
2350 /* signals can be posted during this window */
2352 if (is_current_pgrp_orphaned())
2353 goto relock;
2355 spin_lock_irq(&sighand->siglock);
2358 if (likely(do_signal_stop(ksig->info.si_signo))) {
2359 /* It released the siglock. */
2360 goto relock;
2364 * We didn't actually stop, due to a race
2365 * with SIGCONT or something like that.
2367 continue;
2370 fatal:
2371 spin_unlock_irq(&sighand->siglock);
2374 * Anything else is fatal, maybe with a core dump.
2376 current->flags |= PF_SIGNALED;
2378 if (sig_kernel_coredump(signr)) {
2379 if (print_fatal_signals)
2380 print_fatal_signal(ksig->info.si_signo);
2381 proc_coredump_connector(current);
2383 * If it was able to dump core, this kills all
2384 * other threads in the group and synchronizes with
2385 * their demise. If we lost the race with another
2386 * thread getting here, it set group_exit_code
2387 * first and our do_group_exit call below will use
2388 * that value and ignore the one we pass it.
2390 do_coredump(&ksig->info);
2394 * Death signals, no core dump.
2396 do_group_exit(ksig->info.si_signo);
2397 /* NOTREACHED */
2399 spin_unlock_irq(&sighand->siglock);
2401 ksig->sig = signr;
2402 return ksig->sig > 0;
2406 * signal_delivered - update state after a signal has been delivered
2407 * @ksig: kernel signal struct
2408 * @stepping: nonzero if debugger single-step or block-step in use
2410 * This function should be called when a signal has successfully been
2411 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2412 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2413 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2415 static void signal_delivered(struct ksignal *ksig, int stepping)
2417 sigset_t blocked;
2419 /* A signal was successfully delivered, and the
2420 saved sigmask was stored on the signal frame,
2421 and will be restored by sigreturn. So we can
2422 simply clear the restore sigmask flag. */
2423 clear_restore_sigmask();
2425 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2426 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2427 sigaddset(&blocked, ksig->sig);
2428 set_current_blocked(&blocked);
2429 tracehook_signal_handler(stepping);
2432 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2434 if (failed)
2435 force_sigsegv(ksig->sig, current);
2436 else
2437 signal_delivered(ksig, stepping);
2441 * It could be that complete_signal() picked us to notify about the
2442 * group-wide signal. Other threads should be notified now to take
2443 * the shared signals in @which since we will not.
2445 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2447 sigset_t retarget;
2448 struct task_struct *t;
2450 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2451 if (sigisemptyset(&retarget))
2452 return;
2454 t = tsk;
2455 while_each_thread(tsk, t) {
2456 if (t->flags & PF_EXITING)
2457 continue;
2459 if (!has_pending_signals(&retarget, &t->blocked))
2460 continue;
2461 /* Remove the signals this thread can handle. */
2462 sigandsets(&retarget, &retarget, &t->blocked);
2464 if (!signal_pending(t))
2465 signal_wake_up(t, 0);
2467 if (sigisemptyset(&retarget))
2468 break;
2472 void exit_signals(struct task_struct *tsk)
2474 int group_stop = 0;
2475 sigset_t unblocked;
2478 * @tsk is about to have PF_EXITING set - lock out users which
2479 * expect stable threadgroup.
2481 threadgroup_change_begin(tsk);
2483 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2484 tsk->flags |= PF_EXITING;
2485 threadgroup_change_end(tsk);
2486 return;
2489 spin_lock_irq(&tsk->sighand->siglock);
2491 * From now this task is not visible for group-wide signals,
2492 * see wants_signal(), do_signal_stop().
2494 tsk->flags |= PF_EXITING;
2496 threadgroup_change_end(tsk);
2498 if (!signal_pending(tsk))
2499 goto out;
2501 unblocked = tsk->blocked;
2502 signotset(&unblocked);
2503 retarget_shared_pending(tsk, &unblocked);
2505 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2506 task_participate_group_stop(tsk))
2507 group_stop = CLD_STOPPED;
2508 out:
2509 spin_unlock_irq(&tsk->sighand->siglock);
2512 * If group stop has completed, deliver the notification. This
2513 * should always go to the real parent of the group leader.
2515 if (unlikely(group_stop)) {
2516 read_lock(&tasklist_lock);
2517 do_notify_parent_cldstop(tsk, false, group_stop);
2518 read_unlock(&tasklist_lock);
2522 EXPORT_SYMBOL(recalc_sigpending);
2523 EXPORT_SYMBOL_GPL(dequeue_signal);
2524 EXPORT_SYMBOL(flush_signals);
2525 EXPORT_SYMBOL(force_sig);
2526 EXPORT_SYMBOL(send_sig);
2527 EXPORT_SYMBOL(send_sig_info);
2528 EXPORT_SYMBOL(sigprocmask);
2531 * System call entry points.
2535 * sys_restart_syscall - restart a system call
2537 SYSCALL_DEFINE0(restart_syscall)
2539 struct restart_block *restart = &current->restart_block;
2540 return restart->fn(restart);
2543 long do_no_restart_syscall(struct restart_block *param)
2545 return -EINTR;
2548 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2550 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2551 sigset_t newblocked;
2552 /* A set of now blocked but previously unblocked signals. */
2553 sigandnsets(&newblocked, newset, &current->blocked);
2554 retarget_shared_pending(tsk, &newblocked);
2556 tsk->blocked = *newset;
2557 recalc_sigpending();
2561 * set_current_blocked - change current->blocked mask
2562 * @newset: new mask
2564 * It is wrong to change ->blocked directly; this helper should be used
2565 * to ensure the process can't miss a shared signal we are going to block.
2567 void set_current_blocked(sigset_t *newset)
2569 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2570 __set_current_blocked(newset);
2573 void __set_current_blocked(const sigset_t *newset)
2575 struct task_struct *tsk = current;
2578 * In case the signal mask hasn't changed, there is nothing we need
2579	 * to do. The current->blocked shouldn't be modified by another task.
2581 if (sigequalsets(&tsk->blocked, newset))
2582 return;
2584 spin_lock_irq(&tsk->sighand->siglock);
2585 __set_task_blocked(tsk, newset);
2586 spin_unlock_irq(&tsk->sighand->siglock);
2590 * This is also useful for kernel threads that want to temporarily
2591 * (or permanently) block certain signals.
2593 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2594 * interface happily blocks "unblockable" signals like SIGKILL
2595 * and friends.
2597 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2599 struct task_struct *tsk = current;
2600 sigset_t newset;
2602 /* Lockless, only current can change ->blocked, never from irq */
2603 if (oldset)
2604 *oldset = tsk->blocked;
2606 switch (how) {
2607 case SIG_BLOCK:
2608 sigorsets(&newset, &tsk->blocked, set);
2609 break;
2610 case SIG_UNBLOCK:
2611 sigandnsets(&newset, &tsk->blocked, set);
2612 break;
2613 case SIG_SETMASK:
2614 newset = *set;
2615 break;
2616 default:
2617 return -EINVAL;
2620 __set_current_blocked(&newset);
2621 return 0;
2625 * sys_rt_sigprocmask - change the list of currently blocked signals
2626 * @how: whether to add, remove, or set signals
2627 * @nset: signals to add, remove, or set (if non-null)
2628 * @oset: previous value of signal mask if non-null
2629 * @sigsetsize: size of sigset_t type
2631 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2632 sigset_t __user *, oset, size_t, sigsetsize)
2634 sigset_t old_set, new_set;
2635 int error;
2637 /* XXX: Don't preclude handling different sized sigset_t's. */
2638 if (sigsetsize != sizeof(sigset_t))
2639 return -EINVAL;
2641 old_set = current->blocked;
2643 if (nset) {
2644 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2645 return -EFAULT;
2646 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2648 error = sigprocmask(how, &new_set, NULL);
2649 if (error)
2650 return error;
2653 if (oset) {
2654 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2655 return -EFAULT;
2658 return 0;
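/*
 * A minimal user-space sketch of the three @how modes above, using the
 * glibc sigprocmask() wrapper (which calls rt_sigprocmask). Note that
 * SIGKILL and SIGSTOP are silently dropped from the new mask, just as the
 * syscall above strips them.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigaddset(&set, SIGKILL);		/* will be ignored: unblockable */

	sigprocmask(SIG_BLOCK, &set, &old);	/* add to the current mask */
	sigprocmask(SIG_UNBLOCK, &set, NULL);	/* remove again */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* install a mask wholesale */

	sigprocmask(SIG_SETMASK, NULL, &set);	/* NULL new set: only read back */
	printf("SIGKILL blocked: %d\n", sigismember(&set, SIGKILL));	/* 0 */
	return 0;
}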
2661 #ifdef CONFIG_COMPAT
2662 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2663 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2665 #ifdef __BIG_ENDIAN
2666 sigset_t old_set = current->blocked;
2668 /* XXX: Don't preclude handling different sized sigset_t's. */
2669 if (sigsetsize != sizeof(sigset_t))
2670 return -EINVAL;
2672 if (nset) {
2673 compat_sigset_t new32;
2674 sigset_t new_set;
2675 int error;
2676 if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2677 return -EFAULT;
2679 sigset_from_compat(&new_set, &new32);
2680 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2682 error = sigprocmask(how, &new_set, NULL);
2683 if (error)
2684 return error;
2686 if (oset) {
2687 compat_sigset_t old32;
2688 sigset_to_compat(&old32, &old_set);
2689 if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2690 return -EFAULT;
2692 return 0;
2693 #else
2694 return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2695 (sigset_t __user *)oset, sigsetsize);
2696 #endif
2698 #endif
2700 static int do_sigpending(void *set, unsigned long sigsetsize)
2702 if (sigsetsize > sizeof(sigset_t))
2703 return -EINVAL;
2705 spin_lock_irq(&current->sighand->siglock);
2706 sigorsets(set, &current->pending.signal,
2707 &current->signal->shared_pending.signal);
2708 spin_unlock_irq(&current->sighand->siglock);
2710 /* Outside the lock because only this thread touches it. */
2711 sigandsets(set, &current->blocked, set);
2712 return 0;
2716 * sys_rt_sigpending - examine pending signals that have been raised
2717 * while blocked
2718 * @uset: stores pending signals
2719 * @sigsetsize: size of sigset_t type (must not exceed sizeof(sigset_t))
2721 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2723 sigset_t set;
2724 int err = do_sigpending(&set, sigsetsize);
2725 if (!err && copy_to_user(uset, &set, sigsetsize))
2726 err = -EFAULT;
2727 return err;
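/*
 * A minimal user-space sketch: a blocked signal stays pending and is
 * visible through sigpending(), the libc wrapper over rt_sigpending.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);			/* queued, but cannot be delivered yet */

	sigpending(&pending);
	printf("SIGUSR1 pending: %d\n", sigismember(&pending, SIGUSR1));	/* 1 */
	return 0;
}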
2730 #ifdef CONFIG_COMPAT
2731 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2732 compat_size_t, sigsetsize)
2734 #ifdef __BIG_ENDIAN
2735 sigset_t set;
2736 int err = do_sigpending(&set, sigsetsize);
2737 if (!err) {
2738 compat_sigset_t set32;
2739 sigset_to_compat(&set32, &set);
2740 /* we can get here only if sigsetsize <= sizeof(set) */
2741 if (copy_to_user(uset, &set32, sigsetsize))
2742 err = -EFAULT;
2744 return err;
2745 #else
2746 return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2747 #endif
2749 #endif
2751 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2753 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2755 int err;
2757 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2758 return -EFAULT;
2759 if (from->si_code < 0)
2760 return __copy_to_user(to, from, sizeof(siginfo_t))
2761 ? -EFAULT : 0;
2763	 * If you change the siginfo_t structure, please be sure
2764 * this code is fixed accordingly.
2765 * Please remember to update the signalfd_copyinfo() function
2766 * inside fs/signalfd.c too, in case siginfo_t changes.
2767 * It should never copy any pad contained in the structure
2768 * to avoid security leaks, but must copy the generic
2769 * 3 ints plus the relevant union member.
2771 err = __put_user(from->si_signo, &to->si_signo);
2772 err |= __put_user(from->si_errno, &to->si_errno);
2773 err |= __put_user((short)from->si_code, &to->si_code);
2774 switch (from->si_code & __SI_MASK) {
2775 case __SI_KILL:
2776 err |= __put_user(from->si_pid, &to->si_pid);
2777 err |= __put_user(from->si_uid, &to->si_uid);
2778 break;
2779 case __SI_TIMER:
2780 err |= __put_user(from->si_tid, &to->si_tid);
2781 err |= __put_user(from->si_overrun, &to->si_overrun);
2782 err |= __put_user(from->si_ptr, &to->si_ptr);
2783 break;
2784 case __SI_POLL:
2785 err |= __put_user(from->si_band, &to->si_band);
2786 err |= __put_user(from->si_fd, &to->si_fd);
2787 break;
2788 case __SI_FAULT:
2789 err |= __put_user(from->si_addr, &to->si_addr);
2790 #ifdef __ARCH_SI_TRAPNO
2791 err |= __put_user(from->si_trapno, &to->si_trapno);
2792 #endif
2793 #ifdef BUS_MCEERR_AO
2795 * Other callers might not initialize the si_lsb field,
2796 * so check explicitly for the right codes here.
2798 if (from->si_signo == SIGBUS &&
2799 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2800 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2801 #endif
2802 #ifdef SEGV_BNDERR
2803 if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2804 err |= __put_user(from->si_lower, &to->si_lower);
2805 err |= __put_user(from->si_upper, &to->si_upper);
2807 #endif
2808 #ifdef SEGV_PKUERR
2809 if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
2810 err |= __put_user(from->si_pkey, &to->si_pkey);
2811 #endif
2812 break;
2813 case __SI_CHLD:
2814 err |= __put_user(from->si_pid, &to->si_pid);
2815 err |= __put_user(from->si_uid, &to->si_uid);
2816 err |= __put_user(from->si_status, &to->si_status);
2817 err |= __put_user(from->si_utime, &to->si_utime);
2818 err |= __put_user(from->si_stime, &to->si_stime);
2819 break;
2820 case __SI_RT: /* This is not generated by the kernel as of now. */
2821 case __SI_MESGQ: /* But this is */
2822 err |= __put_user(from->si_pid, &to->si_pid);
2823 err |= __put_user(from->si_uid, &to->si_uid);
2824 err |= __put_user(from->si_ptr, &to->si_ptr);
2825 break;
2826 #ifdef __ARCH_SIGSYS
2827 case __SI_SYS:
2828 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2829 err |= __put_user(from->si_syscall, &to->si_syscall);
2830 err |= __put_user(from->si_arch, &to->si_arch);
2831 break;
2832 #endif
2833 default: /* this is just in case for now ... */
2834 err |= __put_user(from->si_pid, &to->si_pid);
2835 err |= __put_user(from->si_uid, &to->si_uid);
2836 break;
2838 return err;
2841 #endif
2844 * do_sigtimedwait - wait for queued signals specified in @which
2845 * @which: queued signals to wait for
2846 * @info: if non-null, the signal's siginfo is returned here
2847 * @ts: upper bound on process time suspension
2849 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2850 const struct timespec *ts)
2852 ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
2853 struct task_struct *tsk = current;
2854 sigset_t mask = *which;
2855 int sig, ret = 0;
2857 if (ts) {
2858 if (!timespec_valid(ts))
2859 return -EINVAL;
2860 timeout = timespec_to_ktime(*ts);
2861 to = &timeout;
2865 * Invert the set of allowed signals to get those we want to block.
2867 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2868 signotset(&mask);
2870 spin_lock_irq(&tsk->sighand->siglock);
2871 sig = dequeue_signal(tsk, &mask, info);
2872 if (!sig && timeout.tv64) {
2874		 * None ready, temporarily unblock those we're interested in
2875		 * while we are sleeping so that we'll be awakened when
2876		 * they arrive. Unblocking is always fine; we can avoid
2877 * set_current_blocked().
2879 tsk->real_blocked = tsk->blocked;
2880 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2881 recalc_sigpending();
2882 spin_unlock_irq(&tsk->sighand->siglock);
2884 __set_current_state(TASK_INTERRUPTIBLE);
2885 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
2886 HRTIMER_MODE_REL);
2887 spin_lock_irq(&tsk->sighand->siglock);
2888 __set_task_blocked(tsk, &tsk->real_blocked);
2889 sigemptyset(&tsk->real_blocked);
2890 sig = dequeue_signal(tsk, &mask, info);
2892 spin_unlock_irq(&tsk->sighand->siglock);
2894 if (sig)
2895 return sig;
2896 return ret ? -EINTR : -EAGAIN;
2900 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2901 * in @uthese
2902 * @uthese: queued signals to wait for
2903 * @uinfo: if non-null, the signal's siginfo is returned here
2904 * @uts: upper bound on process time suspension
2905 * @sigsetsize: size of sigset_t type
2907 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2908 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2909 size_t, sigsetsize)
2911 sigset_t these;
2912 struct timespec ts;
2913 siginfo_t info;
2914 int ret;
2916 /* XXX: Don't preclude handling different sized sigset_t's. */
2917 if (sigsetsize != sizeof(sigset_t))
2918 return -EINVAL;
2920 if (copy_from_user(&these, uthese, sizeof(these)))
2921 return -EFAULT;
2923 if (uts) {
2924 if (copy_from_user(&ts, uts, sizeof(ts)))
2925 return -EFAULT;
2928 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2930 if (ret > 0 && uinfo) {
2931 if (copy_siginfo_to_user(uinfo, &info))
2932 ret = -EFAULT;
2935 return ret;
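/*
 * A minimal user-space sketch of synchronous signal consumption with
 * sigtimedwait(), the libc wrapper over rt_sigtimedwait. The signal is
 * blocked first so it cannot be delivered asynchronously before the wait;
 * the two-second timeout is arbitrary.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout = { .tv_sec = 2, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* block before waiting */

	raise(SIGUSR1);				/* make one pending */
	sig = sigtimedwait(&set, &info, &timeout);
	if (sig > 0)
		printf("dequeued signal %d from pid %d\n", sig, (int)info.si_pid);
	else
		perror("sigtimedwait");		/* EAGAIN on timeout, EINTR otherwise */
	return 0;
}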
2939 * sys_kill - send a signal to a process
2940 * @pid: the PID of the process
2941 * @sig: signal to be sent
2943 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2945 struct siginfo info;
2947 info.si_signo = sig;
2948 info.si_errno = 0;
2949 info.si_code = SI_USER;
2950 info.si_pid = task_tgid_vnr(current);
2951 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2953 return kill_something_info(sig, &info, pid);
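/*
 * A short user-space sketch of the kill(2) pid semantics handled by
 * kill_something_info() above; the self-signal is set to SIG_IGN first so
 * the example terminates normally.
 */
#include <signal.h>
#include <unistd.h>

int main(void)
{
	pid_t self = getpid();

	signal(SIGUSR1, SIG_IGN);	/* keep the self-kill below harmless */

	kill(self, 0);			/* signal 0: existence/permission probe only */
	kill(self, SIGUSR1);		/* pid > 0: one specific process */
	/* kill(0, SIGUSR1);		   pid == 0: the caller's process group */
	/* kill(-getpgid(0), SIGUSR1);	   pid < -1: the process group -pid */
	/* kill(-1, SIGUSR1);		   pid == -1: everything we may signal */
	return 0;
}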
2956 static int
2957 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2959 struct task_struct *p;
2960 int error = -ESRCH;
2962 rcu_read_lock();
2963 p = find_task_by_vpid(pid);
2964 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2965 error = check_kill_permission(sig, info, p);
2967 * The null signal is a permissions and process existence
2968 * probe. No signal is actually delivered.
2970 if (!error && sig) {
2971 error = do_send_sig_info(sig, info, p, false);
2973 * If lock_task_sighand() failed we pretend the task
2974 * dies after receiving the signal. The window is tiny,
2975 * and the signal is private anyway.
2977 if (unlikely(error == -ESRCH))
2978 error = 0;
2981 rcu_read_unlock();
2983 return error;
2986 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2988 struct siginfo info = {};
2990 info.si_signo = sig;
2991 info.si_errno = 0;
2992 info.si_code = SI_TKILL;
2993 info.si_pid = task_tgid_vnr(current);
2994 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2996 return do_send_specific(tgid, pid, sig, &info);
3000 * sys_tgkill - send signal to one specific thread
3001 * @tgid: the thread group ID of the thread
3002 * @pid: the PID of the thread
3003 * @sig: signal to be sent
3005 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3006 * exists but no longer belongs to the target process. This
3007 * method solves the problem of threads exiting and PIDs getting reused.
3009 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3011 /* This is only valid for single tasks */
3012 if (pid <= 0 || tgid <= 0)
3013 return -EINVAL;
3015 return do_tkill(tgid, pid, sig);
3019 * sys_tkill - send signal to one specific task
3020 * @pid: the PID of the task
3021 * @sig: signal to be sent
3023 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3025 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3027 /* This is only valid for single tasks */
3028 if (pid <= 0)
3029 return -EINVAL;
3031 return do_tkill(0, pid, sig);
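/*
 * A user-space sketch of signalling one specific thread with tgkill,
 * issued through syscall(2); glibc may or may not provide a dedicated
 * wrapper, so the raw syscall numbers are used here.
 */
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static void handler(int sig)
{
	(void)sig;			/* nothing to do; delivery is the point */
}

int main(void)
{
	pid_t tgid, tid;

	signal(SIGUSR1, handler);
	tgid = getpid();		/* thread group (process) id */
	tid  = syscall(SYS_gettid);	/* kernel id of this thread */

	/* delivered to exactly this thread; -ESRCH if tid no longer belongs to tgid */
	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
	return 0;
}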
3034 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3036 /* Not even root can pretend to send signals from the kernel.
3037 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3039 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3040 (task_pid_vnr(current) != pid))
3041 return -EPERM;
3043 info->si_signo = sig;
3045 /* POSIX.1b doesn't mention process groups. */
3046 return kill_proc_info(sig, info, pid);
3050 * sys_rt_sigqueueinfo - queue a signal and data to a process
3051 * @pid: the PID of the process
3052 * @sig: signal to be sent
3053 * @uinfo: signal info to be sent
3055 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3056 siginfo_t __user *, uinfo)
3058 siginfo_t info;
3059 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3060 return -EFAULT;
3061 return do_rt_sigqueueinfo(pid, sig, &info);
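/*
 * A user-space sketch of queueing a signal with an attached value via
 * sigqueue(3); on glibc this ends up in rt_sigqueueinfo with an SI_QUEUE
 * si_code. The payload value 42 is arbitrary.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t payload;

static void on_usr1(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)ctx;
	payload = info->si_value.sival_int;	/* the queued value */
}

int main(void)
{
	struct sigaction sa;
	union sigval v = { .sival_int = 42 };

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_usr1;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);

	sigqueue(getpid(), SIGUSR1, v);
	printf("payload: %d\n", (int)payload);
	return 0;
}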
3064 #ifdef CONFIG_COMPAT
3065 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3066 compat_pid_t, pid,
3067 int, sig,
3068 struct compat_siginfo __user *, uinfo)
3070 siginfo_t info = {};
3071 int ret = copy_siginfo_from_user32(&info, uinfo);
3072 if (unlikely(ret))
3073 return ret;
3074 return do_rt_sigqueueinfo(pid, sig, &info);
3076 #endif
3078 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3080 /* This is only valid for single tasks */
3081 if (pid <= 0 || tgid <= 0)
3082 return -EINVAL;
3084 /* Not even root can pretend to send signals from the kernel.
3085 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3087 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3088 (task_pid_vnr(current) != pid))
3089 return -EPERM;
3091 info->si_signo = sig;
3093 return do_send_specific(tgid, pid, sig, info);
3096 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3097 siginfo_t __user *, uinfo)
3099 siginfo_t info;
3101 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3102 return -EFAULT;
3104 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3107 #ifdef CONFIG_COMPAT
3108 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3109 compat_pid_t, tgid,
3110 compat_pid_t, pid,
3111 int, sig,
3112 struct compat_siginfo __user *, uinfo)
3114 siginfo_t info = {};
3116 if (copy_siginfo_from_user32(&info, uinfo))
3117 return -EFAULT;
3118 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3120 #endif
3123 * For kthreads only; must not be used if cloned with CLONE_SIGHAND
3125 void kernel_sigaction(int sig, __sighandler_t action)
3127 spin_lock_irq(&current->sighand->siglock);
3128 current->sighand->action[sig - 1].sa.sa_handler = action;
3129 if (action == SIG_IGN) {
3130 sigset_t mask;
3132 sigemptyset(&mask);
3133 sigaddset(&mask, sig);
3135 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3136 flush_sigqueue_mask(&mask, &current->pending);
3137 recalc_sigpending();
3139 spin_unlock_irq(&current->sighand->siglock);
3141 EXPORT_SYMBOL(kernel_sigaction);
3143 void __weak sigaction_compat_abi(struct k_sigaction *act,
3144 struct k_sigaction *oact)
3148 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3150 struct task_struct *p = current, *t;
3151 struct k_sigaction *k;
3152 sigset_t mask;
3154 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3155 return -EINVAL;
3157 k = &p->sighand->action[sig-1];
3159 spin_lock_irq(&p->sighand->siglock);
3160 if (oact)
3161 *oact = *k;
3163 sigaction_compat_abi(act, oact);
3165 if (act) {
3166 sigdelsetmask(&act->sa.sa_mask,
3167 sigmask(SIGKILL) | sigmask(SIGSTOP));
3168 *k = *act;
3170 * POSIX 3.3.1.3:
3171 * "Setting a signal action to SIG_IGN for a signal that is
3172 * pending shall cause the pending signal to be discarded,
3173 * whether or not it is blocked."
3175 * "Setting a signal action to SIG_DFL for a signal that is
3176 * pending and whose default action is to ignore the signal
3177 * (for example, SIGCHLD), shall cause the pending signal to
3178 * be discarded, whether or not it is blocked"
3180 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3181 sigemptyset(&mask);
3182 sigaddset(&mask, sig);
3183 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3184 for_each_thread(p, t)
3185 flush_sigqueue_mask(&mask, &t->pending);
3189 spin_unlock_irq(&p->sighand->siglock);
3190 return 0;
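/*
 * A user-space sketch of the POSIX rule quoted above: switching a pending,
 * blocked signal's disposition to SIG_IGN discards the pending instance.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);
	raise(SIGUSR1);			/* now pending and blocked */

	signal(SIGUSR1, SIG_IGN);	/* flushes the pending instance */

	sigpending(&pending);
	printf("still pending: %d\n", sigismember(&pending, SIGUSR1));	/* 0 */
	return 0;
}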
3193 static int
3194 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp,
3195 size_t min_ss_size)
3197 stack_t oss;
3198 int error;
3200 oss.ss_sp = (void __user *) current->sas_ss_sp;
3201 oss.ss_size = current->sas_ss_size;
3202 oss.ss_flags = sas_ss_flags(sp) |
3203 (current->sas_ss_flags & SS_FLAG_BITS);
3205 if (uss) {
3206 void __user *ss_sp;
3207 size_t ss_size;
3208 unsigned ss_flags;
3209 int ss_mode;
3211 error = -EFAULT;
3212 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3213 goto out;
3214 error = __get_user(ss_sp, &uss->ss_sp) |
3215 __get_user(ss_flags, &uss->ss_flags) |
3216 __get_user(ss_size, &uss->ss_size);
3217 if (error)
3218 goto out;
3220 error = -EPERM;
3221 if (on_sig_stack(sp))
3222 goto out;
3224 ss_mode = ss_flags & ~SS_FLAG_BITS;
3225 error = -EINVAL;
3226 if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3227 ss_mode != 0)
3228 goto out;
3230 if (ss_mode == SS_DISABLE) {
3231 ss_size = 0;
3232 ss_sp = NULL;
3233 } else {
3234 if (unlikely(ss_size < min_ss_size))
3235 return -ENOMEM;
3238 current->sas_ss_sp = (unsigned long) ss_sp;
3239 current->sas_ss_size = ss_size;
3240 current->sas_ss_flags = ss_flags;
3243 error = 0;
3244 if (uoss) {
3245 error = -EFAULT;
3246 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3247 goto out;
3248 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3249 __put_user(oss.ss_size, &uoss->ss_size) |
3250 __put_user(oss.ss_flags, &uoss->ss_flags);
3253 out:
3254 return error;
3256 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3258 return do_sigaltstack(uss, uoss, current_user_stack_pointer(),
3259 MINSIGSTKSZ);
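/*
 * A user-space sketch of pairing sigaltstack() with SA_ONSTACK so that a
 * SIGSEGV handler can still run after the normal stack has been exhausted;
 * error handling is kept minimal.
 */
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void on_segv(int sig)
{
	(void)sig;
	write(2, "segv on alt stack\n", 18);	/* async-signal-safe */
	_exit(1);				/* cannot sanely return after an overflow */
}

int main(void)
{
	stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ, .ss_flags = 0 };
	struct sigaction sa;

	if (!ss.ss_sp || sigaltstack(&ss, NULL) == -1)
		return 1;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;		/* run the handler on the stack above */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	return 0;
}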
3262 int restore_altstack(const stack_t __user *uss)
3264 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer(),
3265 MINSIGSTKSZ);
3266 /* squash all but EFAULT for now */
3267 return err == -EFAULT ? err : 0;
3270 int __save_altstack(stack_t __user *uss, unsigned long sp)
3272 struct task_struct *t = current;
3273 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3274 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3275 __put_user(t->sas_ss_size, &uss->ss_size);
3276 if (err)
3277 return err;
3278 if (t->sas_ss_flags & SS_AUTODISARM)
3279 sas_ss_reset(t);
3280 return 0;
3283 #ifdef CONFIG_COMPAT
3284 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3285 const compat_stack_t __user *, uss_ptr,
3286 compat_stack_t __user *, uoss_ptr)
3288 stack_t uss, uoss;
3289 int ret;
3290 mm_segment_t seg;
3292 if (uss_ptr) {
3293 compat_stack_t uss32;
3295 memset(&uss, 0, sizeof(stack_t));
3296 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3297 return -EFAULT;
3298 uss.ss_sp = compat_ptr(uss32.ss_sp);
3299 uss.ss_flags = uss32.ss_flags;
3300 uss.ss_size = uss32.ss_size;
3302 seg = get_fs();
3303 set_fs(KERNEL_DS);
3304 ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3305 (stack_t __force __user *) &uoss,
3306 compat_user_stack_pointer(),
3307 COMPAT_MINSIGSTKSZ);
3308 set_fs(seg);
3309 if (ret >= 0 && uoss_ptr) {
3310 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3311 __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3312 __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3313 __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3314 ret = -EFAULT;
3316 return ret;
3319 int compat_restore_altstack(const compat_stack_t __user *uss)
3321 int err = compat_sys_sigaltstack(uss, NULL);
3322 /* squash all but -EFAULT for now */
3323 return err == -EFAULT ? err : 0;
3326 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3328 int err;
3329 struct task_struct *t = current;
3330 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3331 &uss->ss_sp) |
3332 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3333 __put_user(t->sas_ss_size, &uss->ss_size);
3334 if (err)
3335 return err;
3336 if (t->sas_ss_flags & SS_AUTODISARM)
3337 sas_ss_reset(t);
3338 return 0;
3340 #endif
3342 #ifdef __ARCH_WANT_SYS_SIGPENDING
3345 * sys_sigpending - examine pending signals
3346 * @set: where the mask of pending signals is returned
3348 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3350 return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3353 #endif
3355 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3357 * sys_sigprocmask - examine and change blocked signals
3358 * @how: whether to add, remove, or set signals
3359 * @nset: signals to add or remove (if non-null)
3360 * @oset: previous value of signal mask if non-null
3362 * Some platforms have their own version with special arguments;
3363 * others support only sys_rt_sigprocmask.
3366 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3367 old_sigset_t __user *, oset)
3369 old_sigset_t old_set, new_set;
3370 sigset_t new_blocked;
3372 old_set = current->blocked.sig[0];
3374 if (nset) {
3375 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3376 return -EFAULT;
3378 new_blocked = current->blocked;
3380 switch (how) {
3381 case SIG_BLOCK:
3382 sigaddsetmask(&new_blocked, new_set);
3383 break;
3384 case SIG_UNBLOCK:
3385 sigdelsetmask(&new_blocked, new_set);
3386 break;
3387 case SIG_SETMASK:
3388 new_blocked.sig[0] = new_set;
3389 break;
3390 default:
3391 return -EINVAL;
3394 set_current_blocked(&new_blocked);
3397 if (oset) {
3398 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3399 return -EFAULT;
3402 return 0;
3404 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3406 #ifndef CONFIG_ODD_RT_SIGACTION
3408 * sys_rt_sigaction - alter an action taken by a process
3409 * @sig: signal whose action is to be changed
3410 * @act: new sigaction
3411 * @oact: used to save the previous sigaction
3412 * @sigsetsize: size of sigset_t type
3414 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3415 const struct sigaction __user *, act,
3416 struct sigaction __user *, oact,
3417 size_t, sigsetsize)
3419 struct k_sigaction new_sa, old_sa;
3420 int ret = -EINVAL;
3422 /* XXX: Don't preclude handling different sized sigset_t's. */
3423 if (sigsetsize != sizeof(sigset_t))
3424 goto out;
3426 if (act) {
3427 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3428 return -EFAULT;
3431 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3433 if (!ret && oact) {
3434 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3435 return -EFAULT;
3437 out:
3438 return ret;
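/*
 * A user-space sketch of installing a handler with sigaction(2), the libc
 * route into rt_sigaction above; SA_RESTART and the handler mask are
 * illustrative choices, not requirements.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t got_int;

static void on_int(int sig)
{
	(void)sig;
	got_int = 1;			/* only touch async-signal-safe state */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_int;
	sigemptyset(&sa.sa_mask);	/* extra signals blocked while the handler runs */
	sa.sa_flags = SA_RESTART;	/* restart interruptible syscalls instead of EINTR */

	if (sigaction(SIGINT, &sa, NULL) == -1) {
		perror("sigaction");
		return 1;
	}
	raise(SIGINT);
	printf("handled: %d\n", (int)got_int);
	return 0;
}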
3440 #ifdef CONFIG_COMPAT
3441 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3442 const struct compat_sigaction __user *, act,
3443 struct compat_sigaction __user *, oact,
3444 compat_size_t, sigsetsize)
3446 struct k_sigaction new_ka, old_ka;
3447 compat_sigset_t mask;
3448 #ifdef __ARCH_HAS_SA_RESTORER
3449 compat_uptr_t restorer;
3450 #endif
3451 int ret;
3453 /* XXX: Don't preclude handling different sized sigset_t's. */
3454 if (sigsetsize != sizeof(compat_sigset_t))
3455 return -EINVAL;
3457 if (act) {
3458 compat_uptr_t handler;
3459 ret = get_user(handler, &act->sa_handler);
3460 new_ka.sa.sa_handler = compat_ptr(handler);
3461 #ifdef __ARCH_HAS_SA_RESTORER
3462 ret |= get_user(restorer, &act->sa_restorer);
3463 new_ka.sa.sa_restorer = compat_ptr(restorer);
3464 #endif
3465 ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3466 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3467 if (ret)
3468 return -EFAULT;
3469 sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3472 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3473 if (!ret && oact) {
3474 sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3475 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3476 &oact->sa_handler);
3477 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3478 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3479 #ifdef __ARCH_HAS_SA_RESTORER
3480 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3481 &oact->sa_restorer);
3482 #endif
3484 return ret;
3486 #endif
3487 #endif /* !CONFIG_ODD_RT_SIGACTION */
3489 #ifdef CONFIG_OLD_SIGACTION
3490 SYSCALL_DEFINE3(sigaction, int, sig,
3491 const struct old_sigaction __user *, act,
3492 struct old_sigaction __user *, oact)
3494 struct k_sigaction new_ka, old_ka;
3495 int ret;
3497 if (act) {
3498 old_sigset_t mask;
3499 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3500 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3501 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3502 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3503 __get_user(mask, &act->sa_mask))
3504 return -EFAULT;
3505 #ifdef __ARCH_HAS_KA_RESTORER
3506 new_ka.ka_restorer = NULL;
3507 #endif
3508 siginitset(&new_ka.sa.sa_mask, mask);
3511 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3513 if (!ret && oact) {
3514 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3515 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3516 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3517 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3518 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3519 return -EFAULT;
3522 return ret;
3524 #endif
3525 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3526 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3527 const struct compat_old_sigaction __user *, act,
3528 struct compat_old_sigaction __user *, oact)
3530 struct k_sigaction new_ka, old_ka;
3531 int ret;
3532 compat_old_sigset_t mask;
3533 compat_uptr_t handler, restorer;
3535 if (act) {
3536 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3537 __get_user(handler, &act->sa_handler) ||
3538 __get_user(restorer, &act->sa_restorer) ||
3539 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3540 __get_user(mask, &act->sa_mask))
3541 return -EFAULT;
3543 #ifdef __ARCH_HAS_KA_RESTORER
3544 new_ka.ka_restorer = NULL;
3545 #endif
3546 new_ka.sa.sa_handler = compat_ptr(handler);
3547 new_ka.sa.sa_restorer = compat_ptr(restorer);
3548 siginitset(&new_ka.sa.sa_mask, mask);
3551 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3553 if (!ret && oact) {
3554 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3555 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3556 &oact->sa_handler) ||
3557 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3558 &oact->sa_restorer) ||
3559 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3560 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3561 return -EFAULT;
3563 return ret;
3565 #endif
3567 #ifdef CONFIG_SGETMASK_SYSCALL
3570 * For backwards compatibility. Functionality superseded by sigprocmask.
3572 SYSCALL_DEFINE0(sgetmask)
3574 /* SMP safe */
3575 return current->blocked.sig[0];
3578 SYSCALL_DEFINE1(ssetmask, int, newmask)
3580 int old = current->blocked.sig[0];
3581 sigset_t newset;
3583 siginitset(&newset, newmask);
3584 set_current_blocked(&newset);
3586 return old;
3588 #endif /* CONFIG_SGETMASK_SYSCALL */
3590 #ifdef __ARCH_WANT_SYS_SIGNAL
3592 * For backwards compatibility. Functionality superseded by sigaction.
3594 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3596 struct k_sigaction new_sa, old_sa;
3597 int ret;
3599 new_sa.sa.sa_handler = handler;
3600 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3601 sigemptyset(&new_sa.sa.sa_mask);
3603 ret = do_sigaction(sig, &new_sa, &old_sa);
3605 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3607 #endif /* __ARCH_WANT_SYS_SIGNAL */
3609 #ifdef __ARCH_WANT_SYS_PAUSE
3611 SYSCALL_DEFINE0(pause)
3613 while (!signal_pending(current)) {
3614 __set_current_state(TASK_INTERRUPTIBLE);
3615 schedule();
3617 return -ERESTARTNOHAND;
3620 #endif
3622 static int sigsuspend(sigset_t *set)
3624 current->saved_sigmask = current->blocked;
3625 set_current_blocked(set);
3627 while (!signal_pending(current)) {
3628 __set_current_state(TASK_INTERRUPTIBLE);
3629 schedule();
3631 set_restore_sigmask();
3632 return -ERESTARTNOHAND;
3636 * sys_rt_sigsuspend - replace the signal mask with the @unewset
3637 * value until a signal is received
3638 * @unewset: new signal mask value
3639 * @sigsetsize: size of sigset_t type
3641 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3643 sigset_t newset;
3645 /* XXX: Don't preclude handling different sized sigset_t's. */
3646 if (sigsetsize != sizeof(sigset_t))
3647 return -EINVAL;
3649 if (copy_from_user(&newset, unewset, sizeof(newset)))
3650 return -EFAULT;
3651 return sigsuspend(&newset);
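/*
 * A user-space sketch of the classic race-free wait built on sigsuspend():
 * block the signal, test the condition, then atomically restore the old
 * mask and sleep. raise() stands in for a signal arriving from elsewhere.
 */
#include <signal.h>
#include <string.h>

static volatile sig_atomic_t done;

static void on_usr1(int sig)
{
	(void)sig;
	done = 1;
}

int main(void)
{
	sigset_t block, old;
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_usr1;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);	/* close the race window */

	raise(SIGUSR1);				/* pretend it arrived during the window */

	while (!done)
		sigsuspend(&old);		/* atomically unblock and sleep; returns -1/EINTR */

	sigprocmask(SIG_SETMASK, &old, NULL);
	return 0;
}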
3654 #ifdef CONFIG_COMPAT
3655 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3657 #ifdef __BIG_ENDIAN
3658 sigset_t newset;
3659 compat_sigset_t newset32;
3661 /* XXX: Don't preclude handling different sized sigset_t's. */
3662 if (sigsetsize != sizeof(sigset_t))
3663 return -EINVAL;
3665 if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3666 return -EFAULT;
3667 sigset_from_compat(&newset, &newset32);
3668 return sigsuspend(&newset);
3669 #else
3670	/* on little-endian, bitmaps don't care about granularity */
3671 return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3672 #endif
3674 #endif
3676 #ifdef CONFIG_OLD_SIGSUSPEND
3677 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3679 sigset_t blocked;
3680 siginitset(&blocked, mask);
3681 return sigsuspend(&blocked);
3683 #endif
3684 #ifdef CONFIG_OLD_SIGSUSPEND3
3685 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3687 sigset_t blocked;
3688 siginitset(&blocked, mask);
3689 return sigsuspend(&blocked);
3691 #endif
3693 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3695 return NULL;
3698 void __init signals_init(void)
3700 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
3701 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
3702 != offsetof(struct siginfo, _sifields._pad));
3704 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3707 #ifdef CONFIG_KGDB_KDB
3708 #include <linux/kdb.h>
3710 * kdb_send_sig_info - Allows kdb to send signals without exposing
3711 * signal internals. This function checks if the required locks are
3712 * available before calling the main signal code, to avoid kdb
3713 * deadlocks.
3715 void
3716 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3718 static struct task_struct *kdb_prev_t;
3719 int sig, new_t;
3720 if (!spin_trylock(&t->sighand->siglock)) {
3721 kdb_printf("Can't do kill command now.\n"
3722 "The sigmask lock is held somewhere else in "
3723 "kernel, try again later\n");
3724 return;
3726 spin_unlock(&t->sighand->siglock);
3727 new_t = kdb_prev_t != t;
3728 kdb_prev_t = t;
3729 if (t->state != TASK_RUNNING && new_t) {
3730 kdb_printf("Process is not RUNNING, sending a signal from "
3731 "kdb risks deadlock\n"
3732 "on the run queue locks. "
3733 "The signal has _not_ been sent.\n"
3734 "Reissue the kill command if you want to risk "
3735 "the deadlock.\n");
3736 return;
3738 sig = info->si_signo;
3739 if (send_sig_info(sig, info, t))
3740		kdb_printf("Failed to deliver signal %d to process %d.\n",
3741 sig, t->pid);
3742 else
3743 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3745 #endif /* CONFIG_KGDB_KDB */