/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return	handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

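/*
 * Example (illustrative, not part of the original logic): with an untouched
 * sighand table, a signal whose default action is to ignore short-circuits
 * here, while a default-fatal one does not:
 *
 *	sig_ignored(t, SIGCHLD)		returns 1 (SIG_DFL + sig_kernel_ignore())
 *	sig_ignored(t, SIGTERM)		returns 0 (default action is fatal)
 *
 * A ptraced task never short-circuits, so a tracer sees every signal.
 */
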
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only the callers who know they
	 * should clear it do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}

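/*
 * Example (illustrative): if SIGUSR1 (10) and SIGTERM (15) are both pending
 * and the caller's mask blocks SIGUSR1, the word-wise scan above skips the
 * blocked bit and next_signal() returns 15. With nothing blocked it would
 * return 10: lower-numbered signals are serviced first.
 */
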
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}

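/*
 * A minimal sketch of how allocation and accounting pair up (both halves
 * are internal to this file; error handling elided):
 *
 *	struct sigqueue *q = __sigqueue_alloc(t, GFP_ATOMIC, 0);
 *	if (q) {
 *		...queue q, and once it is dequeued...
 *		__sigqueue_free(q);	// drops user->sigpending and the uid ref
 *	}
 *
 * Passing override_rlimit != 0 bypasses the RLIMIT_SIGPENDING check, which
 * kernel-internal senders rely on.
 */
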
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	if (is_init(tsk))
		return 1;
	if (tsk->ptrace & PT_PTRACED)
		return 0;
	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

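/*
 * A minimal usage sketch (illustrative; modelled loosely on the DRM-style
 * locking code that has historically been the main user of this interface;
 * "my_notifier", "lock_data" and "allowed_mask" are made-up names):
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_lock_data *d = priv;
 *		return d->may_take_signals;	// 0 => keep the signal blocked
 *	}
 *
 *	block_all_signals(my_notifier, &lock_data, &allowed_mask);
 *	...critical section...
 *	unblock_all_signals();
 */
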
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	*/
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = 0;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}
	recalc_sigpending();
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	     info->si_sys_private){
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;

	if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) {
		error = audit_signal_info(sig, t); /* Let audit system see the signal */
		if (error)
			return error;
		error = -EPERM;
		if (((sig != SIGCONT) ||
			(process_session(current) != process_session(t)))
		    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
		    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
		    && !capable(CAP_KILL))
			return error;
	}

	return security_task_kill(t, info, sig, 0);
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(t, sig);

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))

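/*
 * Example (illustrative): if SIGCHLD is already pending for a task, a second
 * SIGCHLD makes LEGACY_QUEUE() true and the sender drops it: classic
 * (non-realtime) signals coalesce rather than queue, exactly as POSIX
 * permits.  Signals >= SIGRTMIN never take this path.
 */
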
int print_fatal_signals;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, current->pid, signr);

#ifdef __i386__
	printk("code at %08lx: ", regs->eip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->eip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, t == p);
		} while_each_thread(p, t);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}

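/*
 * The expected caller pattern, paired with unlock_task_sighand() (a sketch;
 * "tsk" must be kept alive by the caller, e.g. via get_task_struct()):
 *
 *	unsigned long flags;
 *
 *	rcu_read_lock();
 *	if (lock_task_sighand(tsk, &flags)) {
 *		...tsk->sighand is stable and ->siglock is held...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 *	rcu_read_unlock();
 */
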
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}

/*
 * kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pgrp_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig)))
		read_lock(&tasklist_lock);

	p = pid_task(pid, PIDTYPE_PID);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);

	if (unlikely(sig_needs_tasklist(sig)))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();
	return error;
}

static int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_pid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;

	if (!pid) {
		ret = kill_pgrp_info(sig, info, task_pgrp(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		ret = count ? retval : -ESRCH;
	} else if (pid < 0) {
		ret = kill_pgrp_info(sig, info, find_pid(-pid));
	} else {
		ret = kill_pid_info(sig, info, find_pid(pid));
	}

	return ret;
}

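/*
 * Summary of the pid encoding handled above (mirrors kill(2)):
 *
 *	pid > 0		signal the process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process the caller may signal, except
 *			init and the caller's own thread group
 *	pid < -1	signal every process in process group -pid
 */
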
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

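/*
 * Example (illustrative): __si_special() maps the "priv" flag onto the two
 * magic siginfo pointers, so a caller that must deliver a kernel-originated
 * signal can simply write:
 *
 *	send_sig(SIGKILL, p, 1);	// SEND_SIG_PRIV: from the kernel
 *	send_sig(SIGHUP, p, 0);		// SEND_SIG_NOINFO: as if from a user
 */
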
/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue. We must hold ->siglock while testing
	 * q->list to serialize with collect_signal().
	 */
	spin_lock_irqsave(lock, flags);
	if (!list_empty(&q->list))
		list_del_init(&q->list);
	spin_unlock_irqrestore(lock, flags);

	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

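/*
 * Lifecycle sketch for the preallocated path, roughly as the posix-timers
 * code uses it (illustrative; see kernel/posix-timers.c for the real thing):
 *
 *	q = sigqueue_alloc();			// at timer_create(), may fail
 *	...
 *	send_sigqueue(sig, q, task);		// at each timer expiry
 *	...
 *	sigqueue_free(q);			// at timer deletion
 */
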
int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags))) {
		ret = -1;
		goto out_err;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}
	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(p, sig);

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();

	return ret;
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(p, sig);

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;

	if (unlikely(current->parent == current->real_parent &&
		    (current->ptrace & PT_ATTACHED)))
		return 0;

	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	try_to_freeze();
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		struct task_struct *t;

		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!t->exit_state &&
			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

	try_to_freeze();

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Init of a pid space gets no signals it doesn't want from
		 * within that pid space. It can of course get signals from
		 * its parent pid space.
		 */
		if (current == child_reaper(current))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if ((signr != SIGKILL) && print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}

asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (p && (tgid <= 0 || p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This method solves
 *  the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
			struct task_struct *t = current;
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}

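/*
 * Userspace view (illustrative): the usual reason to set an alternate stack
 * is so a SIGSEGV handler can still run after the main stack has overflowed:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *	// then install the handler with SA_ONSTACK set
 */
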
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */

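/*
 * Userspace view (illustrative): sigsuspend() exists because unblocking a
 * signal and then sleeping is racy. The atomic swap-mask-and-sleep sequence
 * implemented above is what makes this classic loop safe:
 *
 *	sigprocmask(SIG_BLOCK, &mask, &oldmask);
 *	while (!flag_set_by_handler)
 *		sigsuspend(&oldmask);	// unblock + sleep atomically
 *	sigprocmask(SIG_SETMASK, &oldmask, NULL);
 */
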
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}