/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
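/*
 * Editor's note (illustrative, not kernel code): the PENDING() test above
 * reduces to "is any bit set in (pending & ~blocked)?", evaluated one
 * word of the sigset at a time.  A minimal userspace sketch of the
 * single-word case, with hypothetical masks:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long pending = 0x14UL;
 *		unsigned long blocked = 0x10UL;
 *		unsigned long ready = pending & ~blocked;
 *
 *		printf("deliverable? %s\n", ready ? "yes" : "no");
 *		return 0;
 *	}
 *
 * Here bits 2 and 4 (signals 3 and 5) are pending and signal 5 is
 * blocked; only signal 3 survives the mask, so the task would be
 * flagged TIF_SIGPENDING.
 */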
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    (freezing(t)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
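/*
 * Editor's note (illustrative, not kernel code): ffz(~x) means "find
 * first zero bit of ~x", i.e. the index of the lowest set bit of x, so
 * sig = ffz(~x) + i*_NSIG_BPW + 1 converts a word index plus a bit index
 * into a 1-based signal number.  E.g. with 64-bit words, a word-0 value
 * x == 0x400 has its lowest set bit at index 10, giving signal 11
 * (SIGSEGV).  A portable sketch of the bit scan (assumes x != 0, as in
 * the callers above):
 *
 *	unsigned int lowest_set_bit(unsigned long x)
 *	{
 *		unsigned int i = 0;
 *
 *		while (!(x & 1UL)) {
 *			x >>= 1;
 *			i++;
 *		}
 *		return i;
 *	}
 */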
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	if (is_init(tsk))
		return 1;
	if (tsk->ptrace & PT_PTRACED)
		return 0;
	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = 0;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}
	recalc_sigpending_tsk(tsk);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	     info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;

	if (!valid_signal(sig))
		return error;

	if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) {
		error = audit_signal_info(sig, t); /* Let audit system see the signal */
		if (error)
			return error;
		error = -EPERM;
		if (((sig != SIGCONT) ||
			(process_session(current) != process_session(t)))
		    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
		    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
		    && !capable(CAP_KILL))
			return error;
	}

	return security_task_kill(t, info, sig, 0);
}
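/*
 * Editor's note: the euid/uid tests above use the "(a ^ b)" idiom,
 * which is non-zero exactly when a != b, so the chain of && succeeds
 * only if every pairing differs.  An equivalent, more explicit form
 * (illustrative only):
 *
 *	int mismatch = (current->euid != t->suid) &&
 *		       (current->euid != t->uid)  &&
 *		       (current->uid  != t->suid) &&
 *		       (current->uid  != t->uid);
 *
 * The signal is then refused only if mismatch holds, the SIGCONT
 * same-session exception does not apply, and CAP_KILL is absent.
 */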
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		unsigned int state;

		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(t, sig);

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))

int print_fatal_signals;
static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, current->pid, signr);

#ifdef __i386__
	printk("code at %08lx: ", regs->eip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->eip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
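/*
 * Editor's sketch (userspace, illustrative, not part of this file): the
 * LEGACY_QUEUE() check above is why classic (< SIGRTMIN) signals
 * coalesce while real-time signals queue.  A hypothetical demonstration
 * that blocks a signal, raises it three times, then unblocks it:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	static volatile sig_atomic_t hits;
 *	static void handler(int signo) { hits++; }
 *
 *	int main(void)
 *	{
 *		int sig = SIGRTMIN;
 *		sigset_t set;
 *
 *		signal(sig, handler);
 *		sigemptyset(&set);
 *		sigaddset(&set, sig);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		raise(sig); raise(sig); raise(sig);
 *		sigprocmask(SIG_UNBLOCK, &set, NULL);
 *		printf("delivered %d times\n", (int)hits);
 *		return 0;
 *	}
 *
 * With sig = SIGRTMIN this prints 3; with sig = SIGUSR1 it prints 1,
 * because the second and third instances hit the LEGACY_QUEUE() test.
 */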
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, t == p);
		} while_each_thread(p, t);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}

int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
/*
 * kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pgrp_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig)))
		read_lock(&tasklist_lock);

	p = pid_task(pid, PIDTYPE_PID);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);

	if (unlikely(sig_needs_tasklist(sig)))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_pid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;

	if (!pid) {
		ret = kill_pgrp_info(sig, info, task_pgrp(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		ret = count ? retval : -ESRCH;
	} else if (pid < 0) {
		ret = kill_pgrp_info(sig, info, find_pid(-pid));
	} else {
		ret = kill_pid_info(sig, info, find_pid(pid));
	}

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);

	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue. We must hold ->siglock while testing
	 * q->list to serialize with collect_signal().
	 */
	spin_lock_irqsave(lock, flags);
	if (!list_empty(&q->list))
		list_del_init(&q->list);
	spin_unlock_irqrestore(lock, flags);

	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags))) {
		ret = -1;
		goto out_err;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}
	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(p, sig);

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();

	return ret;
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Deliver the signal to listening signalfds. This must be called
	 * with the sighand lock held.
	 */
	signalfd_notify(p, sig);

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
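/*
 * Editor's sketch (userspace, illustrative): the preallocated-sigqueue
 * paths above are what back POSIX timers.  Because a given timer owns a
 * single sigqueue entry, expirations that occur while that entry is
 * still queued are folded into si_overrun rather than queued again;
 * timer_getoverrun(2) reports the count.  A minimal setup, with a
 * hypothetical 1 ms period:
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	timer_t t;
 *	struct sigevent ev = { 0 };
 *	struct itimerspec its = { 0 };
 *
 *	ev.sigev_notify = SIGEV_SIGNAL;
 *	ev.sigev_signo = SIGRTMIN;
 *	timer_create(CLOCK_MONOTONIC, &ev, &t);
 *	its.it_value.tv_nsec = 1000000;
 *	its.it_interval.tv_nsec = 1000000;
 *	timer_settime(t, 0, &its, NULL);
 *
 * If the process cannot service SIGRTMIN every millisecond, the
 * receiver sees si_overrun > 0 instead of a longer queue, which is
 * exactly the q->info.si_overrun++ bookkeeping above.
 */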
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;

	if (unlikely(current->parent == current->real_parent &&
		    (current->ptrace & PT_ATTACHED)))
		return 0;

	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	try_to_freeze();
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		struct task_struct *t;

		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!t->exit_state &&
			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

	try_to_freeze();

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Init of a pid space gets no signals it doesn't want from
		 * within that pid space. It can of course get signals from
		 * its parent pid space.
		 */
		if (current == child_reaper(current))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if ((signr != SIGKILL) && print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
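/*
 * Editor's sketch (userspace, illustrative): the common caller of this
 * syscall is the sigprocmask(2)/pthread_sigmask(3) wrapper.  Note how
 * SIGKILL and SIGSTOP are silently deleted from the new set above, so
 * even this attempt "succeeds" without actually blocking them:
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigaddset(&set, SIGKILL);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	sleep(5);
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 *
 * A SIGINT raised during the sleep stays pending and is delivered once
 * the old mask is restored; the SIGKILL bit is simply ignored.
 */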
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
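/*
 * Editor's sketch (userspace, illustrative): synchronous signal handling
 * through the syscall above.  The signal must be blocked first,
 * otherwise it may be delivered asynchronously instead of being left
 * pending for the dequeue:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { 5, 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) > 0)
 *		printf("SIGUSR1 from pid %d\n", (int)si.si_pid);
 *
 * A timeout returns -1 with errno == EAGAIN, matching the -EAGAIN
 * result above; interruption by another signal yields EINTR.
 */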
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (p && (tgid <= 0 || p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but does not belong to the target process anymore.  This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
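/*
 * Editor's note (illustrative): userspace normally reaches sys_tgkill()
 * through pthread_kill(3), which supplies the caller's tgid and the
 * target thread's kernel tid; a raw equivalent is:
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 *
 * where tid is a hypothetical kernel thread id as returned by gettid().
 * The tgid argument is what closes the PID-reuse race described above.
 */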
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
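/*
 * Editor's sketch (userspace, illustrative): the glibc sigqueue(3)
 * wrapper builds the siginfo for this syscall with si_code = SI_QUEUE
 * (a negative value, so it passes the check above) and a caller-chosen
 * payload:
 *
 *	#include <signal.h>
 *
 *	union sigval v;
 *
 *	v.sival_int = 42;
 *	sigqueue(target_pid, SIGRTMIN, v);
 *
 * The receiver finds the payload in si_value of the dequeued siginfo;
 * target_pid is a hypothetical destination process id.
 */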
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
			struct task_struct *t = current;
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
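/*
 * Editor's sketch (userspace, illustrative): the usual entry into
 * do_sigaction() is sigaction(2).  Installing a handler with SA_RESTART
 * and a handler-time mask, which ends up in the k_sigaction slot edited
 * above:
 *
 *	#include <signal.h>
 *
 *	struct sigaction sa = { 0 };
 *
 *	sa.sa_handler = handler;
 *	sa.sa_flags = SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGTERM);
 *	sigaction(SIGUSR1, &sa, NULL);
 *
 * (handler is a hypothetical function of type void (*)(int).)  Setting
 * sa_handler to SIG_IGN here also flushes any pending SIGUSR1, per the
 * POSIX rule quoted above.
 */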
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly
		 *	  old code may have been written using ss_flags==0
		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
		 *	  way that worked) - this fix preserves that older
		 *	  mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

out:
	return error;
}
2399 sys_sigpending(old_sigset_t __user
*set
)
2401 return do_sigpending(set
, sizeof(*set
));
2406 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2407 /* Some platforms have their own version with special arguments others
2408 support only sys_rt_sigprocmask. */
2411 sys_sigprocmask(int how
, old_sigset_t __user
*set
, old_sigset_t __user
*oset
)
2414 old_sigset_t old_set
, new_set
;
2418 if (copy_from_user(&new_set
, set
, sizeof(*set
)))
2420 new_set
&= ~(sigmask(SIGKILL
) | sigmask(SIGSTOP
));
2422 spin_lock_irq(¤t
->sighand
->siglock
);
2423 old_set
= current
->blocked
.sig
[0];
2431 sigaddsetmask(¤t
->blocked
, new_set
);
2434 sigdelsetmask(¤t
->blocked
, new_set
);
2437 current
->blocked
.sig
[0] = new_set
;
2441 recalc_sigpending();
2442 spin_unlock_irq(¤t
->sighand
->siglock
);
2448 old_set
= current
->blocked
.sig
[0];
2451 if (copy_to_user(oset
, &old_set
, sizeof(*oset
)))
2458 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
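/*
 * Editor's sketch (userspace, illustrative): rt_sigsuspend() exists to
 * close the classic race between unblocking a signal and sleeping.
 * Atomic check-then-wait with sigsuspend(3):
 *
 *	#include <signal.h>
 *
 *	sigset_t block, waitmask;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);
 *	while (!flag)
 *		sigsuspend(&waitmask);
 *	sigprocmask(SIG_SETMASK, &waitmask, NULL);
 *
 * (flag is a hypothetical volatile sig_atomic_t set by the SIGUSR1
 * handler.)  The saved_sigmask/TIF_RESTORE_SIGMASK dance above is what
 * restores the old mask only after the interrupted syscall returns to
 * user space.
 */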
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}