/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 * Changes to use preallocated sigqueue structures
 * to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
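/*
 * Illustrative example (editor's sketch, not in the original source):
 * a signal that is pending but also blocked contributes nothing, since
 * each word is and-ed with the complement of the blocked word:
 *
 *	sigset_t pending, blocked;
 *	sigemptyset(&pending);	sigaddset(&pending, SIGTERM);
 *	sigemptyset(&blocked);	sigaddset(&blocked, SIGTERM);
 *	has_pending_signals(&pending, &blocked);	// returns 0
 *
 * Only a signal that is pending and not blocked makes the result nonzero.
 */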
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    (freezing(t)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}
void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
/* Given the mask, find the first available signal that should be serviced. */
static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
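/*
 * Illustrative example (editor's sketch, not in the original source):
 * with SIGINT and SIGTERM both pending and a mask that blocks SIGINT,
 * the masked bit is skipped and the lowest serviceable signal wins:
 *
 *	pending->signal.sig[0] = sigmask(SIGINT) | sigmask(SIGTERM);
 *	mask->sig[0]           = sigmask(SIGINT);
 *	next_signal(pending, mask);	// == SIGTERM
 *
 * ffz(~x) finds the first set bit of x, hence the "+ 1" to convert a
 * bit position into a signal number.
 */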
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}
	recalc_sigpending_tsk(tsk);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped
	 * state.  By using wake_up_state, we ensure the process will wake
	 * up and handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;

	error = -EPERM;
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(process_session(current) != process_session(t)))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = security_task_kill(t, info, sig, 0);
	if (!error)
		audit_signal_info(sig, t); /* Let audit system see the signal */
	return error;
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
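/*
 * Descriptive note (editor's addition): legacy (non-realtime) signals
 * coalesce.  If e.g. SIGCHLD is already a member of the pending set,
 * LEGACY_QUEUE() is true and a second SIGCHLD is dropped rather than
 * queued, matching historical UNIX semantics.  Realtime signals
 * (>= SIGRTMIN) never match the first clause and therefore always queue.
 */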
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_tsk(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
/*
 * kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pgrp_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig)))
		read_lock(&tasklist_lock);

	p = pid_task(pid, PIDTYPE_PID);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);

	if (unlikely(sig_needs_tasklist(sig)))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();
	return error;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_pid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;

	if (!pid) {
		ret = kill_pgrp_info(sig, info, task_pgrp(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		ret = count ? retval : -ESRCH;
	} else if (pid < 0) {
		ret = kill_pgrp_info(sig, info, find_pid(-pid));
	} else {
		ret = kill_pid_info(sig, info, find_pid(pid));
	}

	return ret;
}
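/*
 * Summary of the pid encoding handled above (restating the code, for
 * quick reference):
 *
 *	pid > 0		signal the process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process the caller may signal, except
 *			init (pid 1) and the caller's own thread group
 *	pid < -1	signal every process in process group -pid
 */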
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}
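/*
 * Editor's sketch of the intended lifecycle (inferred from the comment
 * above; the exact call sites live in the posix-timers code):
 *
 *	struct sigqueue *q = sigqueue_alloc();	// at timer_create time
 *	if (!q)
 *		return -EAGAIN;			// reportable failure
 *	...
 *	send_sigqueue(sig, q, task);		// at each timer expiry
 *	...
 *	sigqueue_free(q);			// at timer_delete time
 *
 * Because the sigqueue is preallocated, expiry-time delivery cannot
 * fail for lack of memory.
 */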
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags))) {
		ret = -1;
		goto out_err;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();

	return ret;
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;

	if (unlikely(current->parent == current->real_parent &&
		    (current->ptrace & PT_ATTACHED)))
		return 0;

	if (unlikely(current->signal == current->parent->signal) &&
	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
		return 0;

	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	try_to_freeze();
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		struct task_struct *t;

		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!t->exit_state &&
			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

	try_to_freeze();

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Init of a pid space gets no signals it doesn't want from
		 * within that pid space. It can of course get signals from
		 * its parent pid space.
		 */
		if (current == child_reaper(current))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
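/*
 * Editor's sketch (illustrative, not from the original source): a kernel
 * thread that wants to block everything could do
 *
 *	sigset_t all;
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 *
 * Unlike the syscall paths below, nothing here strips SIGKILL/SIGSTOP
 * from the set, which is exactly the "unblockable" behaviour the NOTE
 * above warns about.
 */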
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested while we are sleeping in so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
, int sig
)
2116 struct siginfo info
;
2118 info
.si_signo
= sig
;
2120 info
.si_code
= SI_USER
;
2121 info
.si_pid
= current
->tgid
;
2122 info
.si_uid
= current
->uid
;
2124 return kill_something_info(sig
, &info
, pid
);
static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (p && (tgid <= 0 || p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but it no longer belongs to the target process.  This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
			struct task_struct *t = current;
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
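/*
 * Editor's sketch (userspace view, illustrative only): the checks above
 * mirror what a sigaltstack(2) caller sets up:
 *
 *	stack_t ss;
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;		// must be >= MINSIGSTKSZ
 *	ss.ss_flags = 0;		// 0 or SS_ONSTACK both accepted
 *	sigaltstack(&ss, NULL);
 *
 * Changing the stack is refused with -EPERM while the caller is
 * currently executing on it (on_sig_stack(sp)).
 */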
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
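/*
 * Editor's note (illustrative): the unblock-and-sleep above must be
 * atomic with respect to delivery, which is why the mask swap happens
 * under siglock before the TASK_INTERRUPTIBLE schedule().  A userspace
 * equivalent built from sigprocmask() + pause() would leave a window in
 * which the awaited signal could arrive and be lost; sigsuspend(2)
 * exists to close that window.
 */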
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}