/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/posix-timers.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;
/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals, when not blocked and set to SIG_DFL, behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
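
/*
 * Worked example of the table above (editor's illustration, not from the
 * original source): an unhandled SIGTERM takes the "terminate" action for
 * the whole thread group, and only the group leader reports WIFSIGNALED
 * to its parent's wait*() call:
 *
 *	kill(pid, SIGTERM);		// userspace: whole group exits
 *	waitpid(pid, &status, 0);
 *	// WIFSIGNALED(status) && WTERMSIG(status) == SIGTERM
 */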
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL)   |  M(SIGSTOP)                                   )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )

#define SIG_KERNEL_COREDUMP_MASK (\
        M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
        M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
        M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )

#define SIG_KERNEL_IGNORE_MASK (\
        M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )

#define sig_kernel_only(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
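
/*
 * Worked expansion (editor's illustration, not from the original source):
 * for SIGTSTP, a classic signal below SIGRTMIN,
 *
 *	sig_kernel_stop(SIGTSTP)
 *	  => (SIGTSTP < SIGRTMIN) && T(SIGTSTP, SIG_KERNEL_STOP_MASK)
 *	  => M(SIGTSTP) & (M(SIGSTOP)|M(SIGTSTP)|M(SIGTTIN)|M(SIGTTOU))
 *	  => nonzero
 *
 * Real-time signals (>= SIGRTMIN) always fail the first test, so none of
 * the sig_kernel_* predicates ever applies to them.
 */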
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;

	atomic_inc(&t->user->sigpending);
	if (override_rlimit ||
	    atomic_read(&t->user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&t->user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->lock = NULL;
		q->user = get_uid(t->user);
	}
	return(q);
}
static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count)) {
		posix_cpu_timers_exit_group(tsk);
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below.  */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		exit_thread_group_keys(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}
void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
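
/*
 * Usage sketch (editor's illustration; the names are hypothetical, not
 * part of this file): a driver brackets a critical section so that any
 * signal is deferred unless its notifier approves delivery.
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;	// hypothetical private data
 *		return dev->allow_signals;	// nonzero: act on the signal
 *	}
 *
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, dev, &mask);
 *	... critical section ...
 *	unblock_all_signals();
 */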
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}
	recalc_sigpending();

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private){
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
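
/*
 * Caller pattern (editor's illustration): per the locking rule above,
 * architecture signal-delivery code dequeues under the siglock, e.g.
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */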
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		return error;
	error = -EPERM;
	if ((!info || ((unsigned long)info != 1 &&
			(unsigned long)info != 2 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;
	return security_task_kill(t, info, sig);
}

static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent,
				     int why);
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_STOPPED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
							 CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_CONTINUED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
							 CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     ((unsigned long) info < 2 ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
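
/*
 * Example (editor's illustration): LEGACY_QUEUE(&t->pending, SIGCHLD) is
 * true when a SIGCHLD is already pending for t, so a second SIGCHLD sent
 * before the first is dequeued is dropped -- the historic "at most one
 * pending instance" rule for non-real-time signals.
 */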
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
	assert_spin_locked(&t->sighand->siglock);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask) 			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	unsigned int mask;
	struct task_struct *t;

	/*
	 * Don't bother traced and stopped tasks (but
	 * SIGKILL will punch through that).
	 */
	mask = TASK_STOPPED | TASK_TRACED;
	if (sig == SIGKILL)
		mask = 0;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (sig < 0 || sig > _NSIG)
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}
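
/*
 * Lifecycle sketch (editor's illustration): the POSIX-timer code
 * preallocates at timer_create time so a later expiration can never
 * fail for lack of memory.
 *
 *	q = sigqueue_alloc();		// timer_create: may fail -> EAGAIN
 *	...
 *	send_sigqueue(sig, q, task);	// timer expiry: no allocation
 *	...
 *	sigqueue_free(q);		// timer_delete
 */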
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (sig > 0 && sig <= _NSIG)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
			 int why)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (likely(current->ptrace & PT_PTRACED) &&
	    likely(current->parent != current->real_parent ||
		   !(current->ptrace & PT_ATTACHED)) &&
	    (likely(current->parent->signal != current->signal) ||
	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
		do_notify_parent_cldstop(current, current->parent,
					 CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
			/*
			 * Another stop or continue happened while we
			 * didn't have the lock.  We can just swallow this
			 * signal now.  If we raced with a SIGCONT, that
			 * should have just cleared it now.  If we raced
			 * with another processor delivering a stop signal,
			 * then the SIGCONT that wakes us up should clear it.
			 */
			read_unlock(&tasklist_lock);
			return 0;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
	return 1;
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

#endif /* HAVE_ARCH_GET_SIGNAL_TO_DELIVER */
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
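
/*
 * Example (editor's illustration): a kernel thread using the kernel-side
 * sigprocmask() to block everything except the signals it wants; unlike
 * the user-mode interface, this would happily block SIGKILL too.
 *
 *	sigset_t blocked;
 *
 *	sigfillset(&blocked);
 *	sigdelset(&blocked, SIGTERM);	// illustrative choice
 *	sigprocmask(SIG_SETMASK, &blocked, NULL);
 */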
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			/* Round up so a nonzero timeout sleeps at least
			 * one jiffy. */
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock the signals
			 * we're interested in while we sleep, so that
			 * we're awakened when one of them arrives. */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked,
				   &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			if (current->flags & PF_FREEZE)
				refrigerator(PF_FREEZE);
			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
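/*
 * A minimal userspace sketch (not kernel code) of the synchronous wait
 * implemented above, assuming the C library's sigtimedwait(2) wrapper:
 * block SIGUSR1, then accept it with a two-second timeout instead of
 * taking it in a handler.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { 2, 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *			printf("got SIGUSR1 from pid %d\n",
 *			       (int)info.si_pid);
 *		else
 *			perror("sigtimedwait");
 *		return 0;
 *	}
 */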
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
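/*
 * The pid argument is dispatched by kill_something_info() with the
 * usual kill(2) semantics: pid > 0 names one process, pid == 0 the
 * caller's process group, pid == -1 every process the caller may
 * signal, and pid < -1 the process group -pid.  A userspace sketch of
 * the null-signal existence/permission probe (the helper name
 * process_exists is hypothetical):
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <sys/types.h>
 *
 *	static int process_exists(pid_t pid)
 *	{
 *		return kill(pid, 0) == 0 || errno == EPERM;
 *	}
 */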
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
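/*
 * Illustrative userspace sketch: glibc of this era ships no tgkill()
 * wrapper, so the call goes through syscall(2) (the helper name
 * tgkill_wrapper is hypothetical).  Thanks to the tgid check above, a
 * PID recycled into another process yields ESRCH instead of a stray
 * signal.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long tgkill_wrapper(int tgid, int tid, int sig)
 *	{
 *		return syscall(__NR_tgkill, tgid, tid, sig);
 *	}
 */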
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info. */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, &info, pid);
}
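/*
 * Userspace normally reaches this syscall through sigqueue(3), which
 * builds a siginfo with si_code = SI_QUEUE (negative, so it passes the
 * check above) and the caller-supplied payload.  A sketch (the helper
 * name send_value is hypothetical):
 *
 *	#include <signal.h>
 *	#include <sys/types.h>
 *
 *	static int send_value(pid_t pid, int value)
 *	{
 *		union sigval sv;
 *
 *		sv.sival_int = value;
 *		return sigqueue(pid, SIGRTMIN, sv);
 *	}
 */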
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked."
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
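/*
 * A userspace sketch (not kernel code) of the POSIX discard rule quoted
 * above: a blocked, pending signal vanishes once its action is set to
 * SIG_IGN, so this program prints 0.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set, pending;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		raise(SIGUSR1);
 *		signal(SIGUSR1, SIG_IGN);
 *		sigpending(&pending);
 *		printf("%d\n", sigismember(&pending, SIGUSR1));
 *		return 0;
 *	}
 */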
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0 to
		 * mean ss_flags==SS_ONSTACK (as this was the only way
		 * that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

out:
	return error;
}
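/*
 * Typical userspace use of the interface above: install an alternate
 * stack so a handler can run even after the normal stack overflows.
 * A minimal sketch with error checking omitted; the helper name
 * setup_altstack is hypothetical.  Note ss_flags = 0, which the compat
 * note above explicitly keeps working.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	static void setup_altstack(void (*handler)(int))
 *	{
 *		stack_t ss;
 *		struct sigaction sa;
 *
 *		ss.ss_sp = malloc(SIGSTKSZ);
 *		ss.ss_size = SIGSTKSZ;
 *		ss.ss_flags = 0;
 *		sigaltstack(&ss, NULL);
 *
 *		sa.sa_handler = handler;
 *		sa.sa_flags = SA_ONSTACK;
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */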
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask. */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
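/*
 * The three "how" values above implement the usual mask algebra
 * (union, difference, assignment).  A common userspace pattern is
 * bracketing a critical section (a sketch; the helper name
 * with_sigint_blocked is hypothetical):
 *
 *	#include <signal.h>
 *
 *	static void with_sigint_blocked(void (*fn)(void))
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGINT);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		fn();
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */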
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
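/*
 * The SA_ONESHOT|SA_NOMASK flags above give signal(2) its historical
 * System V behaviour: the handler is reset to SIG_DFL on delivery and
 * the signal is not blocked while the handler runs.  Portable code
 * therefore prefers sigaction(); a sketch (the helper name
 * reliable_signal is hypothetical):
 *
 *	#include <signal.h>
 *
 *	static void reliable_signal(int sig, void (*handler)(int))
 *	{
 *		struct sigaction sa;
 *
 *		sa.sa_handler = handler;
 *		sigemptyset(&sa.sa_mask);
 *		sa.sa_flags = 0;
 *		sigaction(sig, &sa, NULL);
 *	}
 */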
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}