src/system/kernel/signal.cpp
1 /*
2 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
3 * Copyright 2011-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
4 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
5 * Copyright 2002, Angelo Mottola, a.mottola@libero.it.
7 * Distributed under the terms of the MIT License.
8 */
11 /*! POSIX signal handling routines */
14 #include <ksignal.h>
16 #include <errno.h>
17 #include <stddef.h>
18 #include <string.h>
20 #include <OS.h>
21 #include <KernelExport.h>
23 #include <cpu.h>
24 #include <core_dump.h>
25 #include <debug.h>
26 #include <kernel.h>
27 #include <kscheduler.h>
28 #include <sem.h>
29 #include <syscall_restart.h>
30 #include <syscall_utils.h>
31 #include <team.h>
32 #include <thread.h>
33 #include <tracing.h>
34 #include <user_debugger.h>
35 #include <user_thread.h>
36 #include <util/AutoLock.h>
39 //#define TRACE_SIGNAL
40 #ifdef TRACE_SIGNAL
41 # define TRACE(x) dprintf x
42 #else
43 # define TRACE(x) ;
44 #endif
47 #define BLOCKABLE_SIGNALS \
48 (~(KILL_SIGNALS | SIGNAL_TO_MASK(SIGSTOP) \
49 | SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD) \
50 | SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD) \
51 | SIGNAL_TO_MASK(SIGNAL_CANCEL_THREAD)))
52 #define STOP_SIGNALS \
53 (SIGNAL_TO_MASK(SIGSTOP) | SIGNAL_TO_MASK(SIGTSTP) \
54 | SIGNAL_TO_MASK(SIGTTIN) | SIGNAL_TO_MASK(SIGTTOU))
55 #define CONTINUE_SIGNALS \
56 (SIGNAL_TO_MASK(SIGCONT) | SIGNAL_TO_MASK(SIGNAL_CONTINUE_THREAD) \
57 | SIGNAL_TO_MASK(SIGNAL_DEBUG_THREAD))
58 #define DEFAULT_IGNORE_SIGNALS \
59 (SIGNAL_TO_MASK(SIGCHLD) | SIGNAL_TO_MASK(SIGWINCH) \
60 | SIGNAL_TO_MASK(SIGCONT) \
61 | SIGNAL_RANGE_TO_MASK(SIGNAL_REALTIME_MIN, SIGNAL_REALTIME_MAX))
62 #define NON_DEFERRABLE_SIGNALS \
63 (KILL_SIGNALS \
64 | SIGNAL_TO_MASK(SIGILL) \
65 | SIGNAL_TO_MASK(SIGFPE) \
66 | SIGNAL_TO_MASK(SIGSEGV))
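/*	The masks above are ordinary sigset_t bit sets built with SIGNAL_TO_MASK(),
	so membership tests are plain bitwise ANDs. A minimal illustrative sketch
	(not part of the original source):

	\code
	static inline bool
	is_stop_or_kill_signal(uint32 signalNumber)
	{
		// true for SIGSTOP/SIGTSTP/SIGTTIN/SIGTTOU and the kill signals
		return (SIGNAL_TO_MASK(signalNumber)
			& (STOP_SIGNALS | KILL_SIGNALS)) != 0;
	}
	\endcode
*/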
69 static const struct {
70 const char* name;
71 int32 priority;
72 } kSignalInfos[__MAX_SIGNO + 1] = {
73 {"NONE", -1},
74 {"HUP", 0},
75 {"INT", 0},
76 {"QUIT", 0},
77 {"ILL", 0},
78 {"CHLD", 0},
79 {"ABRT", 0},
80 {"PIPE", 0},
81 {"FPE", 0},
82 {"KILL", 100},
83 {"STOP", 0},
84 {"SEGV", 0},
85 {"CONT", 0},
86 {"TSTP", 0},
87 {"ALRM", 0},
88 {"TERM", 0},
89 {"TTIN", 0},
90 {"TTOU", 0},
91 {"USR1", 0},
92 {"USR2", 0},
93 {"WINCH", 0},
94 {"KILLTHR", 100},
95 {"TRAP", 0},
96 {"POLL", 0},
97 {"PROF", 0},
98 {"SYS", 0},
99 {"URG", 0},
100 {"VTALRM", 0},
101 {"XCPU", 0},
102 {"XFSZ", 0},
103 {"SIGBUS", 0},
104 {"SIGRESERVED1", 0},
105 {"SIGRESERVED2", 0},
106 {"SIGRT1", 8},
107 {"SIGRT2", 7},
108 {"SIGRT3", 6},
109 {"SIGRT4", 5},
110 {"SIGRT5", 4},
111 {"SIGRT6", 3},
112 {"SIGRT7", 2},
113 {"SIGRT8", 1},
114 {"invalid 41", 0},
115 {"invalid 42", 0},
116 {"invalid 43", 0},
117 {"invalid 44", 0},
118 {"invalid 45", 0},
119 {"invalid 46", 0},
120 {"invalid 47", 0},
121 {"invalid 48", 0},
122 {"invalid 49", 0},
123 {"invalid 50", 0},
124 {"invalid 51", 0},
125 {"invalid 52", 0},
126 {"invalid 53", 0},
127 {"invalid 54", 0},
128 {"invalid 55", 0},
129 {"invalid 56", 0},
130 {"invalid 57", 0},
131 {"invalid 58", 0},
132 {"invalid 59", 0},
133 {"invalid 60", 0},
134 {"invalid 61", 0},
135 {"invalid 62", 0},
136 {"CANCEL_THREAD", 0},
137 {"CONTINUE_THREAD", 0} // priority must be <= that of SIGSTOP
141 static inline const char*
142 signal_name(uint32 number)
144 return number <= __MAX_SIGNO ? kSignalInfos[number].name : "invalid";
148 // #pragma mark - SignalHandledCaller
151 struct SignalHandledCaller {
152 SignalHandledCaller(Signal* signal)
154 fSignal(signal)
158 ~SignalHandledCaller()
160 Done();
163 void Done()
165 if (fSignal != NULL) {
166 fSignal->Handled();
167 fSignal = NULL;
171 private:
172 Signal* fSignal;
176 // #pragma mark - QueuedSignalsCounter
179 /*! Creates a counter with the given limit.
180 The limit defines the maximum the counter may reach. Since the
181 BReferenceable's reference count is used, it is assumed that the owning
182 team holds a reference and the reference count is one greater than the
183 counter value.
184 \param limit The maximum allowed value the counter may have. When
185 \code < 0 \endcode, the value is not limited.
187 QueuedSignalsCounter::QueuedSignalsCounter(int32 limit)
189 fLimit(limit)
194 /*! Increments the counter, if the limit allows that.
195 \return \c true, if incrementing the counter succeeded, \c false otherwise.
197 bool
198 QueuedSignalsCounter::Increment()
200 // no limit => no problem
201 if (fLimit < 0) {
202 AcquireReference();
203 return true;
206 // Increment the reference count manually, so we can check atomically. We
207 // compare the old value against fLimit using >, which accounts for the one
208 // reference held by our (primary) owner that we don't want to count.
209 if (atomic_add(&fReferenceCount, 1) > fLimit) {
210 ReleaseReference();
211 return false;
214 return true;
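/*	Illustrative sketch (not part of the original source) of how Increment()
	and Decrement() are meant to be paired around queuing a signal; the counter
	is obtained from the owning team, as done in Signal::CreateQueuable()
	below:

	\code
	QueuedSignalsCounter* counter
		= thread_get_current_thread()->team->QueuedSignalsCounter();
	if (!counter->Increment())
		return EAGAIN;
			// queuing limit reached

	// ...if actually queuing the signal fails later, undo the increment:
	counter->Decrement();
	\endcode
*/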
218 // #pragma mark - Signal
221 Signal::Signal()
223 fCounter(NULL),
224 fPending(false)
229 Signal::Signal(const Signal& other)
231 fCounter(NULL),
232 fNumber(other.fNumber),
233 fSignalCode(other.fSignalCode),
234 fErrorCode(other.fErrorCode),
235 fSendingProcess(other.fSendingProcess),
236 fSendingUser(other.fSendingUser),
237 fStatus(other.fStatus),
238 fPollBand(other.fPollBand),
239 fAddress(other.fAddress),
240 fUserValue(other.fUserValue),
241 fPending(false)
246 Signal::Signal(uint32 number, int32 signalCode, int32 errorCode,
247 pid_t sendingProcess)
249 fCounter(NULL),
250 fNumber(number),
251 fSignalCode(signalCode),
252 fErrorCode(errorCode),
253 fSendingProcess(sendingProcess),
254 fSendingUser(getuid()),
255 fStatus(0),
256 fPollBand(0),
257 fAddress(NULL),
258 fPending(false)
260 fUserValue.sival_ptr = NULL;
264 Signal::~Signal()
266 if (fCounter != NULL)
267 fCounter->ReleaseReference();
271 /*! Creates a queuable clone of the given signal.
272 Also enforces the current team's signal queuing limit.
274 \param signal The signal to clone.
275 \param queuingRequired If \c true, the function will return an error code
276 when creating the clone fails for any reason. Otherwise, the function
277 will set \a _signalToQueue to \c NULL, but still return \c B_OK.
278 \param _signalToQueue Return parameter. Set to the clone of the signal.
279 \return When \c queuingRequired is \c false, always \c B_OK. Otherwise
280 \c B_OK when creating the signal clone succeeds, or another error code
281 when it fails.
283 /*static*/ status_t
284 Signal::CreateQueuable(const Signal& signal, bool queuingRequired,
285 Signal*& _signalToQueue)
287 _signalToQueue = NULL;
289 // If interrupts are disabled, we can't allocate a signal.
290 if (!are_interrupts_enabled())
291 return queuingRequired ? B_BAD_VALUE : B_OK;
293 // increment the queued signals counter
294 QueuedSignalsCounter* counter
295 = thread_get_current_thread()->team->QueuedSignalsCounter();
296 if (!counter->Increment())
297 return queuingRequired ? EAGAIN : B_OK;
299 // allocate the signal
300 Signal* signalToQueue = new(std::nothrow) Signal(signal);
301 if (signalToQueue == NULL) {
302 counter->Decrement();
303 return queuingRequired ? B_NO_MEMORY : B_OK;
306 signalToQueue->fCounter = counter;
308 _signalToQueue = signalToQueue;
309 return B_OK;
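/*	Illustrative sketch (not part of the original source) of the contract when
	\c queuingRequired is \c false; \c signal stands for an existing Signal
	object:

	\code
	Signal* clone = NULL;
	if (Signal::CreateQueuable(signal, false, clone) == B_OK && clone == NULL) {
		// Queuing wasn't possible (interrupts disabled, limit reached, or out
		// of memory) -- the caller falls back to delivering signal.Number()
		// as an unqueued signal.
	}
	\endcode
*/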
312 void
313 Signal::SetTo(uint32 number)
315 Team* team = thread_get_current_thread()->team;
317 fNumber = number;
318 fSignalCode = SI_USER;
319 fErrorCode = 0;
320 fSendingProcess = team->id;
321 fSendingUser = team->effective_uid;
322 fStatus = 0;
323 fPollBand = 0;
324 fAddress = NULL;
325 fUserValue.sival_ptr = NULL;
329 int32
330 Signal::Priority() const
332 return kSignalInfos[fNumber].priority;
336 void
337 Signal::Handled()
339 ReleaseReference();
343 void
344 Signal::LastReferenceReleased()
346 if (are_interrupts_enabled())
347 delete this;
348 else
349 deferred_delete(this);
353 // #pragma mark - PendingSignals
356 PendingSignals::PendingSignals()
358 fQueuedSignalsMask(0),
359 fUnqueuedSignalsMask(0)
364 PendingSignals::~PendingSignals()
366 Clear();
370 /*! Returns the priority of the highest-priority signal among those in
371 \a nonBlocked.
372 \param nonBlocked The mask with the non-blocked signals.
373 \return The priority of the highest priority non-blocked signal, or, if all
374 signals are blocked, \c -1.
376 int32
377 PendingSignals::HighestSignalPriority(sigset_t nonBlocked) const
379 Signal* queuedSignal;
380 int32 unqueuedSignal;
381 return _GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal);
385 void
386 PendingSignals::Clear()
388 // release references of all queued signals
389 while (Signal* signal = fQueuedSignals.RemoveHead())
390 signal->Handled();
392 fQueuedSignalsMask = 0;
393 fUnqueuedSignalsMask = 0;
397 /*! Adds a signal.
398 Takes over the reference to the signal from the caller.
400 void
401 PendingSignals::AddSignal(Signal* signal)
403 // queue according to priority
404 int32 priority = signal->Priority();
405 Signal* otherSignal = NULL;
406 for (SignalList::Iterator it = fQueuedSignals.GetIterator();
407 (otherSignal = it.Next()) != NULL;) {
408 if (priority > otherSignal->Priority())
409 break;
412 fQueuedSignals.InsertBefore(otherSignal, signal);
413 signal->SetPending(true);
415 fQueuedSignalsMask |= SIGNAL_TO_MASK(signal->Number());
419 void
420 PendingSignals::RemoveSignal(Signal* signal)
422 signal->SetPending(false);
423 fQueuedSignals.Remove(signal);
424 _UpdateQueuedSignalMask();
428 void
429 PendingSignals::RemoveSignals(sigset_t mask)
431 // remove from queued signals
432 if ((fQueuedSignalsMask & mask) != 0) {
433 for (SignalList::Iterator it = fQueuedSignals.GetIterator();
434 Signal* signal = it.Next();) {
435 // remove signal, if in mask
436 if ((SIGNAL_TO_MASK(signal->Number()) & mask) != 0) {
437 it.Remove();
438 signal->SetPending(false);
439 signal->Handled();
443 fQueuedSignalsMask &= ~mask;
446 // remove from unqueued signals
447 fUnqueuedSignalsMask &= ~mask;
451 /*! Removes and returns a signal in \a nonBlocked that has the highest priority.
452 The caller gets a reference to the returned signal, if any.
453 \param nonBlocked The mask of non-blocked signals.
454 \param buffer If the signal is not queued this buffer is returned. In this
455 case the method acquires a reference to \a buffer, so that the caller
456 gets a reference also in this case.
457 \return The removed signal or \c NULL, if all signals are blocked.
459 Signal*
460 PendingSignals::DequeueSignal(sigset_t nonBlocked, Signal& buffer)
462 // find the signal with the highest priority
463 Signal* queuedSignal;
464 int32 unqueuedSignal;
465 if (_GetHighestPrioritySignal(nonBlocked, queuedSignal, unqueuedSignal) < 0)
466 return NULL;
468 // if it is a queued signal, dequeue it
469 if (queuedSignal != NULL) {
470 fQueuedSignals.Remove(queuedSignal);
471 queuedSignal->SetPending(false);
472 _UpdateQueuedSignalMask();
473 return queuedSignal;
476 // it is unqueued -- remove from mask
477 fUnqueuedSignalsMask &= ~SIGNAL_TO_MASK(unqueuedSignal);
479 // init buffer
480 buffer.SetTo(unqueuedSignal);
481 buffer.AcquireReference();
482 return &buffer;
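/*	Illustrative sketch (not part of the original source) of the reference
	contract of DequeueSignal(); \c pendingSignals and \c nonBlockedMask stand
	for a PendingSignals instance and a non-blocked signal mask:

	\code
	Signal stackBuffer;
	Signal* signal = pendingSignals.DequeueSignal(nonBlockedMask, stackBuffer);
	if (signal != NULL) {
		// ...inspect or deliver the signal...
		signal->Handled();
			// drops the reference acquired via DequeueSignal(), whether the
			// signal was queued or came from stackBuffer
	}
	\endcode
*/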
486 /*! Returns the priority of the highest-priority signal among those in
487 \a nonBlocked.
488 \param nonBlocked The mask with the non-blocked signals.
489 \param _queuedSignal If the found signal is a queued signal, the variable
490 will be set to that signal, otherwise to \c NULL.
491 \param _unqueuedSignal If the found signal is an unqueued signal, the
492 variable is set to that signal's number, otherwise to \c -1.
493 \return The priority of the highest priority non-blocked signal, or, if all
494 signals are blocked, \c -1.
496 int32
497 PendingSignals::_GetHighestPrioritySignal(sigset_t nonBlocked,
498 Signal*& _queuedSignal, int32& _unqueuedSignal) const
500 // check queued signals
501 Signal* queuedSignal = NULL;
502 int32 queuedPriority = -1;
504 if ((fQueuedSignalsMask & nonBlocked) != 0) {
505 for (SignalList::ConstIterator it = fQueuedSignals.GetIterator();
506 Signal* signal = it.Next();) {
507 if ((SIGNAL_TO_MASK(signal->Number()) & nonBlocked) != 0) {
508 queuedPriority = signal->Priority();
509 queuedSignal = signal;
510 break;
515 // check unqueued signals
516 int32 unqueuedSignal = -1;
517 int32 unqueuedPriority = -1;
519 sigset_t unqueuedSignals = fUnqueuedSignalsMask & nonBlocked;
520 if (unqueuedSignals != 0) {
521 int32 signal = 1;
522 while (unqueuedSignals != 0) {
523 sigset_t mask = SIGNAL_TO_MASK(signal);
524 if ((unqueuedSignals & mask) != 0) {
525 int32 priority = kSignalInfos[signal].priority;
526 if (priority > unqueuedPriority) {
527 unqueuedSignal = signal;
528 unqueuedPriority = priority;
530 unqueuedSignals &= ~mask;
533 signal++;
537 // Return found queued or unqueued signal, whichever has the higher
538 // priority.
539 if (queuedPriority >= unqueuedPriority) {
540 _queuedSignal = queuedSignal;
541 _unqueuedSignal = -1;
542 return queuedPriority;
545 _queuedSignal = NULL;
546 _unqueuedSignal = unqueuedSignal;
547 return unqueuedPriority;
551 void
552 PendingSignals::_UpdateQueuedSignalMask()
554 sigset_t mask = 0;
555 for (SignalList::Iterator it = fQueuedSignals.GetIterator();
556 Signal* signal = it.Next();) {
557 mask |= SIGNAL_TO_MASK(signal->Number());
560 fQueuedSignalsMask = mask;
564 // #pragma mark - signal tracing
567 #if SIGNAL_TRACING
569 namespace SignalTracing {
572 class HandleSignal : public AbstractTraceEntry {
573 public:
574 HandleSignal(uint32 signal)
576 fSignal(signal)
578 Initialized();
581 virtual void AddDump(TraceOutput& out)
583 out.Print("signal handle: %" B_PRIu32 " (%s)" , fSignal,
584 signal_name(fSignal));
587 private:
588 uint32 fSignal;
592 class ExecuteSignalHandler : public AbstractTraceEntry {
593 public:
594 ExecuteSignalHandler(uint32 signal, struct sigaction* handler)
596 fSignal(signal),
597 fHandler((void*)handler->sa_handler)
599 Initialized();
602 virtual void AddDump(TraceOutput& out)
604 out.Print("signal exec handler: signal: %" B_PRIu32 " (%s), "
605 "handler: %p", fSignal, signal_name(fSignal), fHandler);
608 private:
609 uint32 fSignal;
610 void* fHandler;
614 class SendSignal : public AbstractTraceEntry {
615 public:
616 SendSignal(pid_t target, uint32 signal, uint32 flags)
618 fTarget(target),
619 fSignal(signal),
620 fFlags(flags)
622 Initialized();
625 virtual void AddDump(TraceOutput& out)
627 out.Print("signal send: target: %" B_PRId32 ", signal: %" B_PRIu32
628 " (%s), flags: %#" B_PRIx32, fTarget, fSignal,
629 signal_name(fSignal), fFlags);
632 private:
633 pid_t fTarget;
634 uint32 fSignal;
635 uint32 fFlags;
639 class SigAction : public AbstractTraceEntry {
640 public:
641 SigAction(uint32 signal, const struct sigaction* act)
643 fSignal(signal),
644 fAction(*act)
646 Initialized();
649 virtual void AddDump(TraceOutput& out)
651 out.Print("signal action: signal: %" B_PRIu32 " (%s), "
652 "action: {handler: %p, flags: %#x, mask: %#" B_PRIx64 "}",
653 fSignal, signal_name(fSignal), fAction.sa_handler,
654 fAction.sa_flags, (uint64)fAction.sa_mask);
657 private:
658 uint32 fSignal;
659 struct sigaction fAction;
663 class SigProcMask : public AbstractTraceEntry {
664 public:
665 SigProcMask(int how, sigset_t mask)
667 fHow(how),
668 fMask(mask),
669 fOldMask(thread_get_current_thread()->sig_block_mask)
671 Initialized();
674 virtual void AddDump(TraceOutput& out)
676 const char* how = "invalid";
677 switch (fHow) {
678 case SIG_BLOCK:
679 how = "block";
680 break;
681 case SIG_UNBLOCK:
682 how = "unblock";
683 break;
684 case SIG_SETMASK:
685 how = "set";
686 break;
689 out.Print("signal proc mask: %s 0x%llx, old mask: 0x%llx", how,
690 (long long)fMask, (long long)fOldMask);
693 private:
694 int fHow;
695 sigset_t fMask;
696 sigset_t fOldMask;
700 class SigSuspend : public AbstractTraceEntry {
701 public:
702 SigSuspend(sigset_t mask)
704 fMask(mask),
705 fOldMask(thread_get_current_thread()->sig_block_mask)
707 Initialized();
710 virtual void AddDump(TraceOutput& out)
712 out.Print("signal suspend: %#llx, old mask: %#llx",
713 (long long)fMask, (long long)fOldMask);
716 private:
717 sigset_t fMask;
718 sigset_t fOldMask;
722 class SigSuspendDone : public AbstractTraceEntry {
723 public:
724 SigSuspendDone()
726 fSignals(thread_get_current_thread()->ThreadPendingSignals())
728 Initialized();
731 virtual void AddDump(TraceOutput& out)
733 out.Print("signal suspend done: %#" B_PRIx32, fSignals);
736 private:
737 uint32 fSignals;
740 } // namespace SignalTracing
742 # define T(x) new(std::nothrow) SignalTracing::x
744 #else
745 # define T(x)
746 #endif // SIGNAL_TRACING
749 // #pragma mark -
752 /*! Updates the given thread's Thread::flags field according to what signals are
753 pending.
754 The caller must hold \c team->signal_lock.
756 static void
757 update_thread_signals_flag(Thread* thread)
759 sigset_t mask = ~thread->sig_block_mask;
760 if ((thread->AllPendingSignals() & mask) != 0)
761 atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
762 else
763 atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
767 /*! Updates the current thread's Thread::flags field according to what signals
768 are pending.
769 The caller must hold \c team->signal_lock.
771 static void
772 update_current_thread_signals_flag()
774 update_thread_signals_flag(thread_get_current_thread());
778 /*! Updates all of the given team's threads' Thread::flags fields according to
779 what signals are pending.
780 The caller must hold \c signal_lock.
782 static void
783 update_team_threads_signal_flag(Team* team)
785 for (Thread* thread = team->thread_list; thread != NULL;
786 thread = thread->team_next) {
787 update_thread_signals_flag(thread);
792 /*! Notifies the user debugger about a signal to be handled.
794 The caller must not hold any locks.
796 \param thread The current thread.
797 \param signal The signal to be handled.
798 \param handler The installed signal handler for the signal.
799 \param deadly Indicates whether the signal is deadly.
800 \return \c true, if the signal shall be handled, \c false, if it shall be
801 ignored.
803 static bool
804 notify_debugger(Thread* thread, Signal* signal, struct sigaction& handler,
805 bool deadly)
807 uint64 signalMask = SIGNAL_TO_MASK(signal->Number());
809 // first check the ignore signal masks the debugger specified for the thread
810 InterruptsSpinLocker threadDebugInfoLocker(thread->debug_info.lock);
812 if ((thread->debug_info.ignore_signals_once & signalMask) != 0) {
813 thread->debug_info.ignore_signals_once &= ~signalMask;
814 return true;
817 if ((thread->debug_info.ignore_signals & signalMask) != 0)
818 return true;
820 threadDebugInfoLocker.Unlock();
822 // deliver the event
823 return user_debug_handle_signal(signal->Number(), &handler, deadly);
827 /*! Removes and returns a signal with the highest priority in \a nonBlocked that
828 is pending in the given thread or its team.
829 After dequeuing the signal, the Thread::flags fields of the affected
830 threads are updated.
831 The caller gets a reference to the returned signal, if any.
832 The caller must hold \c team->signal_lock.
833 \param thread The thread.
834 \param nonBlocked The mask of non-blocked signals.
835 \param buffer If the signal is not queued this buffer is returned. In this
836 case the method acquires a reference to \a buffer, so that the caller
837 gets a reference also in this case.
838 \return The removed signal or \c NULL, if all signals are blocked.
840 static Signal*
841 dequeue_thread_or_team_signal(Thread* thread, sigset_t nonBlocked,
842 Signal& buffer)
844 Team* team = thread->team;
845 Signal* signal;
846 if (team->HighestPendingSignalPriority(nonBlocked)
847 > thread->HighestPendingSignalPriority(nonBlocked)) {
848 signal = team->DequeuePendingSignal(nonBlocked, buffer);
849 update_team_threads_signal_flag(team);
850 } else {
851 signal = thread->DequeuePendingSignal(nonBlocked, buffer);
852 update_thread_signals_flag(thread);
855 return signal;
859 static status_t
860 setup_signal_frame(Thread* thread, struct sigaction* action, Signal* signal,
861 sigset_t signalMask)
863 // prepare the data we need to copy onto the user stack
864 signal_frame_data frameData;
866 // signal info
867 frameData.info.si_signo = signal->Number();
868 frameData.info.si_code = signal->SignalCode();
869 frameData.info.si_errno = signal->ErrorCode();
870 frameData.info.si_pid = signal->SendingProcess();
871 frameData.info.si_uid = signal->SendingUser();
872 frameData.info.si_addr = signal->Address();
873 frameData.info.si_status = signal->Status();
874 frameData.info.si_band = signal->PollBand();
875 frameData.info.si_value = signal->UserValue();
877 // context
878 frameData.context.uc_link = thread->user_signal_context;
879 frameData.context.uc_sigmask = signalMask;
880 // uc_stack and uc_mcontext are filled in by the architecture specific code.
882 // user data
883 frameData.user_data = action->sa_userdata;
885 // handler function
886 frameData.siginfo_handler = (action->sa_flags & SA_SIGINFO) != 0;
887 frameData.handler = frameData.siginfo_handler
888 ? (void*)action->sa_sigaction : (void*)action->sa_handler;
890 // thread flags -- save and clear the thread's syscall restart related
891 // flags
892 frameData.thread_flags = atomic_and(&thread->flags,
893 ~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
895 // syscall restart related fields
896 memcpy(frameData.syscall_restart_parameters,
897 thread->syscall_restart.parameters,
898 sizeof(frameData.syscall_restart_parameters));
900 // commpage address
901 frameData.commpage_address = thread->team->commpage_address;
903 // syscall_restart_return_value is filled in by the architecture specific
904 // code.
906 return arch_setup_signal_frame(thread, action, &frameData);
910 /*! Actually handles pending signals -- i.e. the thread will exit, a custom
911 signal handler is prepared, or whatever the signal demands.
912 The function will not return when a deadly signal is encountered. It
913 will suspend the thread indefinitely when a stop signal is
914 encountered.
915 Interrupts must be enabled.
916 \param thread The current thread.
918 void
919 handle_signals(Thread* thread)
921 Team* team = thread->team;
923 TeamLocker teamLocker(team);
924 InterruptsSpinLocker locker(thread->team->signal_lock);
926 // If userland requested to defer signals, we check now whether this
927 // is possible.
928 sigset_t nonBlockedMask = ~thread->sig_block_mask;
929 sigset_t signalMask = thread->AllPendingSignals() & nonBlockedMask;
931 if (thread->user_thread->defer_signals > 0
932 && (signalMask & NON_DEFERRABLE_SIGNALS) == 0
933 && thread->sigsuspend_original_unblocked_mask == 0) {
934 thread->user_thread->pending_signals = signalMask;
935 return;
938 thread->user_thread->pending_signals = 0;
940 // determine syscall restart behavior
941 uint32 restartFlags = atomic_and(&thread->flags,
942 ~THREAD_FLAGS_DONT_RESTART_SYSCALL);
943 bool alwaysRestart
944 = (restartFlags & THREAD_FLAGS_ALWAYS_RESTART_SYSCALL) != 0;
945 bool restart = alwaysRestart
946 || (restartFlags & THREAD_FLAGS_DONT_RESTART_SYSCALL) == 0;
948 // Loop until we've handled all signals.
949 bool initialIteration = true;
950 while (true) {
951 if (initialIteration) {
952 initialIteration = false;
953 } else {
954 teamLocker.Lock();
955 locker.Lock();
957 signalMask = thread->AllPendingSignals() & nonBlockedMask;
960 // Unless SIGKILL[THR] are pending, check whether the thread shall
961 // stop for a core dump or for debugging.
962 if ((signalMask & KILL_SIGNALS) == 0) {
963 if ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP)
964 != 0) {
965 locker.Unlock();
966 teamLocker.Unlock();
968 core_dump_trap_thread();
969 continue;
972 if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP)
973 != 0) {
974 locker.Unlock();
975 teamLocker.Unlock();
977 user_debug_stop_thread();
978 continue;
982 // We're done, if there aren't any pending signals anymore.
983 if ((signalMask & nonBlockedMask) == 0)
984 break;
986 // get pending non-blocked thread or team signal with the highest
987 // priority
988 Signal stackSignal;
989 Signal* signal = dequeue_thread_or_team_signal(thread, nonBlockedMask,
990 stackSignal);
991 ASSERT(signal != NULL);
992 SignalHandledCaller signalHandledCaller(signal);
994 locker.Unlock();
996 // get the action for the signal
997 struct sigaction handler;
998 if (signal->Number() <= MAX_SIGNAL_NUMBER) {
999 handler = team->SignalActionFor(signal->Number());
1000 } else {
1001 handler.sa_handler = SIG_DFL;
1002 handler.sa_flags = 0;
1005 if ((handler.sa_flags & SA_ONESHOT) != 0
1006 && handler.sa_handler != SIG_IGN && handler.sa_handler != SIG_DFL) {
1007 team->SignalActionFor(signal->Number()).sa_handler = SIG_DFL;
1010 T(HandleSignal(signal->Number()));
1012 teamLocker.Unlock();
1014 // debug the signal, if a debugger is installed and the signal debugging
1015 // flag is set
1016 bool debugSignal = (~atomic_get(&team->debug_info.flags)
1017 & (B_TEAM_DEBUG_DEBUGGER_INSTALLED | B_TEAM_DEBUG_SIGNALS))
1018 == 0;
1020 // handle the signal
1021 TRACE(("Thread %" B_PRId32 " received signal %s\n", thread->id,
1022 kSignalInfos[signal->Number()].name));
1024 if (handler.sa_handler == SIG_IGN) {
1025 // signal is to be ignored
1026 // TODO: apply zombie cleaning on SIGCHLD
1028 // notify the debugger
1029 if (debugSignal)
1030 notify_debugger(thread, signal, handler, false);
1031 continue;
1032 } else if (handler.sa_handler == SIG_DFL) {
1033 // default signal behaviour
1035 // realtime signals are ignored by default
1036 if (signal->Number() >= SIGNAL_REALTIME_MIN
1037 && signal->Number() <= SIGNAL_REALTIME_MAX) {
1038 // notify the debugger
1039 if (debugSignal)
1040 notify_debugger(thread, signal, handler, false);
1041 continue;
1044 bool killTeam = false;
1045 switch (signal->Number()) {
1046 case SIGCHLD:
1047 case SIGWINCH:
1048 case SIGURG:
1049 // notify the debugger
1050 if (debugSignal)
1051 notify_debugger(thread, signal, handler, false);
1052 continue;
1054 case SIGNAL_DEBUG_THREAD:
1055 // ignore -- used together with B_THREAD_DEBUG_STOP, which
1056 // is handled above
1057 continue;
1059 case SIGNAL_CANCEL_THREAD:
1060 // set up the signal handler
1061 handler.sa_handler = thread->cancel_function;
1062 handler.sa_flags = 0;
1063 handler.sa_mask = 0;
1064 handler.sa_userdata = NULL;
1066 restart = false;
1067 // we always want to interrupt
1068 break;
1070 case SIGNAL_CONTINUE_THREAD:
1071 // prevent syscall restart, but otherwise ignore
1072 restart = false;
1073 atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1074 continue;
1076 case SIGCONT:
1077 // notify the debugger
1078 if (debugSignal
1079 && !notify_debugger(thread, signal, handler, false))
1080 continue;
1082 // notify threads waiting for team state changes
1083 if (thread == team->main_thread) {
1084 team->LockTeamAndParent(false);
1086 team_set_job_control_state(team,
1087 JOB_CONTROL_STATE_CONTINUED, signal);
1089 team->UnlockTeamAndParent();
1091 // The standard states that the system *may* send a
1092 // SIGCHLD when a child is continued. I haven't found
1093 // a good reason why we would want to, though.
1095 continue;
1097 case SIGSTOP:
1098 case SIGTSTP:
1099 case SIGTTIN:
1100 case SIGTTOU:
1102 // notify the debugger
1103 if (debugSignal
1104 && !notify_debugger(thread, signal, handler, false))
1105 continue;
1107 // The terminal-sent stop signals are allowed to stop the
1108 // process only if it doesn't belong to an orphaned process
1109 // group. Otherwise the signal must be discarded.
1110 team->LockProcessGroup();
1111 AutoLocker<ProcessGroup> groupLocker(team->group, true);
1112 if (signal->Number() != SIGSTOP
1113 && team->group->IsOrphaned()) {
1114 continue;
1117 // notify threads waiting for team state changes
1118 if (thread == team->main_thread) {
1119 team->LockTeamAndParent(false);
1121 team_set_job_control_state(team,
1122 JOB_CONTROL_STATE_STOPPED, signal);
1124 // send a SIGCHLD to the parent (unless it has
1125 // SA_NOCLDSTOP set)
1126 Team* parentTeam = team->parent;
1128 struct sigaction& parentHandler
1129 = parentTeam->SignalActionFor(SIGCHLD);
1130 if ((parentHandler.sa_flags & SA_NOCLDSTOP) == 0) {
1131 Signal childSignal(SIGCHLD, CLD_STOPPED, B_OK,
1132 team->id);
1133 childSignal.SetStatus(signal->Number());
1134 childSignal.SetSendingUser(signal->SendingUser());
1135 send_signal_to_team(parentTeam, childSignal, 0);
1138 team->UnlockTeamAndParent();
1141 groupLocker.Unlock();
1143 // Suspend the thread, unless there's already a signal to
1144 // continue or kill pending.
1145 locker.Lock();
1146 bool resume = (thread->AllPendingSignals()
1147 & (CONTINUE_SIGNALS | KILL_SIGNALS)) != 0;
1148 locker.Unlock();
1150 if (!resume)
1151 thread_suspend();
1153 continue;
1156 case SIGSEGV:
1157 case SIGBUS:
1158 case SIGFPE:
1159 case SIGILL:
1160 case SIGTRAP:
1161 case SIGABRT:
1162 case SIGKILL:
1163 case SIGQUIT:
1164 case SIGPOLL:
1165 case SIGPROF:
1166 case SIGSYS:
1167 case SIGVTALRM:
1168 case SIGXCPU:
1169 case SIGXFSZ:
1170 default:
1171 TRACE(("Shutting down team %" B_PRId32 " due to signal %"
1172 B_PRIu32 " received in thread %" B_PRIu32 " \n",
1173 team->id, signal->Number(), thread->id));
1175 // This signal kills the team regardless of which thread
1176 // received it.
1177 killTeam = true;
1179 // fall through
1180 case SIGKILLTHR:
1181 // notify the debugger
1182 if (debugSignal && signal->Number() != SIGKILL
1183 && signal->Number() != SIGKILLTHR
1184 && !notify_debugger(thread, signal, handler, true)) {
1185 continue;
1188 if (killTeam || thread == team->main_thread) {
1189 // The signal is terminal for the team or the thread is
1190 // the main thread. In either case the team is going
1191 // down. Set its exit status, if that didn't happen yet.
1192 teamLocker.Lock();
1194 if (!team->exit.initialized) {
1195 team->exit.reason = CLD_KILLED;
1196 team->exit.signal = signal->Number();
1197 team->exit.signaling_user = signal->SendingUser();
1198 team->exit.status = 0;
1199 team->exit.initialized = true;
1202 teamLocker.Unlock();
1204 // If this is not the main thread, send the main thread a
1205 // SIGKILLTHR so that the team terminates.
1206 if (thread != team->main_thread) {
1207 Signal childSignal(SIGKILLTHR, SI_USER, B_OK,
1208 team->id);
1209 send_signal_to_thread_id(team->id, childSignal, 0);
1213 // explicitly get rid of the signal reference, since
1214 // thread_exit() won't return
1215 signalHandledCaller.Done();
1217 thread_exit();
1218 // won't return
1222 // User defined signal handler
1224 // notify the debugger
1225 if (debugSignal && !notify_debugger(thread, signal, handler, false))
1226 continue;
1228 if (!restart
1229 || (!alwaysRestart && (handler.sa_flags & SA_RESTART) == 0)) {
1230 atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1233 T(ExecuteSignalHandler(signal->Number(), &handler));
1235 TRACE(("### Setting up custom signal handler frame...\n"));
1237 // save the old block mask -- we may need to adjust it for the handler
1238 locker.Lock();
1240 sigset_t oldBlockMask = thread->sigsuspend_original_unblocked_mask != 0
1241 ? ~thread->sigsuspend_original_unblocked_mask
1242 : thread->sig_block_mask;
1244 // Update the block mask while the signal handler is running -- it
1245 // will be automatically restored when the signal frame is left.
1246 thread->sig_block_mask |= handler.sa_mask & BLOCKABLE_SIGNALS;
1248 if ((handler.sa_flags & SA_NOMASK) == 0) {
1249 thread->sig_block_mask
1250 |= SIGNAL_TO_MASK(signal->Number()) & BLOCKABLE_SIGNALS;
1253 update_current_thread_signals_flag();
1255 locker.Unlock();
1257 setup_signal_frame(thread, &handler, signal, oldBlockMask);
1259 // Reset sigsuspend_original_unblocked_mask. It would have been set by
1260 // sigsuspend_internal(). In that case, above we set oldBlockMask
1261 // accordingly so that after the handler returns the thread's signal
1262 // mask is reset.
1263 thread->sigsuspend_original_unblocked_mask = 0;
1265 return;
1268 // We have not handled any signal (or have only ignored them).
1270 // If sigsuspend_original_unblocked_mask is non-null, we came from a
1271 // sigsuspend_internal(). Not having handled any signal, we should restart
1272 // the syscall.
1273 if (thread->sigsuspend_original_unblocked_mask != 0) {
1274 restart = true;
1275 atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
1276 } else if (!restart) {
1277 // clear syscall restart thread flag, if we're not supposed to restart
1278 // the syscall
1279 atomic_and(&thread->flags, ~THREAD_FLAGS_RESTART_SYSCALL);
1284 /*! Checks whether the given signal is blocked for the given team (i.e. all of
1285 its threads).
1286 The caller must hold the team's lock and \c signal_lock.
1288 bool
1289 is_team_signal_blocked(Team* team, int signal)
1291 sigset_t mask = SIGNAL_TO_MASK(signal);
1293 for (Thread* thread = team->thread_list; thread != NULL;
1294 thread = thread->team_next) {
1295 if ((thread->sig_block_mask & mask) == 0)
1296 return false;
1299 return true;
1303 /*! Gets (guesses) the current thread's currently used stack from the given
1304 stack pointer.
1305 Fills in \a stack with either the signal stack or the thread's user stack.
1306 \param address A stack pointer address to be used to determine the used
1307 stack.
1308 \param stack Filled in by the function.
1310 void
1311 signal_get_user_stack(addr_t address, stack_t* stack)
1313 // If a signal stack is enabled for the stack and the address is within it,
1314 // return the signal stack. In all other cases return the thread's user
1315 // stack, even if the address doesn't lie within it.
1316 Thread* thread = thread_get_current_thread();
1317 if (thread->signal_stack_enabled && address >= thread->signal_stack_base
1318 && address < thread->signal_stack_base + thread->signal_stack_size) {
1319 stack->ss_sp = (void*)thread->signal_stack_base;
1320 stack->ss_size = thread->signal_stack_size;
1321 } else {
1322 stack->ss_sp = (void*)thread->user_stack_base;
1323 stack->ss_size = thread->user_stack_size;
1326 stack->ss_flags = 0;
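/*	Illustrative usage sketch (not part of the original source);
	\c userStackPointer stands for a user-space stack pointer value (addr_t):

	\code
	stack_t stack;
	signal_get_user_stack(userStackPointer, &stack);
	// stack.ss_sp/ss_size now describe either the signal stack or the
	// thread's user stack
	\endcode
*/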
1330 /*! Checks whether any non-blocked signal is pending for the current thread.
1331 The caller must hold \c team->signal_lock.
1332 \param thread The current thread.
1334 static bool
1335 has_signals_pending(Thread* thread)
1337 return (thread->AllPendingSignals() & ~thread->sig_block_mask) != 0;
1341 /*! Checks whether the current user has permission to send a signal to the given
1342 target team.
1344 \param team The target team.
1346 static bool
1347 has_permission_to_signal(Team* team)
1349 // get the current user
1350 uid_t currentUser = thread_get_current_thread()->team->effective_uid;
1352 // root is omnipotent -- in the other cases the current user must match the
1353 // target team's
1354 return currentUser == 0 || currentUser == team->effective_uid;
1358 /*! Delivers a signal to the \a thread, but doesn't handle the signal -- it just
1359 makes sure the thread gets the signal, i.e. unblocks it if needed.
1361 The caller must hold \c team->signal_lock.
1363 \param thread The thread the signal shall be delivered to.
1364 \param signalNumber The number of the signal to be delivered. If \c 0, no
1365 actual signal will be delivered. Only delivery checks will be performed.
1366 \param signal If non-NULL the signal to be queued (has number
1367 \a signalNumber in this case). The caller transfers an object reference
1368 to this function. If \c NULL an unqueued signal will be delivered to the
1369 thread.
1370 \param flags A bitwise combination of any number of the following:
1371 - \c B_CHECK_PERMISSION: Check the caller's permission to send the
1372 target thread the signal.
1373 \return \c B_OK, when the signal was delivered successfully, another error
1374 code otherwise.
1376 status_t
1377 send_signal_to_thread_locked(Thread* thread, uint32 signalNumber,
1378 Signal* signal, uint32 flags)
1380 ASSERT(signal == NULL || signalNumber == signal->Number());
1382 T(SendSignal(thread->id, signalNumber, flags));
1384 // The caller transferred a reference to the signal to us.
1385 BReference<Signal> signalReference(signal, true);
1387 if ((flags & B_CHECK_PERMISSION) != 0) {
1388 if (!has_permission_to_signal(thread->team))
1389 return EPERM;
1392 if (signalNumber == 0)
1393 return B_OK;
1395 if (thread->team == team_get_kernel_team()) {
1396 // Signals to kernel threads will only wake them up
1397 thread_continue(thread);
1398 return B_OK;
1401 if (signal != NULL)
1402 thread->AddPendingSignal(signal);
1403 else
1404 thread->AddPendingSignal(signalNumber);
1406 // the thread has the signal reference, now
1407 signalReference.Detach();
1409 switch (signalNumber) {
1410 case SIGKILL:
1412 // If sent to a thread other than the team's main thread, also send
1413 // a SIGKILLTHR to the main thread to kill the team.
1414 Thread* mainThread = thread->team->main_thread;
1415 if (mainThread != NULL && mainThread != thread) {
1416 mainThread->AddPendingSignal(SIGKILLTHR);
1418 // wake up main thread
1419 mainThread->going_to_suspend = false;
1421 SpinLocker locker(mainThread->scheduler_lock);
1422 if (mainThread->state == B_THREAD_SUSPENDED)
1423 scheduler_enqueue_in_run_queue(mainThread);
1424 else
1425 thread_interrupt(mainThread, true);
1426 locker.Unlock();
1428 update_thread_signals_flag(mainThread);
1431 // supposed to fall through
1433 case SIGKILLTHR:
1435 // Wake up suspended threads and interrupt waiting ones
1436 thread->going_to_suspend = false;
1438 SpinLocker locker(thread->scheduler_lock);
1439 if (thread->state == B_THREAD_SUSPENDED)
1440 scheduler_enqueue_in_run_queue(thread);
1441 else
1442 thread_interrupt(thread, true);
1444 break;
1446 case SIGNAL_DEBUG_THREAD:
1448 // Wake up thread if it was suspended, otherwise interrupt it.
1449 thread->going_to_suspend = false;
1451 SpinLocker locker(thread->scheduler_lock);
1452 if (thread->state == B_THREAD_SUSPENDED)
1453 scheduler_enqueue_in_run_queue(thread);
1454 else
1455 thread_interrupt(thread, false);
1457 break;
1459 case SIGNAL_CONTINUE_THREAD:
1461 // wake up thread, and interrupt its current syscall
1462 thread->going_to_suspend = false;
1464 SpinLocker locker(thread->scheduler_lock);
1465 if (thread->state == B_THREAD_SUSPENDED)
1466 scheduler_enqueue_in_run_queue(thread);
1468 atomic_or(&thread->flags, THREAD_FLAGS_DONT_RESTART_SYSCALL);
1469 break;
1471 case SIGCONT:
1473 // Wake up thread if it was suspended, otherwise interrupt it, if
1474 // the signal isn't blocked.
1475 thread->going_to_suspend = false;
1477 SpinLocker locker(thread->scheduler_lock);
1478 if (thread->state == B_THREAD_SUSPENDED)
1479 scheduler_enqueue_in_run_queue(thread);
1480 else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask) != 0)
1481 thread_interrupt(thread, false);
1483 // remove any pending stop signals
1484 thread->RemovePendingSignals(STOP_SIGNALS);
1485 break;
1487 default:
1488 // If the signal is not masked, interrupt the thread, if it is
1489 // currently waiting (interruptibly).
1490 if ((thread->AllPendingSignals()
1491 & (~thread->sig_block_mask | SIGNAL_TO_MASK(SIGCHLD)))
1492 != 0) {
1493 // Interrupt thread if it was waiting
1494 SpinLocker locker(thread->scheduler_lock);
1495 thread_interrupt(thread, false);
1497 break;
1500 update_thread_signals_flag(thread);
1502 return B_OK;
1506 /*! Sends the given signal to the given thread.
1508 \param thread The thread the signal shall be sent to.
1509 \param signal The signal to be delivered. If the signal's number is \c 0, no
1510 actual signal will be delivered. Only delivery checks will be performed.
1511 The given object will be copied. The caller retains ownership.
1512 \param flags A bitwise combination of any number of the following:
1513 - \c B_CHECK_PERMISSION: Check the caller's permission to send the
1514 target thread the signal.
1515 - \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1516 woken up, the scheduler will be invoked. If set that will not be
1517 done explicitly, but rescheduling can still happen, e.g. when the
1518 current thread's time slice runs out.
1519 \return \c B_OK, when the signal was delivered successfully, another error
1520 code otherwise.
1522 status_t
1523 send_signal_to_thread(Thread* thread, const Signal& signal, uint32 flags)
1525 // Clone the signal -- the clone will be queued. If something fails and the
1526 // caller doesn't require queuing, we will add an unqueued signal.
1527 Signal* signalToQueue = NULL;
1528 status_t error = Signal::CreateQueuable(signal,
1529 (flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1530 if (error != B_OK)
1531 return error;
1533 InterruptsReadSpinLocker teamLocker(thread->team_lock);
1534 SpinLocker locker(thread->team->signal_lock);
1536 error = send_signal_to_thread_locked(thread, signal.Number(), signalToQueue,
1537 flags);
1538 if (error != B_OK)
1539 return error;
1541 locker.Unlock();
1542 teamLocker.Unlock();
1544 if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1545 scheduler_reschedule_if_necessary();
1547 return B_OK;
1551 /*! Sends the given signal to the thread with the given ID.
1553 \param threadID The ID of the thread the signal shall be sent to.
1554 \param signal The signal to be delivered. If the signal's number is \c 0, no
1555 actual signal will be delivered. Only delivery checks will be performed.
1556 The given object will be copied. The caller retains ownership.
1557 \param flags A bitwise combination of any number of the following:
1558 - \c B_CHECK_PERMISSION: Check the caller's permission to send the
1559 target thread the signal.
1560 - \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1561 woken up, the scheduler will be invoked. If set that will not be
1562 done explicitly, but rescheduling can still happen, e.g. when the
1563 current thread's time slice runs out.
1564 \return \c B_OK, when the signal was delivered successfully, another error
1565 code otherwise.
1567 status_t
1568 send_signal_to_thread_id(thread_id threadID, const Signal& signal, uint32 flags)
1570 Thread* thread = Thread::Get(threadID);
1571 if (thread == NULL)
1572 return B_BAD_THREAD_ID;
1573 BReference<Thread> threadReference(thread, true);
1575 return send_signal_to_thread(thread, signal, flags);
1579 /*! Sends the given signal to the given team.
1581 The caller must hold \c signal_lock.
1583 \param team The team the signal shall be sent to.
1584 \param signalNumber The number of the signal to be delivered. If \c 0, no
1585 actual signal will be delivered. Only delivery checks will be performed.
1586 \param signal If non-NULL the signal to be queued (has number
1587 \a signalNumber in this case). The caller transfers an object reference
1588 to this function. If \c NULL an unqueued signal will be delivered to the
1589 thread.
1590 \param flags A bitwise combination of any number of the following:
1591 - \c B_CHECK_PERMISSION: Check the caller's permission to send the
1592 target thread the signal.
1593 - \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1594 woken up, the scheduler will be invoked. If set that will not be
1595 done explicitly, but rescheduling can still happen, e.g. when the
1596 current thread's time slice runs out.
1597 \return \c B_OK, when the signal was delivered successfully, another error
1598 code otherwise.
1600 status_t
1601 send_signal_to_team_locked(Team* team, uint32 signalNumber, Signal* signal,
1602 uint32 flags)
1604 ASSERT(signal == NULL || signalNumber == signal->Number());
1606 T(SendSignal(team->id, signalNumber, flags));
1608 // The caller transferred a reference to the signal to us.
1609 BReference<Signal> signalReference(signal, true);
1611 if ((flags & B_CHECK_PERMISSION) != 0) {
1612 if (!has_permission_to_signal(team))
1613 return EPERM;
1616 if (signalNumber == 0)
1617 return B_OK;
1619 if (team == team_get_kernel_team()) {
1620 // signals to the kernel team are not allowed
1621 return EPERM;
1624 if (signal != NULL)
1625 team->AddPendingSignal(signal);
1626 else
1627 team->AddPendingSignal(signalNumber);
1629 // the team has the signal reference, now
1630 signalReference.Detach();
1632 switch (signalNumber) {
1633 case SIGKILL:
1634 case SIGKILLTHR:
1636 // Also add a SIGKILLTHR to the main thread's signals and wake it
1637 // up/interrupt it, so we get this over with as soon as possible
1638 // (only the main thread shuts down the team).
1639 Thread* mainThread = team->main_thread;
1640 if (mainThread != NULL) {
1641 mainThread->AddPendingSignal(SIGKILLTHR);
1643 // wake up main thread
1644 mainThread->going_to_suspend = false;
1646 SpinLocker _(mainThread->scheduler_lock);
1647 if (mainThread->state == B_THREAD_SUSPENDED)
1648 scheduler_enqueue_in_run_queue(mainThread);
1649 else
1650 thread_interrupt(mainThread, true);
1652 break;
1655 case SIGCONT:
1656 // Wake up any suspended threads, interrupt the others, if they
1657 // don't block the signal.
1658 for (Thread* thread = team->thread_list; thread != NULL;
1659 thread = thread->team_next) {
1660 thread->going_to_suspend = false;
1662 SpinLocker _(thread->scheduler_lock);
1663 if (thread->state == B_THREAD_SUSPENDED) {
1664 scheduler_enqueue_in_run_queue(thread);
1665 } else if ((SIGNAL_TO_MASK(SIGCONT) & ~thread->sig_block_mask)
1666 != 0) {
1667 thread_interrupt(thread, false);
1670 // remove any pending stop signals
1671 thread->RemovePendingSignals(STOP_SIGNALS);
1674 // remove any pending team stop signals
1675 team->RemovePendingSignals(STOP_SIGNALS);
1676 break;
1678 case SIGSTOP:
1679 case SIGTSTP:
1680 case SIGTTIN:
1681 case SIGTTOU:
1682 // send the stop signal to all threads
1683 // TODO: Is that correct or should we only target the main thread?
1684 for (Thread* thread = team->thread_list; thread != NULL;
1685 thread = thread->team_next) {
1686 thread->AddPendingSignal(signalNumber);
1689 // remove the stop signal from the team again
1690 if (signal != NULL) {
1691 team->RemovePendingSignal(signal);
1692 signalReference.SetTo(signal, true);
1693 } else
1694 team->RemovePendingSignal(signalNumber);
1696 // fall through to interrupt threads
1697 default:
1698 // Interrupt all interruptibly waiting threads, if the signal is
1699 // not masked.
1700 for (Thread* thread = team->thread_list; thread != NULL;
1701 thread = thread->team_next) {
1702 sigset_t nonBlocked = ~thread->sig_block_mask
1703 | SIGNAL_TO_MASK(SIGCHLD);
1704 if ((thread->AllPendingSignals() & nonBlocked) != 0) {
1705 SpinLocker _(thread->scheduler_lock);
1706 thread_interrupt(thread, false);
1709 break;
1712 update_team_threads_signal_flag(team);
1714 return B_OK;
1718 /*! Sends the given signal to the given team.
1720 \param team The team the signal shall be sent to.
1721 \param signal The signal to be delivered. If the signal's number is \c 0, no
1722 actual signal will be delivered. Only delivery checks will be performed.
1723 The given object will be copied. The caller retains ownership.
1724 \param flags A bitwise combination of any number of the following:
1725 - \c B_CHECK_PERMISSION: Check the caller's permission to send the
1726 target thread the signal.
1727 - \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1728 woken up, the scheduler will be invoked. If set that will not be
1729 done explicitly, but rescheduling can still happen, e.g. when the
1730 current thread's time slice runs out.
1731 \return \c B_OK, when the signal was delivered successfully, another error
1732 code otherwise.
1734 status_t
1735 send_signal_to_team(Team* team, const Signal& signal, uint32 flags)
1737 // Clone the signal -- the clone will be queued. If something fails and the
1738 // caller doesn't require queuing, we will add an unqueued signal.
1739 Signal* signalToQueue = NULL;
1740 status_t error = Signal::CreateQueuable(signal,
1741 (flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0, signalToQueue);
1742 if (error != B_OK)
1743 return error;
1745 InterruptsSpinLocker locker(team->signal_lock);
1747 error = send_signal_to_team_locked(team, signal.Number(), signalToQueue,
1748 flags);
1750 locker.Unlock();
1752 if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1753 scheduler_reschedule_if_necessary();
1755 return error;
1759 /*! Sends the given signal to the team with the given ID.
1761 \param teamID The ID of the team the signal shall be sent to.
1762 \param signal The signal to be delivered. If the signal's number is \c 0, no
1763 actual signal will be delivered. Only delivery checks will be performed.
1764 The given object will be copied. The caller retains ownership.
1765 \param flags A bitwise combination of any number of the following:
1766 - \c B_CHECK_PERMISSION: Check the caller's permission to send the
1767 target thread the signal.
1768 - \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1769 woken up, the scheduler will be invoked. If set that will not be
1770 done explicitly, but rescheduling can still happen, e.g. when the
1771 current thread's time slice runs out.
1772 \return \c B_OK, when the signal was delivered successfully, another error
1773 code otherwise.
1775 status_t
1776 send_signal_to_team_id(team_id teamID, const Signal& signal, uint32 flags)
1778 // get the team
1779 Team* team = Team::Get(teamID);
1780 if (team == NULL)
1781 return B_BAD_TEAM_ID;
1782 BReference<Team> teamReference(team, true);
1784 return send_signal_to_team(team, signal, flags);
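/*	Illustrative in-kernel usage sketch (not part of the original source);
	\c targetTeam stands for a hypothetical team ID:

	\code
	Signal signal(SIGTERM, SI_USER, B_OK,
		thread_get_current_thread()->team->id);
	status_t error = send_signal_to_team_id(targetTeam, signal,
		B_CHECK_PERMISSION | B_DO_NOT_RESCHEDULE);
	\endcode
*/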
1788 /*! Sends the given signal to the given process group.
1790 The caller must hold the process group's lock. Interrupts must be enabled.
1792 \param group The process group the signal shall be sent to.
1793 \param signal The signal to be delivered. If the signal's number is \c 0, no
1794 actual signal will be delivered. Only delivery checks will be performed.
1795 The given object will be copied. The caller retains ownership.
1796 \param flags A bitwise combination of any number of the following:
1797 - \c B_CHECK_PERMISSION: Check the caller's permission to send the
1798 target thread the signal.
1799 - \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1800 woken up, the scheduler will be invoked. If set that will not be
1801 done explicitly, but rescheduling can still happen, e.g. when the
1802 current thread's time slice runs out.
1803 \return \c B_OK, when the signal was delivered successfully, another error
1804 code otherwise.
1806 status_t
1807 send_signal_to_process_group_locked(ProcessGroup* group, const Signal& signal,
1808 uint32 flags)
1810 T(SendSignal(-group->id, signal.Number(), flags));
1812 bool firstTeam = true;
1814 for (Team* team = group->teams; team != NULL; team = team->group_next) {
1815 status_t error = send_signal_to_team(team, signal,
1816 flags | B_DO_NOT_RESCHEDULE);
1817 // If sending to the first team in the group failed, let the whole call
1818 // fail.
1819 if (firstTeam) {
1820 if (error != B_OK)
1821 return error;
1822 firstTeam = false;
1826 if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1827 scheduler_reschedule_if_necessary();
1829 return B_OK;
1833 /*! Sends the given signal to the process group specified by the given ID.
1835 The caller must not hold any process group, team, or thread lock. Interrupts
1836 must be enabled.
1838 \param groupID The ID of the process group the signal shall be sent to.
1839 \param signal The signal to be delivered. If the signal's number is \c 0, no
1840 actual signal will be delivered. Only delivery checks will be performed.
1841 The given object will be copied. The caller retains ownership.
1842 \param flags A bitwise combination of any number of the following:
1843 - \c B_CHECK_PERMISSION: Check the caller's permission to send the
1844 target thread the signal.
1845 - \c B_DO_NOT_RESCHEDULE: If clear and a higher level thread has been
1846 woken up, the scheduler will be invoked. If set that will not be
1847 done explicitly, but rescheduling can still happen, e.g. when the
1848 current thread's time slice runs out.
1849 \return \c B_OK, when the signal was delivered successfully, another error
1850 code otherwise.
1852 status_t
1853 send_signal_to_process_group(pid_t groupID, const Signal& signal, uint32 flags)
1855 ProcessGroup* group = ProcessGroup::Get(groupID);
1856 if (group == NULL)
1857 return B_BAD_TEAM_ID;
1858 BReference<ProcessGroup> groupReference(group);
1860 T(SendSignal(-group->id, signal.Number(), flags));
1862 AutoLocker<ProcessGroup> groupLocker(group);
1864 status_t error = send_signal_to_process_group_locked(group, signal,
1865 flags | B_DO_NOT_RESCHEDULE);
1866 if (error != B_OK)
1867 return error;
1869 groupLocker.Unlock();
1871 if ((flags & B_DO_NOT_RESCHEDULE) == 0)
1872 scheduler_reschedule_if_necessary();
1874 return B_OK;
1878 static status_t
1879 send_signal_internal(pid_t id, uint signalNumber, union sigval userValue,
1880 uint32 flags)
1882 if (signalNumber > MAX_SIGNAL_NUMBER)
1883 return B_BAD_VALUE;
1885 Thread* thread = thread_get_current_thread();
1887 Signal signal(signalNumber,
1888 (flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
1889 B_OK, thread->team->id);
1890 // Note: SI_USER/SI_QUEUE is not correct if called from within the
1891 // kernel (or a driver), but we don't have any info here.
1892 signal.SetUserValue(userValue);
1894 // If id is > 0, send the signal to the respective thread.
1895 if (id > 0)
1896 return send_signal_to_thread_id(id, signal, flags);
1898 // If id == 0, send the signal to the current thread.
1899 if (id == 0)
1900 return send_signal_to_thread(thread, signal, flags);
1902 // If id == -1, send the signal to all teams the calling team has permission
1903 // to send signals to.
1904 if (id == -1) {
1905 // TODO: Implement correctly!
1906 // currently only send to the current team
1907 return send_signal_to_team_id(thread->team->id, signal, flags);
1910 // Send a signal to the specified process group (the absolute value of the
1911 // id).
1912 return send_signal_to_process_group(-id, signal, flags);
1917 send_signal_etc(pid_t id, uint signalNumber, uint32 flags)
1919 // a dummy user value
1920 union sigval userValue;
1921 userValue.sival_ptr = NULL;
1923 return send_signal_internal(id, signalNumber, userValue, flags);
1928 send_signal(pid_t threadID, uint signal)
1930 // The BeBook states that this function wouldn't be exported
1931 // for drivers, but, of course, it's wrong.
1932 return send_signal_etc(threadID, signal, 0);
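/*	Illustrative sketch (not part of the original source) of the exported
	convenience API, e.g. from a driver; \c targetThread stands for a
	hypothetical thread ID:

	\code
	send_signal_etc(targetThread, SIGCONT, B_DO_NOT_RESCHEDULE);
	\endcode
*/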
1936 static int
1937 sigprocmask_internal(int how, const sigset_t* set, sigset_t* oldSet)
1939 Thread* thread = thread_get_current_thread();
1941 InterruptsSpinLocker _(thread->team->signal_lock);
1943 sigset_t oldMask = thread->sig_block_mask;
1945 if (set != NULL) {
1946 T(SigProcMask(how, *set));
1948 switch (how) {
1949 case SIG_BLOCK:
1950 thread->sig_block_mask |= *set & BLOCKABLE_SIGNALS;
1951 break;
1952 case SIG_UNBLOCK:
1953 thread->sig_block_mask &= ~*set;
1954 break;
1955 case SIG_SETMASK:
1956 thread->sig_block_mask = *set & BLOCKABLE_SIGNALS;
1957 break;
1958 default:
1959 return B_BAD_VALUE;
1962 update_current_thread_signals_flag();
1965 if (oldSet != NULL)
1966 *oldSet = oldMask;
1968 return B_OK;
1973 sigprocmask(int how, const sigset_t* set, sigset_t* oldSet)
1975 RETURN_AND_SET_ERRNO(sigprocmask_internal(how, set, oldSet));
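/*	Illustrative sketch (not part of the original source): temporarily blocking
	SIGUSR1 around a critical section with the in-kernel sigprocmask():

	\code
	sigset_t blockSet = SIGNAL_TO_MASK(SIGUSR1);
	sigset_t oldSet;
	sigprocmask(SIG_BLOCK, &blockSet, &oldSet);
	// ...critical section...
	sigprocmask(SIG_SETMASK, &oldSet, NULL);
	\endcode
*/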
1979 /*! \brief Like sigaction(), but returning the error instead of setting errno.
1981 static status_t
1982 sigaction_internal(int signal, const struct sigaction* act,
1983 struct sigaction* oldAction)
1985 if (signal < 1 || signal > MAX_SIGNAL_NUMBER
1986 || (SIGNAL_TO_MASK(signal) & ~BLOCKABLE_SIGNALS) != 0)
1987 return B_BAD_VALUE;
1989 // get and lock the team
1990 Team* team = thread_get_current_thread()->team;
1991 TeamLocker teamLocker(team);
1993 struct sigaction& teamHandler = team->SignalActionFor(signal);
1994 if (oldAction) {
1995 // save previous sigaction structure
1996 *oldAction = teamHandler;
1999 if (act) {
2000 T(SigAction(signal, act));
2002 // set new sigaction structure
2003 teamHandler = *act;
2004 teamHandler.sa_mask &= BLOCKABLE_SIGNALS;
2007 // Remove pending signal if it should now be ignored and remove pending
2008 // signal for those signals whose default action is to ignore them.
2009 if ((act && act->sa_handler == SIG_IGN)
2010 || (act && act->sa_handler == SIG_DFL
2011 && (SIGNAL_TO_MASK(signal) & DEFAULT_IGNORE_SIGNALS) != 0)) {
2012 InterruptsSpinLocker locker(team->signal_lock);
2014 team->RemovePendingSignal(signal);
2016 for (Thread* thread = team->thread_list; thread != NULL;
2017 thread = thread->team_next) {
2018 thread->RemovePendingSignal(signal);
2022 return B_OK;
2027 sigaction(int signal, const struct sigaction* act, struct sigaction* oldAction)
2029 RETURN_AND_SET_ERRNO(sigaction_internal(signal, act, oldAction));
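/*	Illustrative sketch (not part of the original source): installing a handler
	with SA_RESTART via the in-kernel sigaction(); \c my_handler stands for a
	hypothetical handler function:

	\code
	struct sigaction action;
	memset(&action, 0, sizeof(action));
	action.sa_handler = my_handler;
	action.sa_flags = SA_RESTART;
	sigaction(SIGUSR2, &action, NULL);
	\endcode
*/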
2033 /*! Wait for the specified signals, and return the information for the retrieved
2034 signal in \a info.
2035 The \c flags and \c timeout combination must either define an infinite
2036 timeout (no timeout flags set), an absolute timeout (\c B_ABSOLUTE_TIMEOUT
2037 set), or a relative timeout \code <= 0 \endcode (\c B_RELATIVE_TIMEOUT set).
2039 static status_t
2040 sigwait_internal(const sigset_t* set, siginfo_t* info, uint32 flags,
2041 bigtime_t timeout)
2043 // restrict mask to blockable signals
2044 sigset_t requestedSignals = *set & BLOCKABLE_SIGNALS;
2046 // always make the wait interruptible
2047 flags |= B_CAN_INTERRUPT;
2049 // check whether we are allowed to wait at all
2050 bool canWait = (flags & B_RELATIVE_TIMEOUT) == 0 || timeout > 0;
2052 Thread* thread = thread_get_current_thread();
2054 InterruptsSpinLocker locker(thread->team->signal_lock);
2056 bool timedOut = false;
2057 status_t error = B_OK;
2059 while (!timedOut) {
2060 sigset_t pendingSignals = thread->AllPendingSignals();
2062 // If a kill signal is pending, just bail out.
2063 if ((pendingSignals & KILL_SIGNALS) != 0)
2064 return B_INTERRUPTED;
2066 if ((pendingSignals & requestedSignals) != 0) {
2067 // get signal with the highest priority
2068 Signal stackSignal;
2069 Signal* signal = dequeue_thread_or_team_signal(thread,
2070 requestedSignals, stackSignal);
2071 ASSERT(signal != NULL);
2073 SignalHandledCaller signalHandledCaller(signal);
2074 locker.Unlock();
2076 info->si_signo = signal->Number();
2077 info->si_code = signal->SignalCode();
2078 info->si_errno = signal->ErrorCode();
2079 info->si_pid = signal->SendingProcess();
2080 info->si_uid = signal->SendingUser();
2081 info->si_addr = signal->Address();
2082 info->si_status = signal->Status();
2083 info->si_band = signal->PollBand();
2084 info->si_value = signal->UserValue();
2086 return B_OK;
2089 if (!canWait)
2090 return B_WOULD_BLOCK;
2092 sigset_t blockedSignals = thread->sig_block_mask;
2093 if ((pendingSignals & ~blockedSignals) != 0) {
2094 // Non-blocked signals are pending -- return to let them be handled.
2095 return B_INTERRUPTED;
2098 // No signals yet. Set the signal block mask to not include the
2099 // requested signals and wait until we're interrupted.
2100 thread->sig_block_mask = blockedSignals & ~requestedSignals;
2102 while (!has_signals_pending(thread)) {
2103 thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SIGNAL,
2104 NULL);
2106 locker.Unlock();
2108 if ((flags & B_ABSOLUTE_TIMEOUT) != 0) {
2109 error = thread_block_with_timeout(flags, timeout);
2110 if (error == B_WOULD_BLOCK || error == B_TIMED_OUT) {
2111 error = B_WOULD_BLOCK;
2112 // POSIX requires EAGAIN (B_WOULD_BLOCK) on timeout
2113 timedOut = true;
2115 locker.Lock();
2116 break;
2118 } else
2119 thread_block();
2121 locker.Lock();
2124 // restore the original block mask
2125 thread->sig_block_mask = blockedSignals;
2127 update_current_thread_signals_flag();
2130 // we get here only when timed out
2131 return error;
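/* Illustrative userland sketch (assumes only standard POSIX <signal.h>;
   sigwaitinfo() is the interface that presumably ends up in this path; the
   helper name is made up; not part of this file): block SIGUSR1, then receive
   it synchronously together with the siginfo_t fields filled in above.
   \code
   #include <signal.h>

   static void
   wait_for_usr1()
   {
       sigset_t set;
       sigemptyset(&set);
       sigaddset(&set, SIGUSR1);
       sigprocmask(SIG_BLOCK, &set, NULL);   // must be blocked before waiting

       siginfo_t info;
       if (sigwaitinfo(&set, &info) == SIGUSR1) {
           // info.si_pid and info.si_value identify the sender and any
           // queued value
       }
   }
   \endcode
*/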
2135 /*! Replace the current signal block mask and wait for any event to happen.
2136 Before returning, the original signal block mask is reinstated.
2138 static status_t
2139 sigsuspend_internal(const sigset_t* _mask)
2141 sigset_t mask = *_mask & BLOCKABLE_SIGNALS;
2143 T(SigSuspend(mask));
2145 Thread* thread = thread_get_current_thread();
2147 InterruptsSpinLocker locker(thread->team->signal_lock);
2149 // Set the new block mask and block until interrupted. We might be here
2150 // after a syscall restart, in which case sigsuspend_original_unblocked_mask
2151 // will still be set.
2152 sigset_t oldMask = thread->sigsuspend_original_unblocked_mask != 0
2153 ? ~thread->sigsuspend_original_unblocked_mask : thread->sig_block_mask;
2154 thread->sig_block_mask = mask & BLOCKABLE_SIGNALS;
2156 update_current_thread_signals_flag();
2158 while (!has_signals_pending(thread)) {
2159 thread_prepare_to_block(thread, B_CAN_INTERRUPT,
2160 THREAD_BLOCK_TYPE_SIGNAL, NULL);
2162 locker.Unlock();
2163 thread_block();
2164 locker.Lock();
2167 // Set sigsuspend_original_unblocked_mask (guaranteed to be non-0 due to
2168 // BLOCKABLE_SIGNALS). This will indicate to handle_signals() that it is
2169 // called after a _user_sigsuspend(). It will reset the field after invoking
2170 // a signal handler, or restart the syscall, if there wasn't anything to
2171 // handle anymore (e.g. because another thread was faster).
2172 thread->sigsuspend_original_unblocked_mask = ~oldMask;
2174 T(SigSuspendDone());
2176 // we're not supposed to actually succeed
2177 return B_INTERRUPTED;
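/* Illustrative userland sketch of the classic race-free wait this enables
   (assumes only standard POSIX <signal.h>; gotChild, on_chld and the helper
   name are made up; not part of this file): block SIGCHLD, test the flag, and
   open the mask only while sleeping -- the same block-mask swap performed
   above.
   \code
   #include <signal.h>

   static volatile sig_atomic_t gotChild = 0;

   static void
   on_chld(int signal)
   {
       (void)signal;
       gotChild = 1;
   }

   static void
   wait_for_child()
   {
       signal(SIGCHLD, on_chld);

       sigset_t block, waitMask;
       sigemptyset(&block);
       sigaddset(&block, SIGCHLD);
       sigprocmask(SIG_BLOCK, &block, &waitMask);   // waitMask = previous mask
       sigdelset(&waitMask, SIGCHLD);               // deliverable while waiting

       while (!gotChild)
           sigsuspend(&waitMask);   // returns -1/EINTR after the handler ran
   }
   \endcode
*/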
2181 static status_t
2182 sigpending_internal(sigset_t* set)
2184 Thread* thread = thread_get_current_thread();
2186 if (set == NULL)
2187 return B_BAD_VALUE;
2189 InterruptsSpinLocker locker(thread->team->signal_lock);
2191 *set = thread->AllPendingSignals() & thread->sig_block_mask;
2193 return B_OK;
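/* Illustrative userland sketch (assumes only standard POSIX <signal.h>; the
   helper name is made up; not part of this file): query which blocked signals
   are currently pending for the calling thread -- the set computed above.
   \code
   #include <signal.h>

   static void
   check_pending_sigint()
   {
       sigset_t pending;
       sigpending(&pending);
       if (sigismember(&pending, SIGINT)) {
           // SIGINT arrived while blocked and is still awaiting delivery
       }
   }
   \endcode
*/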
2197 // #pragma mark - syscalls
2200 /*! Sends a signal to a thread, process, or process group.
2201 \param id Specifies the ID of the target:
2202 - \code id > 0 \endcode: The thread with ID \a id, if
2203 \c SIGNAL_FLAG_SEND_TO_THREAD is set in \a flags; otherwise the team with
2204 ID \a id.
2205 - \code id == 0 \endcode: Likewise the current thread, respectively the current team.
2206 - \code id == -1 \endcode: The targets are all teams the current team has
2207 permission to send signals to. Currently not implemented correctly.
2208 - \code id < -1 \endcode: The target is the process group with ID
2209 \c -id.
2210 \param signalNumber The signal number. \c 0 to just perform checks, but not
2211 actually send any signal.
2212 \param userUserValue A user value to be associated with the signal. Might be
2213 ignored unless signal queuing is forced. Can be \c NULL.
2214 \param flags A bitwise or of any number of the following:
2215 - \c SIGNAL_FLAG_QUEUING_REQUIRED: Signal queuing is required. Fail
2216 instead of falling back to unqueued signals, when queuing isn't
2217 possible.
2218 - \c SIGNAL_FLAG_SEND_TO_THREAD: Interpret the given ID as a
2219 \c thread_id rather than a \c team_id. Ignored when the \a id is
2220 \code < 0 \endcode -- then the target is a process group.
2221 \return \c B_OK on success, another error code otherwise.
2223 status_t
2224 _user_send_signal(int32 id, uint32 signalNumber,
2225 const union sigval* userUserValue, uint32 flags)
2227 // restrict flags to the allowed ones and add B_CHECK_PERMISSION
2228 flags &= SIGNAL_FLAG_QUEUING_REQUIRED | SIGNAL_FLAG_SEND_TO_THREAD;
2229 flags |= B_CHECK_PERMISSION;
2231 // Copy the user value from userland. If not given, use a dummy value.
2232 union sigval userValue;
2233 if (userUserValue != NULL) {
2234 if (!IS_USER_ADDRESS(userUserValue)
2235 || user_memcpy(&userValue, userUserValue, sizeof(userValue))
2236 != B_OK) {
2237 return B_BAD_ADDRESS;
2239 } else
2240 userValue.sival_ptr = NULL;
2242 // If to be sent to a thread, delegate to send_signal_internal(). Also do
2243 // that when id < 0, since the semantics are the same in that case.
2244 if ((flags & SIGNAL_FLAG_SEND_TO_THREAD) != 0 || id < 0)
2245 return send_signal_internal(id, signalNumber, userValue, flags);
2247 // kill() semantics for id >= 0
2248 if (signalNumber > MAX_SIGNAL_NUMBER)
2249 return B_BAD_VALUE;
2251 Thread* thread = thread_get_current_thread();
2253 Signal signal(signalNumber,
2254 (flags & SIGNAL_FLAG_QUEUING_REQUIRED) != 0 ? SI_QUEUE : SI_USER,
2255 B_OK, thread->team->id);
2256 signal.SetUserValue(userValue);
2258 // send to current team for id == 0, otherwise to the respective team
2259 return send_signal_to_team_id(id == 0 ? team_get_current_team_id() : id,
2260 signal, flags);
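/* Illustrative userland sketch (assumes only standard POSIX <signal.h>; the
   helper name is made up; not part of this file): sigqueue() supplies the
   sigval that becomes the Signal's user value above, while a plain kill()
   takes the SI_USER path without one.
   \code
   #include <signal.h>

   static void
   queue_usr1_to(pid_t team)
   {
       union sigval value;
       value.sival_int = 42;   // arbitrary payload for the receiver
       sigqueue(team, SIGUSR1, value);
   }
   \endcode
*/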
2264 status_t
2265 _user_set_signal_mask(int how, const sigset_t *userSet, sigset_t *userOldSet)
2267 sigset_t set, oldSet;
2268 status_t status;
2270 if ((userSet != NULL && user_memcpy(&set, userSet, sizeof(sigset_t)) < B_OK)
2271 || (userOldSet != NULL && user_memcpy(&oldSet, userOldSet,
2272 sizeof(sigset_t)) < B_OK))
2273 return B_BAD_ADDRESS;
2275 status = sigprocmask_internal(how, userSet ? &set : NULL,
2276 userOldSet ? &oldSet : NULL);
2278 // copy old set if asked for
2279 if (status >= B_OK && userOldSet != NULL
2280 && user_memcpy(userOldSet, &oldSet, sizeof(sigset_t)) < B_OK)
2281 return B_BAD_ADDRESS;
2283 return status;
2287 status_t
2288 _user_sigaction(int signal, const struct sigaction *userAction,
2289 struct sigaction *userOldAction)
2291 struct sigaction act, oact;
2292 status_t status;
2294 if ((userAction != NULL && user_memcpy(&act, userAction,
2295 sizeof(struct sigaction)) < B_OK)
2296 || (userOldAction != NULL && user_memcpy(&oact, userOldAction,
2297 sizeof(struct sigaction)) < B_OK))
2298 return B_BAD_ADDRESS;
2300 status = sigaction_internal(signal, userAction ? &act : NULL,
2301 userOldAction ? &oact : NULL);
2303 // only copy the old action if a pointer has been given
2304 if (status >= B_OK && userOldAction != NULL
2305 && user_memcpy(userOldAction, &oact, sizeof(struct sigaction)) < B_OK)
2306 return B_BAD_ADDRESS;
2308 return status;
2312 status_t
2313 _user_sigwait(const sigset_t *userSet, siginfo_t *userInfo, uint32 flags,
2314 bigtime_t timeout)
2316 // copy userSet to stack
2317 sigset_t set;
2318 if (userSet == NULL || !IS_USER_ADDRESS(userSet)
2319 || user_memcpy(&set, userSet, sizeof(sigset_t)) != B_OK) {
2320 return B_BAD_ADDRESS;
2323 // userInfo is optional, but must be a user address when given
2324 if (userInfo != NULL && !IS_USER_ADDRESS(userInfo))
2325 return B_BAD_ADDRESS;
2327 syscall_restart_handle_timeout_pre(flags, timeout);
2329 flags |= B_CAN_INTERRUPT;
2331 siginfo_t info;
2332 status_t status = sigwait_internal(&set, &info, flags, timeout);
2333 if (status == B_OK) {
2334 // copy the info back to userland, if userInfo is non-NULL
2335 if (userInfo != NULL)
2336 status = user_memcpy(userInfo, &info, sizeof(info));
2337 } else if (status == B_INTERRUPTED) {
2338 // make sure we'll be restarted
2339 Thread* thread = thread_get_current_thread();
2340 atomic_or(&thread->flags, THREAD_FLAGS_ALWAYS_RESTART_SYSCALL);
2343 return syscall_restart_handle_timeout_post(status, timeout);
2347 status_t
2348 _user_sigsuspend(const sigset_t *userMask)
2350 sigset_t mask;
2352 if (userMask == NULL)
2353 return B_BAD_VALUE;
2354 if (user_memcpy(&mask, userMask, sizeof(sigset_t)) < B_OK)
2355 return B_BAD_ADDRESS;
2357 return sigsuspend_internal(&mask);
2361 status_t
2362 _user_sigpending(sigset_t *userSet)
2364 sigset_t set;
2365 status_t status;
2367 if (userSet == NULL)
2368 return B_BAD_VALUE;
2369 if (!IS_USER_ADDRESS(userSet))
2370 return B_BAD_ADDRESS;
2372 status = sigpending_internal(&set);
2373 if (status == B_OK
2374 && user_memcpy(userSet, &set, sizeof(sigset_t)) < B_OK)
2375 return B_BAD_ADDRESS;
2377 return status;
2381 status_t
2382 _user_set_signal_stack(const stack_t* newUserStack, stack_t* oldUserStack)
2384 Thread *thread = thread_get_current_thread();
2385 struct stack_t newStack, oldStack;
2386 bool onStack = false;
2388 if ((newUserStack != NULL && user_memcpy(&newStack, newUserStack,
2389 sizeof(stack_t)) < B_OK)
2390 || (oldUserStack != NULL && user_memcpy(&oldStack, oldUserStack,
2391 sizeof(stack_t)) < B_OK))
2392 return B_BAD_ADDRESS;
2394 if (thread->signal_stack_enabled) {
2395 // determine whether or not the user thread is currently
2396 // on the active signal stack
2397 onStack = arch_on_signal_stack(thread);
2400 if (oldUserStack != NULL) {
2401 oldStack.ss_sp = (void *)thread->signal_stack_base;
2402 oldStack.ss_size = thread->signal_stack_size;
2403 oldStack.ss_flags = (thread->signal_stack_enabled ? 0 : SS_DISABLE)
2404 | (onStack ? SS_ONSTACK : 0);
2407 if (newUserStack != NULL) {
2408 // no flags other than SS_DISABLE are allowed
2409 if ((newStack.ss_flags & ~SS_DISABLE) != 0)
2410 return B_BAD_VALUE;
2412 if ((newStack.ss_flags & SS_DISABLE) == 0) {
2413 // check if the size is valid
2414 if (newStack.ss_size < MINSIGSTKSZ)
2415 return B_NO_MEMORY;
2416 if (onStack)
2417 return B_NOT_ALLOWED;
2418 if (!IS_USER_ADDRESS(newStack.ss_sp))
2419 return B_BAD_VALUE;
2421 thread->signal_stack_base = (addr_t)newStack.ss_sp;
2422 thread->signal_stack_size = newStack.ss_size;
2423 thread->signal_stack_enabled = true;
2424 } else
2425 thread->signal_stack_enabled = false;
2428 // only copy the old stack info if a pointer has been given
2429 if (oldUserStack != NULL
2430 && user_memcpy(oldUserStack, &oldStack, sizeof(stack_t)) < B_OK)
2431 return B_BAD_ADDRESS;
2433 return B_OK;
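/* Illustrative userland sketch (assumes only standard POSIX <signal.h> and
   <stdlib.h>; the handler and helper names are made up, error handling is
   omitted; not part of this file): set up an alternate stack and run a
   SIGSEGV handler on it via SA_ONSTACK -- the values pass the checks above.
   \code
   #include <signal.h>
   #include <stdlib.h>

   static void
   on_segv(int signal)
   {
       (void)signal;
       // runs on the alternate stack, e.g. after a stack overflow
   }

   static void
   install_alternate_stack()
   {
       stack_t stack;
       stack.ss_sp = malloc(SIGSTKSZ);
       stack.ss_size = SIGSTKSZ;
       stack.ss_flags = 0;
       sigaltstack(&stack, NULL);

       struct sigaction action;
       action.sa_handler = on_segv;
       sigemptyset(&action.sa_mask);
       action.sa_flags = SA_ONSTACK;   // deliver SIGSEGV on the alt stack
       sigaction(SIGSEGV, &action, NULL);
   }
   \endcode
*/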
2437 /*! Restores the environment of a function that was interrupted by a signal
2438 handler call.
2439 This syscall is invoked when a signal handler function returns. It
2440 deconstructs the signal handler frame and restores the stack and register
2441 state of the function that was interrupted by a signal. The syscall is
2442 therefore somewhat unusual, since it does not return to the calling
2443 function, but to someplace else. In case the signal interrupted a syscall,
2444 it will appear as if the syscall just returned. That is also the reason why
2445 this syscall returns an int64, since it needs to return the value the
2446 interrupted syscall returns, which is potentially 64 bits wide.
2448 \param userSignalFrameData The signal frame data created for the signal
2449 handler. Some of the data (e.g. registers) may have been modified by
2450 the signal handler.
2451 \return In case the signal interrupted a syscall, the return value of that
2452 syscall. Otherwise (i.e. in case of a (hardware) interrupt/exception)
2453 the value might need to be tailored such that after a return to userland
2454 the restored environment is identical to the interrupted one (unless
2455 explicitly modified). E.g. for x86 to achieve that, the return value
2456 must contain the eax|edx values of the interrupted environment.
2458 int64
2459 _user_restore_signal_frame(struct signal_frame_data* userSignalFrameData)
2461 syscall_64_bit_return_value();
2463 Thread *thread = thread_get_current_thread();
2465 // copy the signal frame data from userland
2466 signal_frame_data signalFrameData;
2467 if (userSignalFrameData == NULL || !IS_USER_ADDRESS(userSignalFrameData)
2468 || user_memcpy(&signalFrameData, userSignalFrameData,
2469 sizeof(signalFrameData)) != B_OK) {
2470 // We failed to copy the signal frame data from userland. This is a
2471 // serious problem. Kill the thread.
2472 dprintf("_user_restore_signal_frame(): thread %" B_PRId32 ": Failed to "
2473 "copy signal frame data (%p) from userland. Killing thread...\n",
2474 thread->id, userSignalFrameData);
2475 kill_thread(thread->id);
2476 return B_BAD_ADDRESS;
2479 // restore the signal block mask
2480 InterruptsSpinLocker locker(thread->team->signal_lock);
2482 thread->sig_block_mask
2483 = signalFrameData.context.uc_sigmask & BLOCKABLE_SIGNALS;
2484 update_current_thread_signals_flag();
2486 locker.Unlock();
2488 // restore the syscall restart related thread flags and the syscall restart
2489 // parameters
2490 atomic_and(&thread->flags,
2491 ~(THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
2492 atomic_or(&thread->flags, signalFrameData.thread_flags
2493 & (THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_64_BIT_SYSCALL_RETURN));
2495 memcpy(thread->syscall_restart.parameters,
2496 signalFrameData.syscall_restart_parameters,
2497 sizeof(thread->syscall_restart.parameters));
2499 // restore the previously stored Thread::user_signal_context
2500 thread->user_signal_context = signalFrameData.context.uc_link;
2501 if (thread->user_signal_context != NULL
2502 && !IS_USER_ADDRESS(thread->user_signal_context)) {
2503 thread->user_signal_context = NULL;
2506 // let the architecture specific code restore the registers
2507 return arch_restore_signal_frame(&signalFrameData);