/*
 * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
25 #include <AutoDeleter.h>
26 #include <FindDirectory.h>
28 #include <extended_system_info_defs.h>
31 #include <boot_device.h>
33 #include <file_cache.h>
34 #include <find_directory_private.h>
40 #include <kscheduler.h>
42 #include <Notifications.h>
44 #include <posix/realtime_sem.h>
45 #include <posix/xsi_semaphore.h>
47 #include <syscall_process_info.h>
48 #include <syscall_restart.h>
52 #include <user_runtime.h>
53 #include <user_thread.h>
54 #include <usergroup.h>
57 #include <vm/VMAddressSpace.h>
58 #include <util/AutoLock.h>
60 #include "TeamThreadTables.h"
65 # define TRACE(x) dprintf x
78 size_t flat_args_size
;
87 #define TEAM_ARGS_FLAG_NO_ASLR 0x01
93 class TeamNotificationService
: public DefaultNotificationService
{
95 TeamNotificationService();
97 void Notify(uint32 eventCode
, Team
* team
);
101 // #pragma mark - TeamTable
104 typedef BKernel::TeamThreadTable
<Team
> TeamTable
;
107 // #pragma mark - ProcessGroupHashDefinition
110 struct ProcessGroupHashDefinition
{
111 typedef pid_t KeyType
;
112 typedef ProcessGroup ValueType
;
114 size_t HashKey(pid_t key
) const
119 size_t Hash(ProcessGroup
* value
) const
121 return HashKey(value
->id
);
124 bool Compare(pid_t key
, ProcessGroup
* value
) const
126 return value
->id
== key
;
129 ProcessGroup
*& GetLink(ProcessGroup
* value
) const
135 typedef BOpenHashTable
<ProcessGroupHashDefinition
> ProcessGroupHashTable
;
138 } // unnamed namespace
144 // the team_id -> Team hash table and the lock protecting it
145 static TeamTable sTeamHash
;
146 static spinlock sTeamHashLock
= B_SPINLOCK_INITIALIZER
;
148 // the pid_t -> ProcessGroup hash table and the lock protecting it
149 static ProcessGroupHashTable sGroupHash
;
150 static spinlock sGroupHashLock
= B_SPINLOCK_INITIALIZER
;
152 static Team
* sKernelTeam
= NULL
;
154 // A list of process groups of children of dying session leaders that need to
155 // be signalled, if they have become orphaned and contain stopped processes.
156 static ProcessGroupList sOrphanedCheckProcessGroups
;
157 static mutex sOrphanedCheckLock
158 = MUTEX_INITIALIZER("orphaned process group check");
160 // some arbitrarily chosen limits -- should probably depend on the available
161 // memory (the limit is not yet enforced)
162 static int32 sMaxTeams
= 2048;
163 static int32 sUsedTeams
= 1;
165 static TeamNotificationService sNotificationService
;
167 static const size_t kTeamUserDataReservedSize
= 128 * B_PAGE_SIZE
;
168 static const size_t kTeamUserDataInitialSize
= 4 * B_PAGE_SIZE
;
171 // #pragma mark - TeamListIterator
174 TeamListIterator::TeamListIterator()
177 InterruptsSpinLocker
locker(sTeamHashLock
);
178 sTeamHash
.InsertIteratorEntry(&fEntry
);
182 TeamListIterator::~TeamListIterator()
185 InterruptsSpinLocker
locker(sTeamHashLock
);
186 sTeamHash
.RemoveIteratorEntry(&fEntry
);
191 TeamListIterator::Next()
193 // get the next team -- if there is one, get reference for it
194 InterruptsSpinLocker
locker(sTeamHashLock
);
195 Team
* team
= sTeamHash
.NextElement(&fEntry
);
197 team
->AcquireReference();
203 // #pragma mark - Tracing
207 namespace TeamTracing
{
209 class TeamForked
: public AbstractTraceEntry
{
211 TeamForked(thread_id forkedThread
)
213 fForkedThread(forkedThread
)
218 virtual void AddDump(TraceOutput
& out
)
220 out
.Print("team forked, new thread %" B_PRId32
, fForkedThread
);
224 thread_id fForkedThread
;
228 class ExecTeam
: public AbstractTraceEntry
{
230 ExecTeam(const char* path
, int32 argCount
, const char* const* args
,
231 int32 envCount
, const char* const* env
)
236 fPath
= alloc_tracing_buffer_strcpy(path
, B_PATH_NAME_LENGTH
,
239 // determine the buffer size we need for the args
240 size_t argBufferSize
= 0;
241 for (int32 i
= 0; i
< argCount
; i
++)
242 argBufferSize
+= strlen(args
[i
]) + 1;
245 fArgs
= (char*)alloc_tracing_buffer(argBufferSize
);
247 char* buffer
= fArgs
;
248 for (int32 i
= 0; i
< argCount
; i
++) {
249 size_t argSize
= strlen(args
[i
]) + 1;
250 memcpy(buffer
, args
[i
], argSize
);
255 // ignore env for the time being
262 virtual void AddDump(TraceOutput
& out
)
264 out
.Print("team exec, \"%p\", args:", fPath
);
268 for (int32 i
= 0; !out
.IsFull() && i
< fArgCount
; i
++) {
269 out
.Print(" \"%s\"", args
);
270 args
+= strlen(args
) + 1;
273 out
.Print(" <too long>");
284 job_control_state_name(job_control_state state
)
287 case JOB_CONTROL_STATE_NONE
:
289 case JOB_CONTROL_STATE_STOPPED
:
291 case JOB_CONTROL_STATE_CONTINUED
:
293 case JOB_CONTROL_STATE_DEAD
:
301 class SetJobControlState
: public AbstractTraceEntry
{
303 SetJobControlState(team_id team
, job_control_state newState
, Signal
* signal
)
307 fSignal(signal
!= NULL
? signal
->Number() : 0)
312 virtual void AddDump(TraceOutput
& out
)
314 out
.Print("team set job control state, team %" B_PRId32
", "
315 "new state: %s, signal: %d",
316 fTeam
, job_control_state_name(fNewState
), fSignal
);
321 job_control_state fNewState
;
326 class WaitForChild
: public AbstractTraceEntry
{
328 WaitForChild(pid_t child
, uint32 flags
)
336 virtual void AddDump(TraceOutput
& out
)
338 out
.Print("team wait for child, child: %" B_PRId32
", "
339 "flags: %#" B_PRIx32
, fChild
, fFlags
);
348 class WaitForChildDone
: public AbstractTraceEntry
{
350 WaitForChildDone(const job_control_entry
& entry
)
354 fStatus(entry
.status
),
355 fReason(entry
.reason
),
356 fSignal(entry
.signal
)
361 WaitForChildDone(status_t error
)
368 virtual void AddDump(TraceOutput
& out
)
371 out
.Print("team wait for child done, team: %" B_PRId32
", "
372 "state: %s, status: %#" B_PRIx32
", reason: %#x, signal: %d\n",
373 fTeam
, job_control_state_name(fState
), fStatus
, fReason
,
376 out
.Print("team wait for child failed, error: "
377 "%#" B_PRIx32
", ", fTeam
);
382 job_control_state fState
;
389 } // namespace TeamTracing
391 # define T(x) new(std::nothrow) TeamTracing::x;
397 // #pragma mark - TeamNotificationService
400 TeamNotificationService::TeamNotificationService()
401 : DefaultNotificationService("teams")
407 TeamNotificationService::Notify(uint32 eventCode
, Team
* team
)
409 char eventBuffer
[128];
411 event
.SetTo(eventBuffer
, sizeof(eventBuffer
), TEAM_MONITOR
);
412 event
.AddInt32("event", eventCode
);
413 event
.AddInt32("team", team
->id
);
414 event
.AddPointer("teamStruct", team
);
416 DefaultNotificationService::Notify(event
, eventCode
);
420 // #pragma mark - Team
423 Team::Team(team_id id
, bool kernel
)
432 mutex_init(&fLock
, "Team:kernel");
435 snprintf(lockName
, sizeof(lockName
), "Team:%" B_PRId32
, id
);
436 mutex_init_etc(&fLock
, lockName
, MUTEX_FLAG_CLONE_NAME
);
439 hash_next
= siblings_next
= children
= parent
= NULL
;
444 address_space
= NULL
;
445 realtime_sem_context
= NULL
;
446 xsi_sem_context
= NULL
;
450 state
= TEAM_STATE_BIRTH
;
457 free_user_threads
= NULL
;
459 commpage_address
= NULL
;
461 supplementary_groups
= NULL
;
462 supplementary_group_count
= 0;
464 dead_threads_kernel_time
= 0;
465 dead_threads_user_time
= 0;
466 cpu_clock_offset
= 0;
469 list_init(&dead_threads
);
470 dead_threads_count
= 0;
473 dead_children
.count
= 0;
474 dead_children
.kernel_time
= 0;
475 dead_children
.user_time
= 0;
478 job_control_entry
= new(nothrow
) ::job_control_entry
;
479 if (job_control_entry
!= NULL
) {
480 job_control_entry
->state
= JOB_CONTROL_STATE_NONE
;
481 job_control_entry
->thread
= id
;
482 job_control_entry
->team
= this;
485 // exit status -- setting initialized to false suffices
486 exit
.initialized
= false;
488 list_init(&sem_list
);
489 list_init_etc(&port_list
, port_team_link_offset());
490 list_init(&image_list
);
491 list_init(&watcher_list
);
493 clear_team_debug_info(&debug_info
, true);
495 // init dead/stopped/continued children condition vars
496 dead_children
.condition_variable
.Init(&dead_children
, "team children");
498 B_INITIALIZE_SPINLOCK(&time_lock
);
499 B_INITIALIZE_SPINLOCK(&signal_lock
);
501 fQueuedSignalsCounter
= new(std::nothrow
) BKernel::QueuedSignalsCounter(
502 kernel
? -1 : MAX_QUEUED_SIGNALS
);
503 memset(fSignalActions
, 0, sizeof(fSignalActions
));
505 fUserDefinedTimerCount
= 0;
507 fCoreDumpCondition
= NULL
;
513 // get rid of all associated data
514 PrepareForDeletion();
516 if (io_context
!= NULL
)
517 vfs_put_io_context(io_context
);
518 delete_owned_ports(this);
519 sem_delete_owned_sems(this);
521 DeleteUserTimers(false);
523 fPendingSignals
.Clear();
525 if (fQueuedSignalsCounter
!= NULL
)
526 fQueuedSignalsCounter
->ReleaseReference();
528 while (thread_death_entry
* threadDeathEntry
529 = (thread_death_entry
*)list_remove_head_item(&dead_threads
)) {
530 free(threadDeathEntry
);
533 while (::job_control_entry
* entry
= dead_children
.entries
.RemoveHead())
536 while (free_user_thread
* entry
= free_user_threads
) {
537 free_user_threads
= entry
->next
;
541 malloc_referenced_release(supplementary_groups
);
543 delete job_control_entry
;
544 // usually already NULL and transferred to the parent
546 mutex_destroy(&fLock
);
551 Team::Create(team_id id
, const char* name
, bool kernel
)
553 // create the team object
554 Team
* team
= new(std::nothrow
) Team(id
, kernel
);
557 ObjectDeleter
<Team
> teamDeleter(team
);
562 // check initialization
563 if (team
->job_control_entry
== NULL
|| team
->fQueuedSignalsCounter
== NULL
)
566 // finish initialization (arch specifics)
567 if (arch_team_init_team_struct(team
, kernel
) != B_OK
)
571 status_t error
= user_timer_create_team_timers(team
);
576 // everything went fine
577 return teamDeleter
.Detach();
581 /*! \brief Returns the team with the given ID.
582 Returns a reference to the team.
583 Team and thread spinlock must not be held.
586 Team::Get(team_id id
)
588 if (id
== B_CURRENT_TEAM
) {
589 Team
* team
= thread_get_current_thread()->team
;
590 team
->AcquireReference();
594 InterruptsSpinLocker
locker(sTeamHashLock
);
595 Team
* team
= sTeamHash
.Lookup(id
);
597 team
->AcquireReference();
602 /*! \brief Returns the team with the given ID in a locked state.
603 Returns a reference to the team.
604 Team and thread spinlock must not be held.
607 Team::GetAndLock(team_id id
)
610 Team
* team
= Get(id
);
617 // only return the team, when it isn't already dying
618 if (team
->state
>= TEAM_STATE_SHUTDOWN
) {
620 team
->ReleaseReference();
628 /*! Locks the team and its parent team (if any).
629 The caller must hold a reference to the team or otherwise make sure that
631 If the team doesn't have a parent, only the team itself is locked. If the
632 team's parent is the kernel team and \a dontLockParentIfKernel is \c true,
633 only the team itself is locked.
635 \param dontLockParentIfKernel If \c true, the team's parent team is only
636 locked, if it is not the kernel team.
639 Team::LockTeamAndParent(bool dontLockParentIfKernel
)
641 // The locking order is parent -> child. Since the parent can change as long
642 // as we don't lock the team, we need to do a trial and error loop.
646 // If the team doesn't have a parent, we're done. Otherwise try to lock
647 // the parent.This will succeed in most cases, simplifying things.
648 Team
* parent
= this->parent
;
649 if (parent
== NULL
|| (dontLockParentIfKernel
&& parent
== sKernelTeam
)
650 || parent
->TryLock()) {
654 // get a temporary reference to the parent, unlock this team, lock the
655 // parent, and re-lock this team
656 BReference
<Team
> parentReference(parent
);
662 // If the parent hasn't changed in the meantime, we're done.
663 if (this->parent
== parent
)
666 // The parent has changed -- unlock and retry.
672 /*! Unlocks the team and its parent team (if any).
675 Team::UnlockTeamAndParent()
684 /*! Locks the team, its parent team (if any), and the team's process group.
685 The caller must hold a reference to the team or otherwise make sure that
687 If the team doesn't have a parent, only the team itself is locked.
690 Team::LockTeamParentAndProcessGroup()
692 LockTeamAndProcessGroup();
694 // We hold the group's and the team's lock, but not the parent team's lock.
695 // If we have a parent, try to lock it.
696 if (this->parent
== NULL
|| this->parent
->TryLock())
699 // No success -- unlock the team and let LockTeamAndParent() do the rest of
702 LockTeamAndParent(false);
706 /*! Unlocks the team, its parent team (if any), and the team's process group.
709 Team::UnlockTeamParentAndProcessGroup()
721 Team::LockTeamAndProcessGroup()
723 // The locking order is process group -> child. Since the process group can
724 // change as long as we don't lock the team, we need to do a trial and error
729 // Try to lock the group. This will succeed in most cases, simplifying
731 ProcessGroup
* group
= this->group
;
732 if (group
->TryLock())
735 // get a temporary reference to the group, unlock this team, lock the
736 // group, and re-lock this team
737 BReference
<ProcessGroup
> groupReference(group
);
743 // If the group hasn't changed in the meantime, we're done.
744 if (this->group
== group
)
747 // The group has changed -- unlock and retry.
754 Team::UnlockTeamAndProcessGroup()
762 Team::SetName(const char* name
)
764 if (const char* lastSlash
= strrchr(name
, '/'))
765 name
= lastSlash
+ 1;
767 strlcpy(fName
, name
, B_OS_NAME_LENGTH
);
772 Team::SetArgs(const char* args
)
774 strlcpy(fArgs
, args
, sizeof(fArgs
));
779 Team::SetArgs(const char* path
, const char* const* otherArgs
, int otherArgCount
)
782 strlcpy(fArgs
, path
, sizeof(fArgs
));
783 for (int i
= 0; i
< otherArgCount
; i
++) {
784 strlcat(fArgs
, " ", sizeof(fArgs
));
785 strlcat(fArgs
, otherArgs
[i
], sizeof(fArgs
));
791 Team::ResetSignalsOnExec()
793 // We are supposed to keep pending signals. Signal actions shall be reset
794 // partially: SIG_IGN and SIG_DFL dispositions shall be kept as they are
795 // (for SIGCHLD it's implementation-defined). Others shall be reset to
796 // SIG_DFL. SA_ONSTACK shall be cleared. There's no mention of the other
797 // flags, but since there aren't any handlers, they make little sense, so
800 for (uint32 i
= 1; i
<= MAX_SIGNAL_NUMBER
; i
++) {
801 struct sigaction
& action
= SignalActionFor(i
);
802 if (action
.sa_handler
!= SIG_IGN
&& action
.sa_handler
!= SIG_DFL
)
803 action
.sa_handler
= SIG_DFL
;
807 action
.sa_userdata
= NULL
;
813 Team::InheritSignalActions(Team
* parent
)
815 memcpy(fSignalActions
, parent
->fSignalActions
, sizeof(fSignalActions
));
819 /*! Adds the given user timer to the team and, if user-defined, assigns it an
822 The caller must hold the team's lock.
824 \param timer The timer to be added. If it doesn't have an ID yet, it is
825 considered user-defined and will be assigned an ID.
826 \return \c B_OK, if the timer was added successfully, another error code
830 Team::AddUserTimer(UserTimer
* timer
)
832 // don't allow addition of timers when already shutting the team down
833 if (state
>= TEAM_STATE_SHUTDOWN
)
834 return B_BAD_TEAM_ID
;
836 // If the timer is user-defined, check timer limit and increment
837 // user-defined count.
838 if (timer
->ID() < 0 && !CheckAddUserDefinedTimer())
841 fUserTimers
.AddTimer(timer
);
847 /*! Removes the given user timer from the team.
849 The caller must hold the team's lock.
851 \param timer The timer to be removed.
855 Team::RemoveUserTimer(UserTimer
* timer
)
857 fUserTimers
.RemoveTimer(timer
);
859 if (timer
->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID
)
860 UserDefinedTimersRemoved(1);
864 /*! Deletes all (or all user-defined) user timers of the team.
866 Timer's belonging to the team's threads are not affected.
867 The caller must hold the team's lock.
869 \param userDefinedOnly If \c true, only the user-defined timers are deleted,
870 otherwise all timers are deleted.
873 Team::DeleteUserTimers(bool userDefinedOnly
)
875 int32 count
= fUserTimers
.DeleteTimers(userDefinedOnly
);
876 UserDefinedTimersRemoved(count
);
880 /*! If not at the limit yet, increments the team's user-defined timer count.
881 \return \c true, if the limit wasn't reached yet, \c false otherwise.
884 Team::CheckAddUserDefinedTimer()
886 int32 oldCount
= atomic_add(&fUserDefinedTimerCount
, 1);
887 if (oldCount
>= MAX_USER_TIMERS_PER_TEAM
) {
888 atomic_add(&fUserDefinedTimerCount
, -1);
896 /*! Subtracts the given count for the team's user-defined timer count.
897 \param count The count to subtract.
900 Team::UserDefinedTimersRemoved(int32 count
)
902 atomic_add(&fUserDefinedTimerCount
, -count
);
907 Team::DeactivateCPUTimeUserTimers()
909 while (TeamTimeUserTimer
* timer
= fCPUTimeUserTimers
.Head())
912 while (TeamUserTimeUserTimer
* timer
= fUserTimeUserTimers
.Head())
917 /*! Returns the team's current total CPU time (kernel + user + offset).
919 The caller must hold \c time_lock.
921 \param ignoreCurrentRun If \c true and the current thread is one team's
922 threads, don't add the time since the last time \c last_time was
923 updated. Should be used in "thread unscheduled" scheduler callbacks,
924 since although the thread is still running at that time, its time has
925 already been stopped.
926 \return The team's current total CPU time.
929 Team::CPUTime(bool ignoreCurrentRun
, Thread
* lockedThread
) const
931 bigtime_t time
= cpu_clock_offset
+ dead_threads_kernel_time
932 + dead_threads_user_time
;
934 Thread
* currentThread
= thread_get_current_thread();
935 bigtime_t now
= system_time();
937 for (Thread
* thread
= thread_list
; thread
!= NULL
;
938 thread
= thread
->team_next
) {
939 bool alreadyLocked
= thread
== lockedThread
;
940 SpinLocker
threadTimeLocker(thread
->time_lock
, alreadyLocked
);
941 time
+= thread
->kernel_time
+ thread
->user_time
;
943 if (thread
->last_time
!= 0) {
944 if (!ignoreCurrentRun
|| thread
!= currentThread
)
945 time
+= now
- thread
->last_time
;
949 threadTimeLocker
.Detach();
956 /*! Returns the team's current user CPU time.
958 The caller must hold \c time_lock.
960 \return The team's current user CPU time.
963 Team::UserCPUTime() const
965 bigtime_t time
= dead_threads_user_time
;
967 bigtime_t now
= system_time();
969 for (Thread
* thread
= thread_list
; thread
!= NULL
;
970 thread
= thread
->team_next
) {
971 SpinLocker
threadTimeLocker(thread
->time_lock
);
972 time
+= thread
->user_time
;
974 if (thread
->last_time
!= 0 && !thread
->in_kernel
)
975 time
+= now
- thread
->last_time
;
982 // #pragma mark - ProcessGroup
985 ProcessGroup::ProcessGroup(pid_t id
)
990 fInOrphanedCheckList(false)
993 snprintf(lockName
, sizeof(lockName
), "Group:%" B_PRId32
, id
);
994 mutex_init_etc(&fLock
, lockName
, MUTEX_FLAG_CLONE_NAME
);
998 ProcessGroup::~ProcessGroup()
1000 TRACE(("ProcessGroup::~ProcessGroup(): id = %" B_PRId32
"\n", id
));
1002 // If the group is in the orphaned check list, remove it.
1003 MutexLocker
orphanedCheckLocker(sOrphanedCheckLock
);
1005 if (fInOrphanedCheckList
)
1006 sOrphanedCheckProcessGroups
.Remove(this);
1008 orphanedCheckLocker
.Unlock();
1010 // remove group from the hash table and from the session
1011 if (fSession
!= NULL
) {
1012 InterruptsSpinLocker
groupHashLocker(sGroupHashLock
);
1013 sGroupHash
.RemoveUnchecked(this);
1014 groupHashLocker
.Unlock();
1016 fSession
->ReleaseReference();
1019 mutex_destroy(&fLock
);
1023 /*static*/ ProcessGroup
*
1024 ProcessGroup::Get(pid_t id
)
1026 InterruptsSpinLocker
groupHashLocker(sGroupHashLock
);
1027 ProcessGroup
* group
= sGroupHash
.Lookup(id
);
1029 group
->AcquireReference();
1034 /*! Adds the group the given session and makes it publicly accessible.
1035 The caller must not hold the process group hash lock.
1038 ProcessGroup::Publish(ProcessSession
* session
)
1040 InterruptsSpinLocker
groupHashLocker(sGroupHashLock
);
1041 PublishLocked(session
);
1045 /*! Adds the group to the given session and makes it publicly accessible.
1046 The caller must hold the process group hash lock.
1049 ProcessGroup::PublishLocked(ProcessSession
* session
)
1051 ASSERT(sGroupHash
.Lookup(this->id
) == NULL
);
1054 fSession
->AcquireReference();
1056 sGroupHash
.InsertUnchecked(this);
1060 /*! Checks whether the process group is orphaned.
1061 The caller must hold the group's lock.
1062 \return \c true, if the group is orphaned, \c false otherwise.
1065 ProcessGroup::IsOrphaned() const
1067 // Orphaned Process Group: "A process group in which the parent of every
1068 // member is either itself a member of the group or is not a member of the
1069 // group's session." (Open Group Base Specs Issue 7)
1070 bool orphaned
= true;
1073 while (orphaned
&& team
!= NULL
) {
1074 team
->LockTeamAndParent(false);
1076 Team
* parent
= team
->parent
;
1077 if (parent
!= NULL
&& parent
->group_id
!= id
1078 && parent
->session_id
== fSession
->id
) {
1082 team
->UnlockTeamAndParent();
1084 team
= team
->group_next
;
1092 ProcessGroup::ScheduleOrphanedCheck()
1094 MutexLocker
orphanedCheckLocker(sOrphanedCheckLock
);
1096 if (!fInOrphanedCheckList
) {
1097 sOrphanedCheckProcessGroups
.Add(this);
1098 fInOrphanedCheckList
= true;
1104 ProcessGroup::UnsetOrphanedCheck()
1106 fInOrphanedCheckList
= false;
1110 // #pragma mark - ProcessSession
1113 ProcessSession::ProcessSession(pid_t id
)
1116 controlling_tty(-1),
1117 foreground_group(-1)
1120 snprintf(lockName
, sizeof(lockName
), "Session:%" B_PRId32
, id
);
1121 mutex_init_etc(&fLock
, lockName
, MUTEX_FLAG_CLONE_NAME
);
1125 ProcessSession::~ProcessSession()
1127 mutex_destroy(&fLock
);
1131 // #pragma mark - KDL functions
1135 _dump_team_info(Team
* team
)
1137 kprintf("TEAM: %p\n", team
);
1138 kprintf("id: %" B_PRId32
" (%#" B_PRIx32
")\n", team
->id
,
1140 kprintf("serial_number: %" B_PRId64
"\n", team
->serial_number
);
1141 kprintf("name: '%s'\n", team
->Name());
1142 kprintf("args: '%s'\n", team
->Args());
1143 kprintf("hash_next: %p\n", team
->hash_next
);
1144 kprintf("parent: %p", team
->parent
);
1145 if (team
->parent
!= NULL
) {
1146 kprintf(" (id = %" B_PRId32
")\n", team
->parent
->id
);
1150 kprintf("children: %p\n", team
->children
);
1151 kprintf("num_threads: %d\n", team
->num_threads
);
1152 kprintf("state: %d\n", team
->state
);
1153 kprintf("flags: 0x%" B_PRIx32
"\n", team
->flags
);
1154 kprintf("io_context: %p\n", team
->io_context
);
1155 if (team
->address_space
)
1156 kprintf("address_space: %p\n", team
->address_space
);
1157 kprintf("user data: %p (area %" B_PRId32
")\n",
1158 (void*)team
->user_data
, team
->user_data_area
);
1159 kprintf("free user thread: %p\n", team
->free_user_threads
);
1160 kprintf("main_thread: %p\n", team
->main_thread
);
1161 kprintf("thread_list: %p\n", team
->thread_list
);
1162 kprintf("group_id: %" B_PRId32
"\n", team
->group_id
);
1163 kprintf("session_id: %" B_PRId32
"\n", team
->session_id
);
1168 dump_team_info(int argc
, char** argv
)
1174 Thread
* thread
= thread_get_current_thread();
1175 if (thread
!= NULL
&& thread
->team
!= NULL
)
1176 _dump_team_info(thread
->team
);
1178 kprintf("No current team!\n");
1182 arg
= strtoul(argv
[1], NULL
, 0);
1183 if (IS_KERNEL_ADDRESS(arg
)) {
1185 _dump_team_info((Team
*)arg
);
1189 // walk through the thread list, trying to match name or id
1190 for (TeamTable::Iterator it
= sTeamHash
.GetIterator();
1191 Team
* team
= it
.Next();) {
1192 if ((team
->Name() && strcmp(argv
[1], team
->Name()) == 0)
1193 || team
->id
== (team_id
)arg
) {
1194 _dump_team_info(team
);
1201 kprintf("team \"%s\" (%" B_PRId32
") doesn't exist!\n", argv
[1], (team_id
)arg
);
1207 dump_teams(int argc
, char** argv
)
1209 kprintf("%-*s id %-*s name\n", B_PRINTF_POINTER_WIDTH
, "team",
1210 B_PRINTF_POINTER_WIDTH
, "parent");
1212 for (TeamTable::Iterator it
= sTeamHash
.GetIterator();
1213 Team
* team
= it
.Next();) {
1214 kprintf("%p%7" B_PRId32
" %p %s\n", team
, team
->id
, team
->parent
, team
->Name());
1221 // #pragma mark - Private functions
1224 /*! Inserts team \a team into the child list of team \a parent.
1226 The caller must hold the lock of both \a parent and \a team.
1228 \param parent The parent team.
1229 \param team The team to be inserted into \a parent's child list.
1232 insert_team_into_parent(Team
* parent
, Team
* team
)
1234 ASSERT(parent
!= NULL
);
1236 team
->siblings_next
= parent
->children
;
1237 parent
->children
= team
;
1238 team
->parent
= parent
;
1242 /*! Removes team \a team from the child list of team \a parent.
1244 The caller must hold the lock of both \a parent and \a team.
1246 \param parent The parent team.
1247 \param team The team to be removed from \a parent's child list.
1250 remove_team_from_parent(Team
* parent
, Team
* team
)
1255 for (child
= parent
->children
; child
!= NULL
;
1256 child
= child
->siblings_next
) {
1257 if (child
== team
) {
1259 parent
->children
= child
->siblings_next
;
1261 last
->siblings_next
= child
->siblings_next
;
1263 team
->parent
= NULL
;
1271 /*! Returns whether the given team is a session leader.
1272 The caller must hold the team's lock or its process group's lock.
1275 is_session_leader(Team
* team
)
1277 return team
->session_id
== team
->id
;
1281 /*! Returns whether the given team is a process group leader.
1282 The caller must hold the team's lock or its process group's lock.
1285 is_process_group_leader(Team
* team
)
1287 return team
->group_id
== team
->id
;
1291 /*! Inserts the given team into the given process group.
1292 The caller must hold the process group's lock, the team's lock, and the
1293 team's parent's lock.
1296 insert_team_into_group(ProcessGroup
* group
, Team
* team
)
1298 team
->group
= group
;
1299 team
->group_id
= group
->id
;
1300 team
->session_id
= group
->Session()->id
;
1302 team
->group_next
= group
->teams
;
1303 group
->teams
= team
;
1304 group
->AcquireReference();
1308 /*! Removes the given team from its process group.
1310 The caller must hold the process group's lock, the team's lock, and the
1311 team's parent's lock. Interrupts must be enabled.
1313 \param team The team that'll be removed from its process group.
1316 remove_team_from_group(Team
* team
)
1318 ProcessGroup
* group
= team
->group
;
1322 // the team must be in a process group to let this function have any effect
1326 for (current
= group
->teams
; current
!= NULL
;
1327 current
= current
->group_next
) {
1328 if (current
== team
) {
1330 group
->teams
= current
->group_next
;
1332 last
->group_next
= current
->group_next
;
1341 team
->group_next
= NULL
;
1343 group
->ReleaseReference();
1348 create_team_user_data(Team
* team
, void* exactAddress
= NULL
)
1353 if (exactAddress
!= NULL
) {
1354 address
= exactAddress
;
1355 addressSpec
= B_EXACT_ADDRESS
;
1357 address
= (void*)KERNEL_USER_DATA_BASE
;
1358 addressSpec
= B_RANDOMIZED_BASE_ADDRESS
;
1361 status_t result
= vm_reserve_address_range(team
->id
, &address
, addressSpec
,
1362 kTeamUserDataReservedSize
, RESERVED_AVOID_BASE
);
1364 virtual_address_restrictions virtualRestrictions
= {};
1365 if (result
== B_OK
|| exactAddress
!= NULL
) {
1366 if (exactAddress
!= NULL
)
1367 virtualRestrictions
.address
= exactAddress
;
1369 virtualRestrictions
.address
= address
;
1370 virtualRestrictions
.address_specification
= B_EXACT_ADDRESS
;
1372 virtualRestrictions
.address
= (void*)KERNEL_USER_DATA_BASE
;
1373 virtualRestrictions
.address_specification
= B_RANDOMIZED_BASE_ADDRESS
;
1376 physical_address_restrictions physicalRestrictions
= {};
1377 team
->user_data_area
= create_area_etc(team
->id
, "user area",
1378 kTeamUserDataInitialSize
, B_FULL_LOCK
, B_READ_AREA
| B_WRITE_AREA
, 0, 0,
1379 &virtualRestrictions
, &physicalRestrictions
, &address
);
1380 if (team
->user_data_area
< 0)
1381 return team
->user_data_area
;
1383 team
->user_data
= (addr_t
)address
;
1384 team
->used_user_data
= 0;
1385 team
->user_data_size
= kTeamUserDataInitialSize
;
1386 team
->free_user_threads
= NULL
;
1393 delete_team_user_data(Team
* team
)
1395 if (team
->user_data_area
>= 0) {
1396 vm_delete_area(team
->id
, team
->user_data_area
, true);
1397 vm_unreserve_address_range(team
->id
, (void*)team
->user_data
,
1398 kTeamUserDataReservedSize
);
1400 team
->user_data
= 0;
1401 team
->used_user_data
= 0;
1402 team
->user_data_size
= 0;
1403 team
->user_data_area
= -1;
1404 while (free_user_thread
* entry
= team
->free_user_threads
) {
1405 team
->free_user_threads
= entry
->next
;
1413 copy_user_process_args(const char* const* userFlatArgs
, size_t flatArgsSize
,
1414 int32 argCount
, int32 envCount
, char**& _flatArgs
)
1416 if (argCount
< 0 || envCount
< 0)
1419 if (flatArgsSize
> MAX_PROCESS_ARGS_SIZE
)
1420 return B_TOO_MANY_ARGS
;
1421 if ((argCount
+ envCount
+ 2) * sizeof(char*) > flatArgsSize
)
1424 if (!IS_USER_ADDRESS(userFlatArgs
))
1425 return B_BAD_ADDRESS
;
1427 // allocate kernel memory
1428 char** flatArgs
= (char**)malloc(_ALIGN(flatArgsSize
));
1429 if (flatArgs
== NULL
)
1432 if (user_memcpy(flatArgs
, userFlatArgs
, flatArgsSize
) != B_OK
) {
1434 return B_BAD_ADDRESS
;
1437 // check and relocate the array
1438 status_t error
= B_OK
;
1439 const char* stringBase
= (char*)flatArgs
+ argCount
+ envCount
+ 2;
1440 const char* stringEnd
= (char*)flatArgs
+ flatArgsSize
;
1441 for (int32 i
= 0; i
< argCount
+ envCount
+ 2; i
++) {
1442 if (i
== argCount
|| i
== argCount
+ envCount
+ 1) {
1443 // check array null termination
1444 if (flatArgs
[i
] != NULL
) {
1445 error
= B_BAD_VALUE
;
1450 char* arg
= (char*)flatArgs
+ (flatArgs
[i
] - (char*)userFlatArgs
);
1451 size_t maxLen
= stringEnd
- arg
;
1452 if (arg
< stringBase
|| arg
>= stringEnd
1453 || strnlen(arg
, maxLen
) == maxLen
) {
1454 error
= B_BAD_VALUE
;
1463 _flatArgs
= flatArgs
;
1472 free_team_arg(struct team_arg
* teamArg
)
1474 if (teamArg
!= NULL
) {
1475 free(teamArg
->flat_args
);
1476 free(teamArg
->path
);
1483 create_team_arg(struct team_arg
** _teamArg
, const char* path
, char** flatArgs
,
1484 size_t flatArgsSize
, int32 argCount
, int32 envCount
, mode_t umask
,
1485 port_id port
, uint32 token
)
1487 struct team_arg
* teamArg
= (struct team_arg
*)malloc(sizeof(team_arg
));
1488 if (teamArg
== NULL
)
1491 teamArg
->path
= strdup(path
);
1492 if (teamArg
->path
== NULL
) {
1497 // copy the args over
1498 teamArg
->flat_args
= flatArgs
;
1499 teamArg
->flat_args_size
= flatArgsSize
;
1500 teamArg
->arg_count
= argCount
;
1501 teamArg
->env_count
= envCount
;
1503 teamArg
->umask
= umask
;
1504 teamArg
->error_port
= port
;
1505 teamArg
->error_token
= token
;
1507 // determine the flags from the environment
1508 const char* const* env
= flatArgs
+ argCount
+ 1;
1509 for (int32 i
= 0; i
< envCount
; i
++) {
1510 if (strcmp(env
[i
], "DISABLE_ASLR=1") == 0) {
1511 teamArg
->flags
|= TEAM_ARGS_FLAG_NO_ASLR
;
1516 *_teamArg
= teamArg
;
// Entry-point helper executed as the first thread of a newly created team.
// Takes ownership of the team_arg passed as \a args: it copies the flat
// arguments/environment onto the new thread's user stack, fills in the
// user_space_program_args structure, clones and registers the commpage,
// locates and loads runtime_loader, and finally enters userspace.
// Returns only on error (the normal path ends in
// thread_enter_userspace_new_team(), which does not return).
// NOTE(review): several original lines (declarations, error checks) are
// elided in this extraction; comments below only state what is visible.
1522 team_create_thread_start_internal(void* args
)
1527 struct team_arg
* teamArgs
= (struct team_arg
*)args
;
1532 struct user_space_program_args
* programArgs
;
1533 uint32 argCount
, envCount
;
1535 thread
= thread_get_current_thread();
1536 team
= thread
->team
;
// let the file cache know which binary was launched (node monitoring/boot
// profiling hook)
1537 cache_node_launched(teamArgs
->arg_count
, teamArgs
->flat_args
);
1539 TRACE(("team_create_thread_start: entry thread %" B_PRId32
"\n",
1542 // Main stack area layout is currently as follows (starting from 0):
1545 // ---------------------------------+--------------------------------
1546 // USER_MAIN_THREAD_STACK_SIZE | actual stack
1547 // TLS_SIZE | TLS data
1548 // sizeof(user_space_program_args) | argument structure for the runtime
1550 // flat arguments size | flat process arguments and environment
1552 // TODO: ENV_SIZE is a) limited, and b) not used after libroot copied it to
1554 // TODO: we could reserve the whole USER_STACK_REGION upfront...
1556 argCount
= teamArgs
->arg_count
;
1557 envCount
= teamArgs
->env_count
;
// the program-args structure lives right above the stack + TLS area (see
// layout sketch above)
1559 programArgs
= (struct user_space_program_args
*)(thread
->user_stack_base
1560 + thread
->user_stack_size
+ TLS_SIZE
);
// argv array directly follows the structure; envp follows argv (argCount
// entries plus the NULL terminator)
1562 userArgs
= (char**)(programArgs
+ 1);
1563 userEnv
= userArgs
+ argCount
+ 1;
1564 path
= teamArgs
->path
;
// Copy everything into userland memory; every copy is checked, since the
// target is the (untrusted) user stack.
1566 if (user_strlcpy(programArgs
->program_path
, path
,
1567 sizeof(programArgs
->program_path
)) < B_OK
1568 || user_memcpy(&programArgs
->arg_count
, &argCount
, sizeof(int32
)) < B_OK
1569 || user_memcpy(&programArgs
->args
, &userArgs
, sizeof(char**)) < B_OK
1570 || user_memcpy(&programArgs
->env_count
, &envCount
, sizeof(int32
)) < B_OK
1571 || user_memcpy(&programArgs
->env
, &userEnv
, sizeof(char**)) < B_OK
1572 || user_memcpy(&programArgs
->error_port
, &teamArgs
->error_port
,
1573 sizeof(port_id
)) < B_OK
1574 || user_memcpy(&programArgs
->error_token
, &teamArgs
->error_token
,
1575 sizeof(uint32
)) < B_OK
1576 || user_memcpy(&programArgs
->umask
, &teamArgs
->umask
, sizeof(mode_t
)) < B_OK
1577 || user_memcpy(userArgs
, teamArgs
->flat_args
,
1578 teamArgs
->flat_args_size
) < B_OK
) {
1579 // the team deletion process will clean this mess
1580 free_team_arg(teamArgs
);
1581 return B_BAD_ADDRESS
;
1584 TRACE(("team_create_thread_start: loading elf binary '%s'\n", path
));
1586 // set team args and update state
// argv[0] is the path itself, hence the "+ 1" / "- 1" when storing the
// remaining arguments
1588 team
->SetArgs(path
, teamArgs
->flat_args
+ 1, argCount
- 1);
1589 team
->state
= TEAM_STATE_NORMAL
;
1592 free_team_arg(teamArgs
);
1593 // the arguments are already on the user stack, we no longer need
1594 // them in this form
1596 // Clone commpage area
1597 area_id commPageArea
= clone_commpage_area(team
->id
,
1598 &team
->commpage_address
);
1599 if (commPageArea
< B_OK
) {
1600 TRACE(("team_create_thread_start: clone_commpage_area() failed: %s\n",
1601 strerror(commPageArea
)));
1602 return commPageArea
;
1605 // Register commpage image
1606 image_id commPageImage
= get_commpage_image();
1607 extended_image_info imageInfo
;
1608 err
= get_image_info(commPageImage
, &imageInfo
.basic_info
);
1610 TRACE(("team_create_thread_start: get_image_info() failed: %s\n",
// relocate the image info so that it refers to this team's commpage mapping
1614 imageInfo
.basic_info
.text
= team
->commpage_address
;
1615 imageInfo
.text_delta
= (ssize_t
)(addr_t
)team
->commpage_address
;
// the commpage carries no symbol information of its own
1616 imageInfo
.symbol_table
= NULL
;
1617 imageInfo
.symbol_hash
= NULL
;
1618 imageInfo
.string_table
= NULL
;
1619 image_id image
= register_image(team
, &imageInfo
, sizeof(imageInfo
));
1621 TRACE(("team_create_thread_start: register_image() failed: %s\n",
1626 // NOTE: Normally arch_thread_enter_userspace() never returns, that is
1627 // automatic variables with function scope will never be destroyed.
1629 // find runtime_loader path
1630 KPath runtimeLoaderPath
;
1631 err
= __find_directory(B_SYSTEM_DIRECTORY
, gBootDevice
, false,
1632 runtimeLoaderPath
.LockBuffer(), runtimeLoaderPath
.BufferSize());
1634 TRACE(("team_create_thread_start: find_directory() failed: %s\n",
1638 runtimeLoaderPath
.UnlockBuffer();
1639 err
= runtimeLoaderPath
.Append("runtime_loader");
// load runtime_loader into the new address space; it will in turn load the
// actual program image
1642 err
= elf_load_user_image(runtimeLoaderPath
.Path(), team
, 0,
1648 // Luckily, we don't have to clean up the mess we created - that's
1649 // done for us by the normal team deletion process
1650 TRACE(("team_create_thread_start: elf_load_user_image() failed: "
1651 "%s\n", strerror(err
)));
1655 TRACE(("team_create_thread_start: loaded elf. entry = %#lx\n", entry
));
1657 // enter userspace -- returns only in case of error
1658 return thread_enter_userspace_new_team(thread
, (addr_t
)entry
,
1659 programArgs
, team
->commpage_address
);
// Thin thread-entry wrapper around team_create_thread_start_internal().
// If the internal routine returns (i.e. starting the team failed), the
// team's exit info is initialized so that waiting parents see a sane
// status; presumably the thread then exits — the tail of this function is
// not visible in this extraction (TODO confirm).
1664 team_create_thread_start(void* args
)
1666 team_create_thread_start_internal(args
);
1667 team_init_exit_info_on_error(thread_get_current_thread()->team
);
// Creates a new userland team from the flattened argument/environment
// vector (\a _flatArgs, argv[0] is the program path), sets up its I/O
// context, address space and user data area, links it into the parent team
// and its process group, and starts the main thread, which loads the
// executable. On success ownership of the flat args passes to the team_arg
// structure (and then to the new main thread).
// If \a flags contains B_WAIT_TILL_LOADED the caller blocks until the new
// team's loader reports success or failure.
// NOTE(review): many error-path lines (returns/gotos between the visible
// statements) are elided in this extraction; comments only describe what
// is visible.
1675 load_image_internal(char**& _flatArgs
, size_t flatArgsSize
, int32 argCount
,
1676 int32 envCount
, int32 priority
, team_id parentID
, uint32 flags
,
1677 port_id errorPort
, uint32 errorToken
)
1679 char** flatArgs
= _flatArgs
;
1682 struct team_arg
* teamArgs
;
1683 struct team_loading_info loadingInfo
;
1684 io_context
* parentIOContext
= NULL
;
1686 bool teamLimitReached
= false;
// reject an empty argument vector — argv[0] is required as the path
1688 if (flatArgs
== NULL
|| argCount
== 0)
1691 const char* path
= flatArgs
[0];
1693 TRACE(("load_image_internal: name '%s', args = %p, argCount = %" B_PRId32
1694 "\n", path
, flatArgs
, argCount
));
1696 // cut the path from the main thread name
1697 const char* threadName
= strrchr(path
, '/');
1698 if (threadName
!= NULL
)
1703 // create the main thread object
1705 status
= Thread::Create(threadName
, mainThread
);
1708 BReference
<Thread
> mainThreadReference(mainThread
, true);
1710 // create team object
1711 Team
* team
= Team::Create(mainThread
->id
, path
, false);
1714 BReference
<Team
> teamReference(team
, true);
// set up the synchronization info the loader thread will use to report back
1716 if (flags
& B_WAIT_TILL_LOADED
) {
1717 loadingInfo
.thread
= thread_get_current_thread();
1718 loadingInfo
.result
= B_ERROR
;
1719 loadingInfo
.done
= false;
1720 team
->loading_info
= &loadingInfo
;
1723 // get the parent team
1724 Team
* parent
= Team::Get(parentID
);
1726 return B_BAD_TEAM_ID
;
1727 BReference
<Team
> parentReference(parent
, true);
1729 parent
->LockTeamAndProcessGroup();
1732 // inherit the parent's user/group
1733 inherit_parent_user_and_group(team
, parent
);
1735 // get a reference to the parent's I/O context -- we need it to create ours
1736 parentIOContext
= parent
->io_context
;
1737 vfs_get_io_context(parentIOContext
);
1740 parent
->UnlockTeamAndProcessGroup();
1742 // check the executable's set-user/group-id permission
1743 update_set_id_user_and_group(team
, path
);
1745 status
= create_team_arg(&teamArgs
, path
, flatArgs
, flatArgsSize
, argCount
,
1746 envCount
, (mode_t
)-1, errorPort
, errorToken
);
1751 // args are owned by the team_arg structure now
1753 // create a new io_context for this team
1754 team
->io_context
= vfs_new_io_context(parentIOContext
, true);
1755 if (!team
->io_context
) {
1756 status
= B_NO_MEMORY
;
1760 // We don't need the parent's I/O context any longer.
1761 vfs_put_io_context(parentIOContext
);
1762 parentIOContext
= NULL
;
1764 // remove any fds that have the CLOEXEC flag set (emulating BeOS behaviour)
1765 vfs_exec_io_context(team
->io_context
);
1767 // create an address space for this team
1768 status
= VMAddressSpace::Create(team
->id
, USER_BASE
, USER_SIZE
, false,
1769 &team
->address_space
);
// honor DISABLE_ASLR=1 from the environment (recorded in teamArgs->flags)
1773 team
->address_space
->SetRandomizingEnabled(
1774 (teamArgs
->flags
& TEAM_ARGS_FLAG_NO_ASLR
) == 0);
1776 // create the user data area
1777 status
= create_team_user_data(team
);
1781 // insert the team into its parent and the teams hash
// lock order: parent team+group first, then the hash spinlock
1782 parent
->LockTeamAndProcessGroup();
1786 InterruptsSpinLocker
teamsLocker(sTeamHashLock
);
// the team is inserted even when the limit is hit; the limit is handled
// below after the locks are dropped
1788 sTeamHash
.Insert(team
);
1789 teamLimitReached
= sUsedTeams
>= sMaxTeams
;
1790 if (!teamLimitReached
)
1794 insert_team_into_parent(parent
, team
);
1795 insert_team_into_group(parent
->group
, team
);
1798 parent
->UnlockTeamAndProcessGroup();
1800 // notify team listeners
1801 sNotificationService
.Notify(TEAM_ADDED
, team
);
1803 if (teamLimitReached
) {
1804 status
= B_NO_MORE_TEAMS
;
1808 // In case we start the main thread, we shouldn't access the team object
1809 // afterwards, so cache the team's ID.
1812 // Create a kernel thread, but under the context of the new team
1813 // The new thread will take over ownership of teamArgs.
1815 ThreadCreationAttributes
threadAttributes(team_create_thread_start
,
1816 threadName
, B_NORMAL_PRIORITY
, teamArgs
, teamID
, mainThread
);
// reserve extra stack for the program args structure + flat args copied in
// team_create_thread_start_internal()
1817 threadAttributes
.additional_stack_size
= sizeof(user_space_program_args
)
1818 + teamArgs
->flat_args_size
;
1819 thread
= thread_create_thread(threadAttributes
, false);
1826 // The team has been created successfully, so we keep the reference. Or
1827 // more precisely: It's owned by the team's main thread, now.
1828 teamReference
.Detach();
1830 // wait for the loader of the new team to finish its work
1831 if ((flags
& B_WAIT_TILL_LOADED
) != 0) {
1832 if (mainThread
!= NULL
) {
1833 // resume the team's main thread
1834 thread_continue(mainThread
);
1837 // Now suspend ourselves until loading is finished. We will be woken
1838 // either by the thread, when it finished or aborted loading, or when
1839 // the team is going to die (e.g. is killed). In either case the one
1840 // setting `loadingInfo.done' is responsible for removing the info from
1841 // the team structure.
1842 while (!loadingInfo
.done
)
1845 if (loadingInfo
.result
< B_OK
)
1846 return loadingInfo
.result
;
1849 // notify the debugger
1850 user_debug_team_created(teamID
);
// --- error path: undo everything done above, in reverse order ---
1855 // Remove the team structure from the process group, the parent team, and
1856 // the team hash table and delete the team structure.
1857 parent
->LockTeamAndProcessGroup();
1860 remove_team_from_group(team
);
1861 remove_team_from_parent(team
->parent
, team
);
1864 parent
->UnlockTeamAndProcessGroup();
1867 InterruptsSpinLocker
teamsLocker(sTeamHashLock
);
1868 sTeamHash
.Remove(team
);
1869 if (!teamLimitReached
)
1873 sNotificationService
.Notify(TEAM_REMOVED
, team
);
1875 delete_team_user_data(team
);
1877 team
->address_space
->Put();
1879 free_team_arg(teamArgs
);
1881 if (parentIOContext
!= NULL
)
1882 vfs_put_io_context(parentIOContext
);
1888 /*! Almost shuts down the current team and loads a new image into it.
1889 If successful, this function does not return and will takeover ownership of
1890 the arguments provided.
1891 This function may only be called in a userland team (caused by one of the
// exec*() syscalls); the kernel team and secondary threads are rejected
// below.
1895 exec_team(const char* path
, char**& _flatArgs
, size_t flatArgsSize
,
1896 int32 argCount
, int32 envCount
, mode_t umask
)
1898 // NOTE: Since this function normally doesn't return, don't use automatic
1899 // variables that need destruction in the function scope.
1900 char** flatArgs
= _flatArgs
;
1901 Team
* team
= thread_get_current_thread()->team
;
1902 struct team_arg
* teamArgs
;
1903 const char* threadName
;
1904 thread_id nubThreadID
= -1;
1906 TRACE(("exec_team(path = \"%s\", argc = %" B_PRId32
", envCount = %"
1907 B_PRId32
"): team %" B_PRId32
"\n", path
, argCount
, envCount
,
1910 T(ExecTeam(path
, argCount
, flatArgs
, envCount
, flatArgs
+ argCount
+ 1));
1912 // switching the kernel at run time is probably not a good idea :)
1913 if (team
== team_get_kernel_team())
1914 return B_NOT_ALLOWED
;
1916 // we currently need to be single threaded here
1917 // TODO: maybe we should just kill all other threads and
1918 // make the current thread the team's main thread?
1919 Thread
* currentThread
= thread_get_current_thread();
1920 if (currentThread
!= team
->main_thread
)
1921 return B_NOT_ALLOWED
;
1923 // The debug nub thread, a pure kernel thread, is allowed to survive.
1924 // We iterate through the thread list to make sure that there's no other
1926 TeamLocker
teamLocker(team
);
1927 InterruptsSpinLocker
debugInfoLocker(team
->debug_info
.lock
);
1929 if (team
->debug_info
.flags
& B_TEAM_DEBUG_DEBUGGER_INSTALLED
)
1930 nubThreadID
= team
->debug_info
.nub_thread
;
1932 debugInfoLocker
.Unlock();
// any thread besides the main thread and the debug nub forbids the exec
1934 for (Thread
* thread
= team
->thread_list
; thread
!= NULL
;
1935 thread
= thread
->team_next
) {
1936 if (thread
!= team
->main_thread
&& thread
->id
!= nubThreadID
)
1937 return B_NOT_ALLOWED
;
// POSIX: timers and caught-signal dispositions do not survive exec
1940 team
->DeleteUserTimers(true);
1941 team
->ResetSignalsOnExec();
1943 teamLocker
.Unlock();
1945 status_t status
= create_team_arg(&teamArgs
, path
, flatArgs
, flatArgsSize
,
1946 argCount
, envCount
, umask
, -1, 0);
1951 // args are owned by the team_arg structure now
1953 // TODO: remove team resources if there are any left
1954 // thread_atkernel_exit() might not be called at all
1956 thread_reset_for_exec();
1958 user_debug_prepare_for_exec();
// tear down the old image: user data, areas, ports, semaphores, images,
// CLOEXEC fds, and the POSIX realtime semaphore context
1960 delete_team_user_data(team
);
1961 vm_delete_areas(team
->address_space
, false);
1963 delete_owned_ports(team
);
1964 sem_delete_owned_sems(team
);
1965 remove_images(team
);
1966 vfs_exec_io_context(team
->io_context
);
1967 delete_realtime_sem_context(team
->realtime_sem_context
);
1968 team
->realtime_sem_context
= NULL
;
// re-evaluate ASLR for the new image (DISABLE_ASLR=1 in the new env)
1971 team
->address_space
->SetRandomizingEnabled(
1972 (teamArgs
->flags
& TEAM_ARGS_FLAG_NO_ASLR
) == 0);
1974 status
= create_team_user_data(team
);
1975 if (status
!= B_OK
) {
1976 // creating the user data failed -- we're toast
1977 free_team_arg(teamArgs
);
1978 exit_thread(status
);
1982 user_debug_finish_after_exec();
1987 team
->SetName(path
);
1990 // cut the path from the team name and rename the main thread, too
1991 threadName
= strrchr(path
, '/');
1992 if (threadName
!= NULL
)
1996 rename_thread(thread_get_current_thread_id(), threadName
);
1998 atomic_or(&team
->flags
, TEAM_FLAG_EXEC_DONE
);
2000 // Update user/group according to the executable's set-user/group-id
2002 update_set_id_user_and_group(team
, path
);
2004 user_debug_team_exec();
2006 // notify team listeners
2007 sNotificationService
.Notify(TEAM_EXEC
, team
);
2009 // get a user thread for the thread
2010 user_thread
* userThread
= team_allocate_user_thread(team
);
2011 // cannot fail (the allocation for the team would have failed already)
2012 ThreadLocker
currentThreadLocker(currentThread
);
2013 currentThread
->user_thread
= userThread
;
2014 currentThreadLocker
.Unlock();
2016 // create the user stack for the thread
2017 status
= thread_create_user_stack(currentThread
->team
, currentThread
, NULL
,
2018 0, sizeof(user_space_program_args
) + teamArgs
->flat_args_size
);
2019 if (status
== B_OK
) {
2020 // prepare the stack, load the runtime loader, and enter userspace
2021 team_create_thread_start(teamArgs
);
2022 // does never return
2024 free_team_arg(teamArgs
);
2026 // Sorry, we have to kill ourselves, there is no way out anymore
2027 // (without any areas left and all that).
2028 exit_thread(status
);
2030 // We return a status here since the signal that is sent by the
2031 // call above is not immediately handled.
// Body of fork_team(): duplicates the calling team — name/args, user/group,
// signal handlers, debug flags, I/O context, realtime semaphore context,
// all areas (copy-on-write via vm_copy_area) and loaded images — then
// creates a child main thread that resumes from the stored fork frame.
// NOTE(review): the function signature and several error-path lines are not
// visible in this extraction; comments only describe visible statements.
2039 Thread
* parentThread
= thread_get_current_thread();
2040 Team
* parentTeam
= parentThread
->team
;
2042 arch_fork_arg
* forkArgs
;
2043 struct area_info info
;
2047 bool teamLimitReached
= false;
2049 TRACE(("fork_team(): team %" B_PRId32
"\n", parentTeam
->id
));
// forking the kernel team makes no sense
2051 if (parentTeam
== team_get_kernel_team())
2052 return B_NOT_ALLOWED
;
2054 // create a new team
2055 // TODO: this is very similar to load_image_internal() - maybe we can do
2056 // something about it :)
2058 // create the main thread object
2060 status
= Thread::Create(parentThread
->name
, thread
);
2063 BReference
<Thread
> threadReference(thread
, true);
2065 // create the team object
2066 team
= Team::Create(thread
->id
, NULL
, false);
2070 parentTeam
->LockTeamAndProcessGroup();
2073 team
->SetName(parentTeam
->Name());
2074 team
->SetArgs(parentTeam
->Args());
// the commpage mapping is shared with the parent (same address)
2076 team
->commpage_address
= parentTeam
->commpage_address
;
2078 // Inherit the parent's user/group.
2079 inherit_parent_user_and_group(team
, parentTeam
);
2081 // inherit signal handlers
2082 team
->InheritSignalActions(parentTeam
);
2085 parentTeam
->UnlockTeamAndProcessGroup();
2087 // inherit some team debug flags
2088 team
->debug_info
.flags
|= atomic_get(&parentTeam
->debug_info
.flags
)
2089 & B_TEAM_DEBUG_INHERITED_FLAGS
;
// fork frame: CPU state the child main thread will resume from
2091 forkArgs
= (arch_fork_arg
*)malloc(sizeof(arch_fork_arg
));
2092 if (forkArgs
== NULL
) {
2093 status
= B_NO_MEMORY
;
2097 // create a new io_context for this team
2098 team
->io_context
= vfs_new_io_context(parentTeam
->io_context
, false);
2099 if (!team
->io_context
) {
2100 status
= B_NO_MEMORY
;
2104 // duplicate the realtime sem context
2105 if (parentTeam
->realtime_sem_context
) {
2106 team
->realtime_sem_context
= clone_realtime_sem_context(
2107 parentTeam
->realtime_sem_context
);
2108 if (team
->realtime_sem_context
== NULL
) {
2109 status
= B_NO_MEMORY
;
2114 // create an address space for this team
2115 status
= VMAddressSpace::Create(team
->id
, USER_BASE
, USER_SIZE
, false,
2116 &team
->address_space
);
2120 // copy all areas of the team
2121 // TODO: should be able to handle stack areas differently (ie. don't have
2122 // them copy-on-write)
2125 while (get_next_area_info(B_CURRENT_TEAM
, &areaCookie
, &info
) == B_OK
) {
2126 if (info
.area
== parentTeam
->user_data_area
) {
2127 // don't clone the user area; just create a new one
2128 status
= create_team_user_data(team
, info
.address
);
2132 thread
->user_thread
= team_allocate_user_thread(team
);
2135 area_id area
= vm_copy_area(team
->address_space
->ID(), info
.name
,
2136 &address
, B_CLONE_ADDRESS
, info
.protection
, info
.area
);
// remember the clone of the parent's stack area for the child thread
2142 if (info
.area
== parentThread
->user_stack_area
)
2143 thread
->user_stack_area
= area
;
// the loop above must have encountered the user data area; otherwise the
// kernel state is inconsistent
2150 if (thread
->user_thread
== NULL
) {
2152 panic("user data area not found, parent area is %" B_PRId32
,
2153 parentTeam
->user_data_area
);
// the child's stack/TLS/signal-stack layout mirrors the parent thread's
2159 thread
->user_stack_base
= parentThread
->user_stack_base
;
2160 thread
->user_stack_size
= parentThread
->user_stack_size
;
2161 thread
->user_local_storage
= parentThread
->user_local_storage
;
2162 thread
->sig_block_mask
= parentThread
->sig_block_mask
;
2163 thread
->signal_stack_base
= parentThread
->signal_stack_base
;
2164 thread
->signal_stack_size
= parentThread
->signal_stack_size
;
2165 thread
->signal_stack_enabled
= parentThread
->signal_stack_enabled
;
2167 arch_store_fork_frame(forkArgs
);
2170 if (copy_images(parentTeam
->id
, team
) != B_OK
)
2173 // insert the team into its parent and the teams hash
2174 parentTeam
->LockTeamAndProcessGroup();
2178 InterruptsSpinLocker
teamsLocker(sTeamHashLock
);
2180 sTeamHash
.Insert(team
);
2181 teamLimitReached
= sUsedTeams
>= sMaxTeams
;
2182 if (!teamLimitReached
)
2186 insert_team_into_parent(parentTeam
, team
);
2187 insert_team_into_group(parentTeam
->group
, team
);
2190 parentTeam
->UnlockTeamAndProcessGroup();
2192 // notify team listeners
2193 sNotificationService
.Notify(TEAM_ADDED
, team
);
2195 if (teamLimitReached
) {
2196 status
= B_NO_MORE_TEAMS
;
2200 // create the main thread
// no entry function: the thread starts from the stored fork frame instead
2202 ThreadCreationAttributes
threadCreationAttributes(NULL
,
2203 parentThread
->name
, parentThread
->priority
, NULL
, team
->id
, thread
);
2204 threadCreationAttributes
.forkArgs
= forkArgs
;
2205 threadCreationAttributes
.flags
|= THREAD_CREATION_FLAG_DEFER_SIGNALS
;
2206 threadID
= thread_create_thread(threadCreationAttributes
, false);
2213 // notify the debugger
2214 user_debug_team_created(team
->id
);
2216 T(TeamForked(threadID
));
2218 resume_thread(threadID
);
// --- error path: unwind insertions and resources in reverse order ---
2222 // Remove the team structure from the process group, the parent team, and
2223 // the team hash table and delete the team structure.
2224 parentTeam
->LockTeamAndProcessGroup();
2227 remove_team_from_group(team
);
2228 remove_team_from_parent(team
->parent
, team
);
2231 parentTeam
->UnlockTeamAndProcessGroup();
2234 InterruptsSpinLocker
teamsLocker(sTeamHashLock
);
2235 sTeamHash
.Remove(team
);
2236 if (!teamLimitReached
)
2240 sNotificationService
.Notify(TEAM_REMOVED
, team
);
2242 remove_images(team
);
2244 team
->address_space
->RemoveAndPut();
2246 delete_realtime_sem_context(team
->realtime_sem_context
);
2250 team
->ReleaseReference();
2256 /*! Returns if the specified team \a parent has any children belonging to the
2257 process group with the specified ID \a groupID.
2258 The caller must hold \a parent's lock.
// \return \c true if at least one child is in that group (the early-return
// lines are elided in this extraction — TODO confirm).
2261 has_children_in_group(Team
* parent
, pid_t groupID
)
// walk the parent's singly-linked child list
2263 for (Team
* child
= parent
->children
; child
!= NULL
;
2264 child
= child
->siblings_next
) {
// the child's own lock guards its group_id
2265 TeamLocker
childLocker(child
);
2266 if (child
->group_id
== groupID
)
2274 /*! Returns the first job control entry from \a children, which matches \a id.
2276 - \code > 0 \endcode: Matching an entry with that team ID.
2277 - \code == -1 \endcode: Matching any entry.
2278 - \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2279 \c 0 is an invalid value for \a id.
2281 The caller must hold the lock of the team that \a children belongs to.
2283 \param children The job control entry list to check.
2284 \param id The match criterion.
2285 \return The first matching entry or \c NULL, if none matches.
2287 static job_control_entry
*
2288 get_job_control_entry(team_job_control_children
& children
, pid_t id
)
2290 for (JobControlEntryList::Iterator it
= children
.entries
.GetIterator();
2291 job_control_entry
* entry
= it
.Next();) {
// id > 0: match the specific team (entry->thread is the team's main
// thread/team ID)
2294 if (entry
->thread
== id
)
2296 } else if (id
== -1) {
// id < -1: match by process group; for dead teams the entry caches the
// group ID itself
2300 = (entry
->team
? entry
->team
->group_id
: entry
->group_id
);
2301 if (processGroup
== -id
)
2310 /*! Returns the first job control entry from one of team's dead, continued, or
2311 stopped children which matches \a id.
2313 - \code > 0 \endcode: Matching an entry with that team ID.
2314 - \code == -1 \endcode: Matching any entry.
2315 - \code < -1 \endcode: Matching any entry with a process group ID of \c -id.
2316 \c 0 is an invalid value for \a id.
2318 The caller must hold \a team's lock.
2320 \param team The team whose dead, stopped, and continued child lists shall be
2322 \param id The match criterion.
2323 \param flags Specifies which children shall be considered. Dead children
2324 are considered when \a flags is ORed bitwise with \c WEXITED, stopped
2325 children are considered when \a flags is ORed bitwise with \c WUNTRACED
2326 or \c WSTOPPED, continued children when \a flags is ORed bitwise with
2328 \return The first matching entry or \c NULL, if none matches.
2330 static job_control_entry
*
2331 get_job_control_entry(Team
* team
, pid_t id
, uint32 flags
)
2333 job_control_entry
* entry
= NULL
;
// check the three lists in priority order: dead, continued, then stopped;
// the first non-NULL hit wins
2335 if ((flags
& WEXITED
) != 0)
2336 entry
= get_job_control_entry(team
->dead_children
, id
);
2338 if (entry
== NULL
&& (flags
& WCONTINUED
) != 0)
2339 entry
= get_job_control_entry(team
->continued_children
, id
);
2341 if (entry
== NULL
&& (flags
& (WUNTRACED
| WSTOPPED
)) != 0)
2342 entry
= get_job_control_entry(team
->stopped_children
, id
);
// Default constructor: a fresh entry holds no process group reference yet
// (InitDeadState() acquires one when the owning team dies).
2348 job_control_entry::job_control_entry()
2350 has_group_ref(false)
// Destructor: if InitDeadState() acquired a process group reference, look
// the group up again (the entry only stores the ID, not a pointer) and
// release that reference.
2355 job_control_entry::~job_control_entry()
2357 if (has_group_ref
) {
2358 InterruptsSpinLocker
groupHashLocker(sGroupHashLock
);
2360 ProcessGroup
* group
= sGroupHash
.Lookup(group_id
);
// holding a reference should have kept the group alive and hashed — a
// failed lookup indicates kernel state corruption
2361 if (group
== NULL
) {
2362 panic("job_control_entry::~job_control_entry(): unknown group "
2363 "ID: %" B_PRId32
, group_id
);
2367 groupHashLocker
.Unlock();
// release outside the hash spinlock
2369 group
->ReleaseReference();
2374 /*! Invoked when the owning team is dying, initializing the entry according to
2377 The caller must hold the owning team's lock and the scheduler lock.
// Snapshots the dying team's exit information and accumulated CPU times so
// that wait_for_child() can still report them after the Team object is gone.
2380 job_control_entry::InitDeadState()
2383 ASSERT(team
->exit
.initialized
);
// pin the process group by ID + reference; the destructor releases it
2385 group_id
= team
->group_id
;
2386 team
->group
->AcquireReference();
2387 has_group_ref
= true;
// copy the exit record fields into this (now self-contained) entry
2390 status
= team
->exit
.status
;
2391 reason
= team
->exit
.reason
;
2392 signal
= team
->exit
.signal
;
2393 signaling_user
= team
->exit
.signaling_user
;
// total CPU time: the team's own dead threads plus its dead children
2394 user_time
= team
->dead_threads_user_time
2395 + team
->dead_children
.user_time
;
2396 kernel_time
= team
->dead_threads_kernel_time
2397 + team
->dead_children
.kernel_time
;
// Copy-assignment: copies the reported state but deliberately does NOT
// transfer the process group reference — the copy sets has_group_ref to
// false so only the original entry releases the group.
2405 job_control_entry::operator=(const job_control_entry
& other
)
2407 state
= other
.state
;
2408 thread
= other
.thread
;
2409 signal
= other
.signal
;
// see note above: the group reference stays with `other'
2410 has_group_ref
= false;
2411 signaling_user
= other
.signaling_user
;
2413 group_id
= other
.group_id
;
2414 status
= other
.status
;
2415 reason
= other
.reason
;
2416 user_time
= other
.user_time
;
2417 kernel_time
= other
.kernel_time
;
2423 /*! This is the kernel backend for waitid().
// \a child selects what to wait for (> 0: that team, -1: any child,
// 0: the caller's process group, < -1: process group -child); \a flags
// carries WEXITED/WUNTRACED/WSTOPPED/WCONTINUED/WNOHANG/WNOWAIT. On
// success fills \a _info (siginfo) and, for dead children, \a _usage_info,
// and returns the reaped child's ID.
2426 wait_for_child(pid_t child
, uint32 flags
, siginfo_t
& _info
,
2427 team_usage_info
& _usage_info
)
2429 Thread
* thread
= thread_get_current_thread();
2430 Team
* team
= thread
->team
;
2431 struct job_control_entry foundEntry
;
2432 struct job_control_entry
* freeDeathEntry
= NULL
;
2433 status_t status
= B_OK
;
2435 TRACE(("wait_for_child(child = %" B_PRId32
", flags = %" B_PRId32
")\n",
2438 T(WaitForChild(child
, flags
));
// at least one of the wait conditions must be requested
2440 if ((flags
& (WEXITED
| WUNTRACED
| WSTOPPED
| WCONTINUED
)) == 0) {
2441 T(WaitForChildDone(B_BAD_VALUE
));
2445 pid_t originalChild
= child
;
2447 bool ignoreFoundEntries
= false;
2448 bool ignoreFoundEntriesChecked
= false;
// retry loop body (loop construct itself not visible in this extraction)
2452 TeamLocker
teamLocker(team
);
2454 // A 0 child argument means to wait for all children in the process
2455 // group of the calling team.
2456 child
= originalChild
== 0 ? -team
->group_id
: originalChild
;
2458 // check whether any condition holds
2459 job_control_entry
* entry
= get_job_control_entry(team
, child
, flags
);
2461 // If we don't have an entry yet, check whether there are any children
2462 // complying to the process group specification at all.
2463 if (entry
== NULL
) {
2464 // No success yet -- check whether there are any children complying
2465 // to the process group specification at all.
2466 bool childrenExist
= false;
2468 childrenExist
= team
->children
!= NULL
;
2469 } else if (child
< -1) {
2470 childrenExist
= has_children_in_group(team
, -child
);
2471 } else if (child
!= team
->id
) {
2472 if (Team
* childTeam
= Team::Get(child
)) {
2473 BReference
<Team
> childTeamReference(childTeam
, true);
2474 TeamLocker
childTeamLocker(childTeam
);
2475 childrenExist
= childTeam
->parent
== team
;
2479 if (!childrenExist
) {
2480 // there is no child we could wait for
2483 // the children we're waiting for are still running
2484 status
= B_WOULD_BLOCK
;
// copy the entry; the original may be reaped/reset right below
2488 foundEntry
= *entry
;
2490 // unless WNOWAIT has been specified, "consume" the wait state
2491 if ((flags
& WNOWAIT
) == 0 || ignoreFoundEntries
) {
2492 if (entry
->state
== JOB_CONTROL_STATE_DEAD
) {
2493 // The child is dead. Reap its death entry.
2494 freeDeathEntry
= entry
;
2495 team
->dead_children
.entries
.Remove(entry
);
2496 team
->dead_children
.count
--;
2498 // The child is well. Reset its job control state.
2499 team_set_job_control_state(entry
->team
,
2500 JOB_CONTROL_STATE_NONE
, NULL
);
2505 // If we haven't got anything yet, prepare for waiting for the
2506 // condition variable.
2507 ConditionVariableEntry deadWaitEntry
;
// register on the condvar while still holding the team lock to avoid a
// lost wakeup
2509 if (status
== B_WOULD_BLOCK
&& (flags
& WNOHANG
) == 0)
2510 team
->dead_children
.condition_variable
.Add(&deadWaitEntry
);
2512 teamLocker
.Unlock();
2514 // we got our entry and can return to our caller
2515 if (status
== B_OK
) {
2516 if (ignoreFoundEntries
) {
2517 // ... unless we shall ignore found entries
2518 delete freeDeathEntry
;
2519 freeDeathEntry
= NULL
;
// WNOHANG (or a hard error): return immediately instead of blocking
2526 if (status
!= B_WOULD_BLOCK
|| (flags
& WNOHANG
) != 0) {
2527 T(WaitForChildDone(status
));
// block until a child changes state or we are interrupted by a signal
2531 status
= deadWaitEntry
.Wait(B_CAN_INTERRUPT
);
2532 if (status
== B_INTERRUPTED
) {
2533 T(WaitForChildDone(status
));
2537 // If SA_NOCLDWAIT is set or SIGCHLD is ignored, we shall wait until
2538 // all our children are dead and fail with ECHILD. We check the
2539 // condition at this point.
2540 if (!ignoreFoundEntriesChecked
) {
2543 struct sigaction
& handler
= team
->SignalActionFor(SIGCHLD
);
2544 if ((handler
.sa_flags
& SA_NOCLDWAIT
) != 0
2545 || handler
.sa_handler
== SIG_IGN
) {
2546 ignoreFoundEntries
= true;
2549 teamLocker
.Unlock();
2551 ignoreFoundEntriesChecked
= true;
2555 delete freeDeathEntry
;
2557 // When we got here, we have a valid death entry, and already got
2558 // unregistered from the team or group. Fill in the returned info.
2559 memset(&_info
, 0, sizeof(_info
));
2560 _info
.si_signo
= SIGCHLD
;
2561 _info
.si_pid
= foundEntry
.thread
;
2562 _info
.si_uid
= foundEntry
.signaling_user
;
2563 // TODO: Fill in si_errno?
2565 switch (foundEntry
.state
) {
2566 case JOB_CONTROL_STATE_DEAD
:
2567 _info
.si_code
= foundEntry
.reason
;
// CLD_EXITED reports the exit status, otherwise the terminating signal
2568 _info
.si_status
= foundEntry
.reason
== CLD_EXITED
2569 ? foundEntry
.status
: foundEntry
.signal
;
2570 _usage_info
.user_time
= foundEntry
.user_time
;
2571 _usage_info
.kernel_time
= foundEntry
.kernel_time
;
2573 case JOB_CONTROL_STATE_STOPPED
:
2574 _info
.si_code
= CLD_STOPPED
;
2575 _info
.si_status
= foundEntry
.signal
;
2577 case JOB_CONTROL_STATE_CONTINUED
:
2578 _info
.si_code
= CLD_CONTINUED
;
2579 _info
.si_status
= 0;
2581 case JOB_CONTROL_STATE_NONE
:
2586 // If SIGCHLD is blocked, we shall clear pending SIGCHLDs, if no other child
2587 // status is available.
2588 TeamLocker
teamLocker(team
);
2589 InterruptsSpinLocker
signalLocker(team
->signal_lock
);
2590 SpinLocker
threadCreationLocker(gThreadCreationLock
);
2592 if (is_team_signal_blocked(team
, SIGCHLD
)) {
2593 if (get_job_control_entry(team
, child
, flags
) == NULL
)
2594 team
->RemovePendingSignals(SIGNAL_TO_MASK(SIGCHLD
));
2597 threadCreationLocker
.Unlock();
2598 signalLocker
.Unlock();
2599 teamLocker
.Unlock();
2601 // When the team is dead, the main thread continues to live in the kernel
2602 // team for a very short time. To avoid surprises for the caller we rather
2603 // wait until the thread is really gone.
2604 if (foundEntry
.state
== JOB_CONTROL_STATE_DEAD
)
2605 wait_for_thread(foundEntry
.thread
, NULL
);
2607 T(WaitForChildDone(foundEntry
));
2609 return foundEntry
.thread
;
2613 /*! Fills the team_info structure with information from the specified team.
2614 Interrupts must be enabled. The team must not be locked.
// \param size must equal sizeof(team_info); callers passing a mismatched
// structure size are rejected (error return elided in this extraction).
2617 fill_team_info(Team
* team
, team_info
* info
, size_t size
)
2619 if (size
!= sizeof(team_info
))
2622 // TODO: Set more informations for team_info
// zero everything first so unset fields read as 0
2623 memset(info
, 0, size
);
2625 info
->team
= team
->id
;
2627 info
->image_count
= count_images(team
);
2628 // protected by sImageMutex
// the remaining fields need the team lock and the debug-info spinlock
2630 TeamLocker
teamLocker(team
);
2631 InterruptsSpinLocker
debugInfoLocker(team
->debug_info
.lock
);
2633 info
->thread_count
= team
->num_threads
;
2634 //info->area_count =
2635 info
->debugger_nub_thread
= team
->debug_info
.nub_thread
;
2636 info
->debugger_nub_port
= team
->debug_info
.nub_port
;
2637 info
->uid
= team
->effective_uid
;
2638 info
->gid
= team
->effective_gid
;
2640 strlcpy(info
->args
, team
->Args(), sizeof(info
->args
));
2647 /*! Returns whether the process group contains stopped processes.
2648 The caller must hold the process group's lock.
// Used by the orphaned-group check to decide whether SIGHUP/SIGCONT must
// be delivered (POSIX orphaned process group semantics).
2651 process_group_has_stopped_processes(ProcessGroup
* group
)
2653 Team
* team
= group
->teams
;
2654 while (team
!= NULL
) {
2655 // the parent team's lock guards the job control entry -- acquire it
2656 team
->LockTeamAndParent(false);
2658 if (team
->job_control_entry
!= NULL
2659 && team
->job_control_entry
->state
== JOB_CONTROL_STATE_STOPPED
) {
// found a stopped member (early return elided in this extraction)
2660 team
->UnlockTeamAndParent();
2664 team
->UnlockTeamAndParent();
2666 team
= team
->group_next
;
2673 /*! Iterates through all process groups queued in team_remove_team() and signals
2674 those that are orphaned and have stopped processes.
2675 The caller must not hold any team or process group locks.
2678 orphaned_process_group_check()
2680 // process as long as there are groups in the list
2682 // remove the head from the list
2683 MutexLocker
orphanedCheckLocker(sOrphanedCheckLock
);
2685 ProcessGroup
* group
= sOrphanedCheckProcessGroups
.RemoveHead();
// the list's reference to the group is transferred to groupReference below
2689 group
->UnsetOrphanedCheck();
2690 BReference
<ProcessGroup
> groupReference(group
);
// drop the list lock before taking the group lock (lock ordering)
2692 orphanedCheckLocker
.Unlock();
2694 AutoLocker
<ProcessGroup
> groupLocker(group
);
2696 // If the group is orphaned and contains stopped processes, we're
2697 // supposed to send SIGHUP + SIGCONT.
2698 if (group
->IsOrphaned() && process_group_has_stopped_processes(group
)) {
2699 Thread
* currentThread
= thread_get_current_thread();
2701 Signal
signal(SIGHUP
, SI_USER
, B_OK
, currentThread
->team
->id
);
2702 send_signal_to_process_group_locked(group
, signal
, 0);
// reuse the same Signal object for the follow-up SIGCONT
2704 signal
.SetNumber(SIGCONT
);
2705 send_signal_to_process_group_locked(group
, signal
, 0);
2712 common_get_team_usage_info(team_id id
, int32 who
, team_usage_info
* info
,
2715 if (who
!= B_TEAM_USAGE_SELF
&& who
!= B_TEAM_USAGE_CHILDREN
)
2719 Team
* team
= Team::GetAndLock(id
);
2721 return B_BAD_TEAM_ID
;
2722 BReference
<Team
> teamReference(team
, true);
2723 TeamLocker
teamLocker(team
, true);
2725 if ((flags
& B_CHECK_PERMISSION
) != 0) {
2726 uid_t uid
= geteuid();
2727 if (uid
!= 0 && uid
!= team
->effective_uid
)
2728 return B_NOT_ALLOWED
;
2731 bigtime_t kernelTime
= 0;
2732 bigtime_t userTime
= 0;
2735 case B_TEAM_USAGE_SELF
:
2737 Thread
* thread
= team
->thread_list
;
2739 for (; thread
!= NULL
; thread
= thread
->team_next
) {
2740 InterruptsSpinLocker
threadTimeLocker(thread
->time_lock
);
2741 kernelTime
+= thread
->kernel_time
;
2742 userTime
+= thread
->user_time
;
2745 kernelTime
+= team
->dead_threads_kernel_time
;
2746 userTime
+= team
->dead_threads_user_time
;
2750 case B_TEAM_USAGE_CHILDREN
:
2752 Team
* child
= team
->children
;
2753 for (; child
!= NULL
; child
= child
->siblings_next
) {
2754 TeamLocker
childLocker(child
);
2756 Thread
* thread
= team
->thread_list
;
2758 for (; thread
!= NULL
; thread
= thread
->team_next
) {
2759 InterruptsSpinLocker
threadTimeLocker(thread
->time_lock
);
2760 kernelTime
+= thread
->kernel_time
;
2761 userTime
+= thread
->user_time
;
2764 kernelTime
+= child
->dead_threads_kernel_time
;
2765 userTime
+= child
->dead_threads_user_time
;
2768 kernelTime
+= team
->dead_children
.kernel_time
;
2769 userTime
+= team
->dead_children
.user_time
;
2774 info
->kernel_time
= kernelTime
;
2775 info
->user_time
= userTime
;
2781 // #pragma mark - Private kernel API
2785 team_init(kernel_args
* args
)
2787 // create the team hash table
2788 new(&sTeamHash
) TeamTable
;
2789 if (sTeamHash
.Init(64) != B_OK
)
2790 panic("Failed to init team hash table!");
2792 new(&sGroupHash
) ProcessGroupHashTable
;
2793 if (sGroupHash
.Init() != B_OK
)
2794 panic("Failed to init process group hash table!");
2796 // create initial session and process groups
2798 ProcessSession
* session
= new(std::nothrow
) ProcessSession(1);
2799 if (session
== NULL
)
2800 panic("Could not create initial session.\n");
2801 BReference
<ProcessSession
> sessionReference(session
, true);
2803 ProcessGroup
* group
= new(std::nothrow
) ProcessGroup(1);
2805 panic("Could not create initial process group.\n");
2806 BReference
<ProcessGroup
> groupReference(group
, true);
2808 group
->Publish(session
);
2810 // create the kernel team
2811 sKernelTeam
= Team::Create(1, "kernel_team", true);
2812 if (sKernelTeam
== NULL
)
2813 panic("could not create kernel team!\n");
2814 sKernelTeam
->SetArgs(sKernelTeam
->Name());
2815 sKernelTeam
->state
= TEAM_STATE_NORMAL
;
2817 sKernelTeam
->saved_set_uid
= 0;
2818 sKernelTeam
->real_uid
= 0;
2819 sKernelTeam
->effective_uid
= 0;
2820 sKernelTeam
->saved_set_gid
= 0;
2821 sKernelTeam
->real_gid
= 0;
2822 sKernelTeam
->effective_gid
= 0;
2823 sKernelTeam
->supplementary_groups
= NULL
;
2824 sKernelTeam
->supplementary_group_count
= 0;
2826 insert_team_into_group(group
, sKernelTeam
);
2828 sKernelTeam
->io_context
= vfs_new_io_context(NULL
, false);
2829 if (sKernelTeam
->io_context
== NULL
)
2830 panic("could not create io_context for kernel team!\n");
2832 if (vfs_resize_fd_table(sKernelTeam
->io_context
, 4096) != B_OK
)
2833 dprintf("Failed to resize FD table for kernel team!\n");
2835 // stick it in the team hash
2836 sTeamHash
.Insert(sKernelTeam
);
2838 add_debugger_command_etc("team", &dump_team_info
,
2839 "Dump info about a particular team",
2840 "[ <id> | <address> | <name> ]\n"
2841 "Prints information about the specified team. If no argument is given\n"
2842 "the current team is selected.\n"
2843 " <id> - The ID of the team.\n"
2844 " <address> - The address of the team structure.\n"
2845 " <name> - The team's name.\n", 0);
2846 add_debugger_command_etc("teams", &dump_teams
, "List all teams",
2848 "Prints a list of all existing teams.\n", 0);
2850 new(&sNotificationService
) TeamNotificationService();
2852 sNotificationService
.Register();
2859 team_max_teams(void)
2866 team_used_teams(void)
2868 InterruptsSpinLocker
teamsLocker(sTeamHashLock
);
2873 /*! Returns a death entry of a child team specified by ID (if any).
2874 The caller must hold the team's lock.
2876 \param team The team whose dead children list to check.
2877 \param child The ID of the child for whose death entry to lock. Must be > 0.
2878 \param _deleteEntry Return variable, indicating whether the caller needs to
2879 delete the returned entry.
2880 \return The death entry of the matching team, or \c NULL, if no death entry
2881 for the team was found.
2884 team_get_death_entry(Team
* team
, thread_id child
, bool* _deleteEntry
)
2889 job_control_entry
* entry
= get_job_control_entry(team
->dead_children
,
2892 // remove the entry only, if the caller is the parent of the found team
2893 if (team_get_current_team_id() == entry
->thread
) {
2894 team
->dead_children
.entries
.Remove(entry
);
2895 team
->dead_children
.count
--;
2896 *_deleteEntry
= true;
2898 *_deleteEntry
= false;
2906 /*! Quick check to see if we have a valid team ID. */
2908 team_is_valid(team_id id
)
2913 InterruptsSpinLocker
teamsLocker(sTeamHashLock
);
2915 return team_get_team_struct_locked(id
) != NULL
;
2920 team_get_team_struct_locked(team_id id
)
2922 return sTeamHash
.Lookup(id
);
2927 team_set_controlling_tty(int32 ttyIndex
)
2929 // lock the team, so its session won't change while we're playing with it
2930 Team
* team
= thread_get_current_thread()->team
;
2931 TeamLocker
teamLocker(team
);
2933 // get and lock the session
2934 ProcessSession
* session
= team
->group
->Session();
2935 AutoLocker
<ProcessSession
> sessionLocker(session
);
2937 // set the session's fields
2938 session
->controlling_tty
= ttyIndex
;
2939 session
->foreground_group
= -1;
2944 team_get_controlling_tty()
2946 // lock the team, so its session won't change while we're playing with it
2947 Team
* team
= thread_get_current_thread()->team
;
2948 TeamLocker
teamLocker(team
);
2950 // get and lock the session
2951 ProcessSession
* session
= team
->group
->Session();
2952 AutoLocker
<ProcessSession
> sessionLocker(session
);
2954 // get the session's field
2955 return session
->controlling_tty
;
2960 team_set_foreground_process_group(int32 ttyIndex
, pid_t processGroupID
)
2962 // lock the team, so its session won't change while we're playing with it
2963 Thread
* thread
= thread_get_current_thread();
2964 Team
* team
= thread
->team
;
2965 TeamLocker
teamLocker(team
);
2967 // get and lock the session
2968 ProcessSession
* session
= team
->group
->Session();
2969 AutoLocker
<ProcessSession
> sessionLocker(session
);
2971 // check given TTY -- must be the controlling tty of the calling process
2972 if (session
->controlling_tty
!= ttyIndex
)
2975 // check given process group -- must belong to our session
2977 InterruptsSpinLocker
groupHashLocker(sGroupHashLock
);
2978 ProcessGroup
* group
= sGroupHash
.Lookup(processGroupID
);
2979 if (group
== NULL
|| group
->Session() != session
)
2983 // If we are a background group, we can do that unharmed only when we
2984 // ignore or block SIGTTOU. Otherwise the group gets a SIGTTOU.
2985 if (session
->foreground_group
!= -1
2986 && session
->foreground_group
!= team
->group_id
2987 && team
->SignalActionFor(SIGTTOU
).sa_handler
!= SIG_IGN
2988 && (thread
->sig_block_mask
& SIGNAL_TO_MASK(SIGTTOU
)) == 0) {
2989 InterruptsSpinLocker
signalLocker(team
->signal_lock
);
2991 if (!is_team_signal_blocked(team
, SIGTTOU
)) {
2992 pid_t groupID
= team
->group_id
;
2994 signalLocker
.Unlock();
2995 sessionLocker
.Unlock();
2996 teamLocker
.Unlock();
2998 Signal
signal(SIGTTOU
, SI_USER
, B_OK
, team
->id
);
2999 send_signal_to_process_group(groupID
, signal
, 0);
3000 return B_INTERRUPTED
;
3004 session
->foreground_group
= processGroupID
;
3010 /*! Removes the specified team from the global team hash, from its process
3011 group, and from its parent.
3012 It also moves all of its children to the kernel team.
3014 The caller must hold the following locks:
3015 - \a team's process group's lock,
3016 - the kernel team's lock,
3017 - \a team's parent team's lock (might be the kernel team), and
3021 team_remove_team(Team
* team
, pid_t
& _signalGroup
)
3023 Team
* parent
= team
->parent
;
3025 // remember how long this team lasted
3026 parent
->dead_children
.kernel_time
+= team
->dead_threads_kernel_time
3027 + team
->dead_children
.kernel_time
;
3028 parent
->dead_children
.user_time
+= team
->dead_threads_user_time
3029 + team
->dead_children
.user_time
;
3031 // remove the team from the hash table
3032 InterruptsSpinLocker
teamsLocker(sTeamHashLock
);
3033 sTeamHash
.Remove(team
);
3035 teamsLocker
.Unlock();
3037 // The team can no longer be accessed by ID. Navigation to it is still
3038 // possible from its process group and its parent and children, but that
3039 // will be rectified shortly.
3040 team
->state
= TEAM_STATE_DEATH
;
3042 // If we're a controlling process (i.e. a session leader with controlling
3043 // terminal), there's a bit of signalling we have to do. We can't do any of
3044 // the signaling here due to the bunch of locks we're holding, but we need
3045 // to determine, whom to signal.
3047 bool isSessionLeader
= false;
3048 if (team
->session_id
== team
->id
3049 && team
->group
->Session()->controlling_tty
>= 0) {
3050 isSessionLeader
= true;
3052 ProcessSession
* session
= team
->group
->Session();
3054 AutoLocker
<ProcessSession
> sessionLocker(session
);
3056 session
->controlling_tty
= -1;
3057 _signalGroup
= session
->foreground_group
;
3060 // remove us from our process group
3061 remove_team_from_group(team
);
3063 // move the team's children to the kernel team
3064 while (Team
* child
= team
->children
) {
3065 // remove the child from the current team and add it to the kernel team
3066 TeamLocker
childLocker(child
);
3068 remove_team_from_parent(team
, child
);
3069 insert_team_into_parent(sKernelTeam
, child
);
3071 // move job control entries too
3072 sKernelTeam
->stopped_children
.entries
.MoveFrom(
3073 &team
->stopped_children
.entries
);
3074 sKernelTeam
->continued_children
.entries
.MoveFrom(
3075 &team
->continued_children
.entries
);
3077 // If the team was a session leader with controlling terminal,
3078 // we need to send SIGHUP + SIGCONT to all newly-orphaned process
3079 // groups with stopped processes. Due to locking complications we can't
3080 // do that here, so we only check whether we were a reason for the
3081 // child's process group not being an orphan and, if so, schedule a
3082 // later check (cf. orphaned_process_group_check()).
3083 if (isSessionLeader
) {
3084 ProcessGroup
* childGroup
= child
->group
;
3085 if (childGroup
->Session()->id
== team
->session_id
3086 && childGroup
->id
!= team
->group_id
) {
3087 childGroup
->ScheduleOrphanedCheck();
3091 // Note, we don't move the dead children entries. Those will be deleted
3092 // when the team structure is deleted.
3095 // remove us from our parent
3096 remove_team_from_parent(parent
, team
);
3100 /*! Kills all threads but the main thread of the team and shuts down user
3102 To be called on exit of the team's main thread. No locks must be held.
3104 \param team The team in question.
3105 \return The port of the debugger for the team, -1 if none. To be passed to
3109 team_shutdown_team(Team
* team
)
3111 ASSERT(thread_get_current_thread() == team
->main_thread
);
3113 TeamLocker
teamLocker(team
);
3115 // Make sure debugging changes won't happen anymore.
3116 port_id debuggerPort
= -1;
3118 // If a debugger change is in progress for the team, we'll have to
3119 // wait until it is done.
3120 ConditionVariableEntry waitForDebuggerEntry
;
3121 bool waitForDebugger
= false;
3123 InterruptsSpinLocker
debugInfoLocker(team
->debug_info
.lock
);
3125 if (team
->debug_info
.debugger_changed_condition
!= NULL
) {
3126 team
->debug_info
.debugger_changed_condition
->Add(
3127 &waitForDebuggerEntry
);
3128 waitForDebugger
= true;
3129 } else if (team
->debug_info
.flags
& B_TEAM_DEBUG_DEBUGGER_INSTALLED
) {
3130 // The team is being debugged. That will stop with the termination
3131 // of the nub thread. Since we set the team state to death, no one
3132 // can install a debugger anymore. We fetch the debugger's port to
3133 // send it a message at the bitter end.
3134 debuggerPort
= team
->debug_info
.debugger_port
;
3137 debugInfoLocker
.Unlock();
3139 if (!waitForDebugger
)
3142 // wait for the debugger change to be finished
3143 teamLocker
.Unlock();
3145 waitForDebuggerEntry
.Wait();
3150 // Mark the team as shutting down. That will prevent new threads from being
3151 // created and debugger changes from taking place.
3152 team
->state
= TEAM_STATE_SHUTDOWN
;
3154 // delete all timers
3155 team
->DeleteUserTimers(false);
3157 // deactivate CPU time user timers for the team
3158 InterruptsSpinLocker
timeLocker(team
->time_lock
);
3160 if (team
->HasActiveCPUTimeUserTimers())
3161 team
->DeactivateCPUTimeUserTimers();
3163 timeLocker
.Unlock();
3165 // kill all threads but the main thread
3166 team_death_entry deathEntry
;
3167 deathEntry
.condition
.Init(team
, "team death");
3170 team
->death_entry
= &deathEntry
;
3171 deathEntry
.remaining_threads
= 0;
3173 Thread
* thread
= team
->thread_list
;
3174 while (thread
!= NULL
) {
3175 if (thread
!= team
->main_thread
) {
3176 Signal
signal(SIGKILLTHR
, SI_USER
, B_OK
, team
->id
);
3177 send_signal_to_thread(thread
, signal
, B_DO_NOT_RESCHEDULE
);
3178 deathEntry
.remaining_threads
++;
3181 thread
= thread
->team_next
;
3184 if (deathEntry
.remaining_threads
== 0)
3187 // there are threads to wait for
3188 ConditionVariableEntry entry
;
3189 deathEntry
.condition
.Add(&entry
);
3191 teamLocker
.Unlock();
3198 team
->death_entry
= NULL
;
3200 return debuggerPort
;
3204 /*! Called on team exit to notify threads waiting on the team and free most
3205 resources associated with it.
3206 The caller shouldn't hold any locks.
3209 team_delete_team(Team
* team
, port_id debuggerPort
)
3211 // Not quite in our job description, but work that has been left by
3212 // team_remove_team() and that can be done now that we're not holding any
3214 orphaned_process_group_check();
3216 team_id teamID
= team
->id
;
3218 ASSERT(team
->num_threads
== 0);
3220 // If someone is waiting for this team to be loaded, but it dies
3221 // unexpectedly before being done, we need to notify the waiting
3224 TeamLocker
teamLocker(team
);
3226 if (team
->loading_info
) {
3227 // there's indeed someone waiting
3228 struct team_loading_info
* loadingInfo
= team
->loading_info
;
3229 team
->loading_info
= NULL
;
3231 loadingInfo
->result
= B_ERROR
;
3232 loadingInfo
->done
= true;
3234 // wake up the waiting thread
3235 thread_continue(loadingInfo
->thread
);
3238 // notify team watchers
3241 // we're not reachable from anyone anymore at this point, so we
3242 // can safely access the list without any locking
3243 struct team_watcher
* watcher
;
3244 while ((watcher
= (struct team_watcher
*)list_remove_head_item(
3245 &team
->watcher_list
)) != NULL
) {
3246 watcher
->hook(teamID
, watcher
->data
);
3251 teamLocker
.Unlock();
3253 sNotificationService
.Notify(TEAM_REMOVED
, team
);
3255 // free team resources
3257 delete_realtime_sem_context(team
->realtime_sem_context
);
3259 remove_images(team
);
3260 team
->address_space
->RemoveAndPut();
3262 team
->ReleaseReference();
3264 // notify the debugger, that the team is gone
3265 user_debug_team_deleted(teamID
, debuggerPort
);
3270 team_get_kernel_team(void)
3277 team_get_kernel_team_id(void)
3282 return sKernelTeam
->id
;
3287 team_get_current_team_id(void)
3289 return thread_get_current_thread()->team
->id
;
3294 team_get_address_space(team_id id
, VMAddressSpace
** _addressSpace
)
3296 if (id
== sKernelTeam
->id
) {
3297 // we're the kernel team, so we don't have to go through all
3298 // the hassle (locking and hash lookup)
3299 *_addressSpace
= VMAddressSpace::GetKernel();
3303 InterruptsSpinLocker
teamsLocker(sTeamHashLock
);
3305 Team
* team
= team_get_team_struct_locked(id
);
3309 team
->address_space
->Get();
3310 *_addressSpace
= team
->address_space
;
3315 /*! Sets the team's job control state.
3316 The caller must hold the parent team's lock. Interrupts are allowed to be
3317 enabled or disabled.
3318 \a team The team whose job control state shall be set.
3319 \a newState The new state to be set.
3320 \a signal The signal the new state was caused by. Can \c NULL, if none. Then
3321 the caller is responsible for filling in the following fields of the
3322 entry before releasing the parent team's lock, unless the new state is
3323 \c JOB_CONTROL_STATE_NONE:
3324 - \c signal: The number of the signal causing the state change.
3325 - \c signaling_user: The real UID of the user sending the signal.
3328 team_set_job_control_state(Team
* team
, job_control_state newState
,
3331 if (team
== NULL
|| team
->job_control_entry
== NULL
)
3334 // don't touch anything, if the state stays the same or the team is already
3336 job_control_entry
* entry
= team
->job_control_entry
;
3337 if (entry
->state
== newState
|| entry
->state
== JOB_CONTROL_STATE_DEAD
)
3340 T(SetJobControlState(team
->id
, newState
, signal
));
3342 // remove from the old list
3343 switch (entry
->state
) {
3344 case JOB_CONTROL_STATE_NONE
:
3345 // entry is in no list ATM
3347 case JOB_CONTROL_STATE_DEAD
:
3350 case JOB_CONTROL_STATE_STOPPED
:
3351 team
->parent
->stopped_children
.entries
.Remove(entry
);
3353 case JOB_CONTROL_STATE_CONTINUED
:
3354 team
->parent
->continued_children
.entries
.Remove(entry
);
3358 entry
->state
= newState
;
3360 if (signal
!= NULL
) {
3361 entry
->signal
= signal
->Number();
3362 entry
->signaling_user
= signal
->SendingUser();
3366 team_job_control_children
* childList
= NULL
;
3367 switch (entry
->state
) {
3368 case JOB_CONTROL_STATE_NONE
:
3369 // entry doesn't get into any list
3371 case JOB_CONTROL_STATE_DEAD
:
3372 childList
= &team
->parent
->dead_children
;
3373 team
->parent
->dead_children
.count
++;
3375 case JOB_CONTROL_STATE_STOPPED
:
3376 childList
= &team
->parent
->stopped_children
;
3378 case JOB_CONTROL_STATE_CONTINUED
:
3379 childList
= &team
->parent
->continued_children
;
3383 if (childList
!= NULL
) {
3384 childList
->entries
.Add(entry
);
3385 team
->parent
->dead_children
.condition_variable
.NotifyAll();
3390 /*! Inits the given team's exit information, if not yet initialized, to some
3391 generic "killed" status.
3392 The caller must not hold the team's lock. Interrupts must be enabled.
3394 \param team The team whose exit info shall be initialized.
3397 team_init_exit_info_on_error(Team
* team
)
3399 TeamLocker
teamLocker(team
);
3401 if (!team
->exit
.initialized
) {
3402 team
->exit
.reason
= CLD_KILLED
;
3403 team
->exit
.signal
= SIGKILL
;
3404 team
->exit
.signaling_user
= geteuid();
3405 team
->exit
.status
= 0;
3406 team
->exit
.initialized
= true;
3411 /*! Adds a hook to the team that is called as soon as this team goes away.
3412 This call might get public in the future.
3415 start_watching_team(team_id teamID
, void (*hook
)(team_id
, void*), void* data
)
3417 if (hook
== NULL
|| teamID
< B_OK
)
3420 // create the watcher object
3421 team_watcher
* watcher
= (team_watcher
*)malloc(sizeof(team_watcher
));
3422 if (watcher
== NULL
)
3425 watcher
->hook
= hook
;
3426 watcher
->data
= data
;
3428 // add watcher, if the team isn't already dying
3430 Team
* team
= Team::GetAndLock(teamID
);
3433 return B_BAD_TEAM_ID
;
3436 list_add_item(&team
->watcher_list
, watcher
);
3438 team
->UnlockAndReleaseReference();
3445 stop_watching_team(team_id teamID
, void (*hook
)(team_id
, void*), void* data
)
3447 if (hook
== NULL
|| teamID
< 0)
3450 // get team and remove watcher (if present)
3451 Team
* team
= Team::GetAndLock(teamID
);
3453 return B_BAD_TEAM_ID
;
3455 // search for watcher
3456 team_watcher
* watcher
= NULL
;
3457 while ((watcher
= (team_watcher
*)list_get_next_item(
3458 &team
->watcher_list
, watcher
)) != NULL
) {
3459 if (watcher
->hook
== hook
&& watcher
->data
== data
) {
3461 list_remove_item(&team
->watcher_list
, watcher
);
3466 team
->UnlockAndReleaseReference();
3468 if (watcher
== NULL
)
3469 return B_ENTRY_NOT_FOUND
;
3476 /*! Allocates a user_thread structure from the team.
3477 The team lock must be held, unless the function is called for the team's
3478 main thread. Interrupts must be enabled.
3481 team_allocate_user_thread(Team
* team
)
3483 if (team
->user_data
== 0)
3486 // take an entry from the free list, if any
3487 if (struct free_user_thread
* entry
= team
->free_user_threads
) {
3488 user_thread
* thread
= entry
->thread
;
3489 team
->free_user_threads
= entry
->next
;
3495 // enough space left?
3496 size_t needed
= ROUNDUP(sizeof(user_thread
), CACHE_LINE_SIZE
);
3497 if (team
->user_data_size
- team
->used_user_data
< needed
) {
3498 // try to resize the area
3499 if (resize_area(team
->user_data_area
,
3500 team
->user_data_size
+ B_PAGE_SIZE
) != B_OK
) {
3504 // resized user area successfully -- try to allocate the user_thread
3506 team
->user_data_size
+= B_PAGE_SIZE
;
3510 // allocate the user_thread
3512 = (user_thread
*)(team
->user_data
+ team
->used_user_data
);
3513 team
->used_user_data
+= needed
;
3520 /*! Frees the given user_thread structure.
3521 The team's lock must not be held. Interrupts must be enabled.
3522 \param team The team the user thread was allocated from.
3523 \param userThread The user thread to free.
3526 team_free_user_thread(Team
* team
, struct user_thread
* userThread
)
3528 if (userThread
== NULL
)
3531 // create a free list entry
3532 free_user_thread
* entry
3533 = (free_user_thread
*)malloc(sizeof(free_user_thread
));
3534 if (entry
== NULL
) {
3535 // we have to leak the user thread :-/
3540 TeamLocker
teamLocker(team
);
3542 entry
->thread
= userThread
;
3543 entry
->next
= team
->free_user_threads
;
3544 team
->free_user_threads
= entry
;
3548 // #pragma mark - Associated data interface
3551 AssociatedData::AssociatedData()
3558 AssociatedData::~AssociatedData()
3564 AssociatedData::OwnerDeleted(AssociatedDataOwner
* owner
)
3569 AssociatedDataOwner::AssociatedDataOwner()
3571 mutex_init(&fLock
, "associated data owner");
3575 AssociatedDataOwner::~AssociatedDataOwner()
3577 mutex_destroy(&fLock
);
3582 AssociatedDataOwner::AddData(AssociatedData
* data
)
3584 MutexLocker
locker(fLock
);
3586 if (data
->Owner() != NULL
)
3589 data
->AcquireReference();
3591 data
->SetOwner(this);
3598 AssociatedDataOwner::RemoveData(AssociatedData
* data
)
3600 MutexLocker
locker(fLock
);
3602 if (data
->Owner() != this)
3605 data
->SetOwner(NULL
);
3610 data
->ReleaseReference();
3617 AssociatedDataOwner::PrepareForDeletion()
3619 MutexLocker
locker(fLock
);
3621 // move all data to a temporary list and unset the owner
3623 list
.MoveFrom(&fList
);
3625 for (DataList::Iterator it
= list
.GetIterator();
3626 AssociatedData
* data
= it
.Next();) {
3627 data
->SetOwner(NULL
);
3632 // call the notification hooks and release our references
3633 while (AssociatedData
* data
= list
.RemoveHead()) {
3634 data
->OwnerDeleted(this);
3635 data
->ReleaseReference();
3640 /*! Associates data with the current team.
3641 When the team is deleted, the data object is notified.
3642 The team acquires a reference to the object.
3644 \param data The data object.
3645 \return \c true on success, \c false otherwise. Fails only when the supplied
3646 data object is already associated with another owner.
3649 team_associate_data(AssociatedData
* data
)
3651 return thread_get_current_thread()->team
->AddData(data
);
3655 /*! Dissociates data from the current team.
3656 Balances an earlier call to team_associate_data().
3658 \param data The data object.
3659 \return \c true on success, \c false otherwise. Fails only when the data
3660 object is not associated with the current team.
3663 team_dissociate_data(AssociatedData
* data
)
3665 return thread_get_current_thread()->team
->RemoveData(data
);
3669 // #pragma mark - Public kernel API
3673 load_image(int32 argCount
, const char** args
, const char** env
)
3675 return load_image_etc(argCount
, args
, env
, B_NORMAL_PRIORITY
,
3676 B_CURRENT_TEAM
, B_WAIT_TILL_LOADED
);
3681 load_image_etc(int32 argCount
, const char* const* args
,
3682 const char* const* env
, int32 priority
, team_id parentID
, uint32 flags
)
3684 // we need to flatten the args and environment
3689 // determine total needed size
3691 for (int32 i
= 0; i
< argCount
; i
++)
3692 argSize
+= strlen(args
[i
]) + 1;
3696 while (env
!= NULL
&& env
[envCount
] != NULL
)
3697 envSize
+= strlen(env
[envCount
++]) + 1;
3699 int32 size
= (argCount
+ envCount
+ 2) * sizeof(char*) + argSize
+ envSize
;
3700 if (size
> MAX_PROCESS_ARGS_SIZE
)
3701 return B_TOO_MANY_ARGS
;
3704 char** flatArgs
= (char**)malloc(size
);
3705 if (flatArgs
== NULL
)
3708 char** slot
= flatArgs
;
3709 char* stringSpace
= (char*)(flatArgs
+ argCount
+ envCount
+ 2);
3711 // copy arguments and environment
3712 for (int32 i
= 0; i
< argCount
; i
++) {
3713 int32 argSize
= strlen(args
[i
]) + 1;
3714 memcpy(stringSpace
, args
[i
], argSize
);
3715 *slot
++ = stringSpace
;
3716 stringSpace
+= argSize
;
3721 for (int32 i
= 0; i
< envCount
; i
++) {
3722 int32 envSize
= strlen(env
[i
]) + 1;
3723 memcpy(stringSpace
, env
[i
], envSize
);
3724 *slot
++ = stringSpace
;
3725 stringSpace
+= envSize
;
3730 thread_id thread
= load_image_internal(flatArgs
, size
, argCount
, envCount
,
3731 B_NORMAL_PRIORITY
, parentID
, B_WAIT_TILL_LOADED
, -1, 0);
3734 // load_image_internal() unset our variable if it took over ownership
3741 wait_for_team(team_id id
, status_t
* _returnCode
)
3743 // check whether the team exists
3744 InterruptsSpinLocker
teamsLocker(sTeamHashLock
);
3746 Team
* team
= team_get_team_struct_locked(id
);
3748 return B_BAD_TEAM_ID
;
3752 teamsLocker
.Unlock();
3754 // wait for the main thread (it has the same ID as the team)
3755 return wait_for_thread(id
, _returnCode
);
3760 kill_team(team_id id
)
3762 InterruptsSpinLocker
teamsLocker(sTeamHashLock
);
3764 Team
* team
= team_get_team_struct_locked(id
);
3766 return B_BAD_TEAM_ID
;
3770 teamsLocker
.Unlock();
3772 if (team
== sKernelTeam
)
3773 return B_NOT_ALLOWED
;
3775 // Just kill the team's main thread (it has same ID as the team). The
3776 // cleanup code there will take care of the team.
3777 return kill_thread(id
);
3782 _get_team_info(team_id id
, team_info
* info
, size_t size
)
3785 Team
* team
= Team::Get(id
);
3787 return B_BAD_TEAM_ID
;
3788 BReference
<Team
> teamReference(team
, true);
3791 return fill_team_info(team
, info
, size
);
3796 _get_next_team_info(int32
* cookie
, team_info
* info
, size_t size
)
3798 int32 slot
= *cookie
;
3802 InterruptsSpinLocker
locker(sTeamHashLock
);
3804 team_id lastTeamID
= peek_next_thread_id();
3805 // TODO: This is broken, since the id can wrap around!
3807 // get next valid team
3809 while (slot
< lastTeamID
&& !(team
= team_get_team_struct_locked(slot
)))
3813 return B_BAD_TEAM_ID
;
3815 // get a reference to the team and unlock
3816 BReference
<Team
> teamReference(team
);
3821 return fill_team_info(team
, info
, size
);
3826 _get_team_usage_info(team_id id
, int32 who
, team_usage_info
* info
, size_t size
)
3828 if (size
!= sizeof(team_usage_info
))
3831 return common_get_team_usage_info(id
, who
, info
, 0);
3838 return thread_get_current_thread()->team
->id
;
3845 Team
* team
= thread_get_current_thread()->team
;
3847 TeamLocker
teamLocker(team
);
3849 return team
->parent
->id
;
3862 // get process group of the calling process
3863 Team
* team
= thread_get_current_thread()->team
;
3864 TeamLocker
teamLocker(team
);
3865 return team
->group_id
;
3869 Team
* team
= Team::GetAndLock(id
);
3875 // get the team's process group ID
3876 pid_t groupID
= team
->group_id
;
3878 team
->UnlockAndReleaseReference();
3893 // get session of the calling process
3894 Team
* team
= thread_get_current_thread()->team
;
3895 TeamLocker
teamLocker(team
);
3896 return team
->session_id
;
3900 Team
* team
= Team::GetAndLock(id
);
3906 // get the team's session ID
3907 pid_t sessionID
= team
->session_id
;
3909 team
->UnlockAndReleaseReference();
3915 // #pragma mark - User syscalls
3919 _user_exec(const char* userPath
, const char* const* userFlatArgs
,
3920 size_t flatArgsSize
, int32 argCount
, int32 envCount
, mode_t umask
)
3922 // NOTE: Since this function normally doesn't return, don't use automatic
3923 // variables that need destruction in the function scope.
3924 char path
[B_PATH_NAME_LENGTH
];
3926 if (!IS_USER_ADDRESS(userPath
) || !IS_USER_ADDRESS(userFlatArgs
)
3927 || user_strlcpy(path
, userPath
, sizeof(path
)) < B_OK
)
3928 return B_BAD_ADDRESS
;
3930 // copy and relocate the flat arguments
3932 status_t error
= copy_user_process_args(userFlatArgs
, flatArgsSize
,
3933 argCount
, envCount
, flatArgs
);
3935 if (error
== B_OK
) {
3936 error
= exec_team(path
, flatArgs
, _ALIGN(flatArgsSize
), argCount
,
3938 // this one only returns in case of error
3954 _user_wait_for_child(thread_id child
, uint32 flags
, siginfo_t
* userInfo
,
3955 team_usage_info
* usageInfo
)
3957 if (userInfo
!= NULL
&& !IS_USER_ADDRESS(userInfo
))
3958 return B_BAD_ADDRESS
;
3959 if (usageInfo
!= NULL
&& !IS_USER_ADDRESS(usageInfo
))
3960 return B_BAD_ADDRESS
;
3963 team_usage_info usage_info
;
3964 pid_t foundChild
= wait_for_child(child
, flags
, info
, usage_info
);
3966 return syscall_restart_handle_post(foundChild
);
3968 // copy info back to userland
3969 if (userInfo
!= NULL
&& user_memcpy(userInfo
, &info
, sizeof(info
)) != B_OK
)
3970 return B_BAD_ADDRESS
;
3971 // copy usage_info back to userland
3972 if (usageInfo
!= NULL
&& user_memcpy(usageInfo
, &usage_info
,
3973 sizeof(usage_info
)) != B_OK
) {
3974 return B_BAD_ADDRESS
;
3982 _user_process_info(pid_t process
, int32 which
)
3984 // we only allow to return the parent of the current process
3985 if (which
== PARENT_ID
3986 && process
!= 0 && process
!= thread_get_current_thread()->team
->id
)
3992 result
= getsid(process
);
3995 result
= getpgid(process
);
4004 return result
>= 0 ? result
: errno
;
4009 _user_setpgid(pid_t processID
, pid_t groupID
)
4011 // setpgid() can be called either by the parent of the target process or
4012 // by the process itself to do one of two things:
4013 // * Create a new process group with the target process' ID and the target
4014 // process as group leader.
4015 // * Set the target process' process group to an already existing one in the
4021 Team
* currentTeam
= thread_get_current_thread()->team
;
4023 processID
= currentTeam
->id
;
4025 // if the group ID is not specified, use the target process' ID
4027 groupID
= processID
;
4029 // We loop when running into the following race condition: We create a new
4030 // process group, because there isn't one with that ID yet, but later when
4031 // trying to publish it, we find that someone else created and published
4032 // a group with that ID in the meantime. In that case we just restart the
4035 // Look up the process group by ID. If it doesn't exist yet and we are
4036 // allowed to create a new one, do that.
4037 ProcessGroup
* group
= ProcessGroup::Get(groupID
);
4038 bool newGroup
= false;
4039 if (group
== NULL
) {
4040 if (groupID
!= processID
)
4041 return B_NOT_ALLOWED
;
4043 group
= new(std::nothrow
) ProcessGroup(groupID
);
4049 BReference
<ProcessGroup
> groupReference(group
, true);
4051 // get the target team
4052 Team
* team
= Team::Get(processID
);
4055 BReference
<Team
> teamReference(team
, true);
4057 // lock the new process group and the team's current process group
4059 // lock the team's current process group
4060 team
->LockProcessGroup();
4062 ProcessGroup
* oldGroup
= team
->group
;
4063 if (oldGroup
== group
) {
4064 // it's the same as the target group, so just bail out
4069 oldGroup
->AcquireReference();
4071 // lock the target process group, if locking order allows it
4072 if (newGroup
|| group
->id
> oldGroup
->id
) {
4078 if (group
->TryLock())
4081 // no dice -- unlock the team's current process group and relock in
4082 // the correct order
4088 // check whether things are still the same
4089 TeamLocker
teamLocker(team
);
4090 if (team
->group
== oldGroup
)
4093 // something changed -- unlock everything and retry
4094 teamLocker
.Unlock();
4097 oldGroup
->ReleaseReference();
4100 // we now have references and locks of both new and old process group
4101 BReference
<ProcessGroup
> oldGroupReference(team
->group
, true);
4102 AutoLocker
<ProcessGroup
> oldGroupLocker(team
->group
, true);
4103 AutoLocker
<ProcessGroup
> groupLocker(group
, true);
4105 // also lock the target team and its parent
4106 team
->LockTeamAndParent(false);
4107 TeamLocker
parentLocker(team
->parent
, true);
4108 TeamLocker
teamLocker(team
, true);
4110 // perform the checks
4111 if (team
== currentTeam
) {
4112 // we set our own group
4114 // we must not change our process group ID if we're a session leader
4115 if (is_session_leader(currentTeam
))
4116 return B_NOT_ALLOWED
;
4118 // Calling team != target team. The target team must be a child of
4119 // the calling team and in the same session. (If that's the case it
4120 // isn't a session leader either.)
4121 if (team
->parent
!= currentTeam
4122 || team
->session_id
!= currentTeam
->session_id
) {
4123 return B_NOT_ALLOWED
;
4126 // The call is also supposed to fail on a child, when the child has
4127 // already executed exec*() [EACCES].
4128 if ((team
->flags
& TEAM_FLAG_EXEC_DONE
) != 0)
4132 // If we created a new process group, publish it now.
4134 InterruptsSpinLocker
groupHashLocker(sGroupHashLock
);
4135 if (sGroupHash
.Lookup(groupID
)) {
4136 // A group with the group ID appeared since we first checked.
4137 // Back to square one.
4141 group
->PublishLocked(team
->group
->Session());
4142 } else if (group
->Session()->id
!= team
->session_id
) {
4143 // The existing target process group belongs to a different session.
4144 // That's not allowed.
4145 return B_NOT_ALLOWED
;
4148 // Everything is ready -- set the group.
4149 remove_team_from_group(team
);
4150 insert_team_into_group(group
, team
);
4152 // Changing the process group might have changed the situation for a
4153 // parent waiting in wait_for_child(). Hence we notify it.
4154 team
->parent
->dead_children
.condition_variable
.NotifyAll();
4164 Team
* team
= thread_get_current_thread()->team
;
4166 // create a new process group and session
4167 ProcessGroup
* group
= new(std::nothrow
) ProcessGroup(team
->id
);
4170 BReference
<ProcessGroup
> groupReference(group
, true);
4171 AutoLocker
<ProcessGroup
> groupLocker(group
);
4173 ProcessSession
* session
= new(std::nothrow
) ProcessSession(group
->id
);
4174 if (session
== NULL
)
4176 BReference
<ProcessSession
> sessionReference(session
, true);
4178 // lock the team's current process group, parent, and the team itself
4179 team
->LockTeamParentAndProcessGroup();
4180 BReference
<ProcessGroup
> oldGroupReference(team
->group
);
4181 AutoLocker
<ProcessGroup
> oldGroupLocker(team
->group
, true);
4182 TeamLocker
parentLocker(team
->parent
, true);
4183 TeamLocker
teamLocker(team
, true);
4185 // the team must not already be a process group leader
4186 if (is_process_group_leader(team
))
4187 return B_NOT_ALLOWED
;
4189 // remove the team from the old and add it to the new process group
4190 remove_team_from_group(team
);
4191 group
->Publish(session
);
4192 insert_team_into_group(group
, team
);
4194 // Changing the process group might have changed the situation for a
4195 // parent waiting in wait_for_child(). Hence we notify it.
4196 team
->parent
->dead_children
.condition_variable
.NotifyAll();
4203 _user_wait_for_team(team_id id
, status_t
* _userReturnCode
)
4205 status_t returnCode
;
4208 if (_userReturnCode
!= NULL
&& !IS_USER_ADDRESS(_userReturnCode
))
4209 return B_BAD_ADDRESS
;
4211 status
= wait_for_team(id
, &returnCode
);
4212 if (status
>= B_OK
&& _userReturnCode
!= NULL
) {
4213 if (user_memcpy(_userReturnCode
, &returnCode
, sizeof(returnCode
))
4215 return B_BAD_ADDRESS
;
4219 return syscall_restart_handle_post(status
);
4224 _user_load_image(const char* const* userFlatArgs
, size_t flatArgsSize
,
4225 int32 argCount
, int32 envCount
, int32 priority
, uint32 flags
,
4226 port_id errorPort
, uint32 errorToken
)
4228 TRACE(("_user_load_image: argc = %" B_PRId32
"\n", argCount
));
4233 // copy and relocate the flat arguments
4235 status_t error
= copy_user_process_args(userFlatArgs
, flatArgsSize
,
4236 argCount
, envCount
, flatArgs
);
4240 thread_id thread
= load_image_internal(flatArgs
, _ALIGN(flatArgsSize
),
4241 argCount
, envCount
, priority
, B_CURRENT_TEAM
, flags
, errorPort
,
4245 // load_image_internal() unset our variable if it took over ownership
4252 _user_exit_team(status_t returnValue
)
4254 Thread
* thread
= thread_get_current_thread();
4255 Team
* team
= thread
->team
;
4257 // set this thread's exit status
4258 thread
->exit
.status
= returnValue
;
4260 // set the team exit status
4261 TeamLocker
teamLocker(team
);
4263 if (!team
->exit
.initialized
) {
4264 team
->exit
.reason
= CLD_EXITED
;
4265 team
->exit
.signal
= 0;
4266 team
->exit
.signaling_user
= 0;
4267 team
->exit
.status
= returnValue
;
4268 team
->exit
.initialized
= true;
4271 teamLocker
.Unlock();
4273 // Stop the thread, if the team is being debugged and that has been
4275 if ((atomic_get(&team
->debug_info
.flags
) & B_TEAM_DEBUG_PREVENT_EXIT
) != 0)
4276 user_debug_stop_thread();
4278 // Send this thread a SIGKILL. This makes sure the thread will not return to
4279 // userland. The signal handling code forwards the signal to the main
4280 // thread (if that's not already this one), which will take the team down.
4281 Signal
signal(SIGKILL
, SI_USER
, B_OK
, team
->id
);
4282 send_signal_to_thread(thread
, signal
, 0);
4287 _user_kill_team(team_id team
)
4289 return kill_team(team
);
4294 _user_get_team_info(team_id id
, team_info
* userInfo
)
4299 if (!IS_USER_ADDRESS(userInfo
))
4300 return B_BAD_ADDRESS
;
4302 status
= _get_team_info(id
, &info
, sizeof(team_info
));
4303 if (status
== B_OK
) {
4304 if (user_memcpy(userInfo
, &info
, sizeof(team_info
)) < B_OK
)
4305 return B_BAD_ADDRESS
;
4313 _user_get_next_team_info(int32
* userCookie
, team_info
* userInfo
)
4319 if (!IS_USER_ADDRESS(userCookie
)
4320 || !IS_USER_ADDRESS(userInfo
)
4321 || user_memcpy(&cookie
, userCookie
, sizeof(int32
)) < B_OK
)
4322 return B_BAD_ADDRESS
;
4324 status
= _get_next_team_info(&cookie
, &info
, sizeof(team_info
));
4328 if (user_memcpy(userCookie
, &cookie
, sizeof(int32
)) < B_OK
4329 || user_memcpy(userInfo
, &info
, sizeof(team_info
)) < B_OK
)
4330 return B_BAD_ADDRESS
;
4337 _user_get_current_team(void)
4339 return team_get_current_team_id();
4344 _user_get_team_usage_info(team_id team
, int32 who
, team_usage_info
* userInfo
,
4347 if (size
!= sizeof(team_usage_info
))
4350 team_usage_info info
;
4351 status_t status
= common_get_team_usage_info(team
, who
, &info
,
4352 B_CHECK_PERMISSION
);
4354 if (userInfo
== NULL
|| !IS_USER_ADDRESS(userInfo
)
4355 || user_memcpy(userInfo
, &info
, size
) != B_OK
) {
4356 return B_BAD_ADDRESS
;
4364 _user_get_extended_team_info(team_id teamID
, uint32 flags
, void* buffer
,
4365 size_t size
, size_t* _sizeNeeded
)
4368 if ((buffer
!= NULL
&& !IS_USER_ADDRESS(buffer
))
4369 || (buffer
== NULL
&& size
> 0)
4370 || _sizeNeeded
== NULL
|| !IS_USER_ADDRESS(_sizeNeeded
)) {
4371 return B_BAD_ADDRESS
;
4376 if ((flags
& B_TEAM_INFO_BASIC
) != 0) {
4377 // allocate memory for a copy of the needed team data
4378 struct ExtendedTeamData
{
4384 uid_t effective_uid
;
4385 gid_t effective_gid
;
4386 char name
[B_OS_NAME_LENGTH
];
4389 ExtendedTeamData
* teamClone
4390 = (ExtendedTeamData
*)malloc(sizeof(ExtendedTeamData
));
4391 // It would be nicer to use new, but then we'd have to use
4392 // ObjectDeleter and declare the structure outside of the function
4393 // due to template parameter restrictions.
4394 if (teamClone
== NULL
)
4396 MemoryDeleter
teamCloneDeleter(teamClone
);
4398 io_context
* ioContext
;
4400 // get the team structure
4401 Team
* team
= Team::GetAndLock(teamID
);
4403 return B_BAD_TEAM_ID
;
4404 BReference
<Team
> teamReference(team
, true);
4405 TeamLocker
teamLocker(team
, true);
4408 teamClone
->id
= team
->id
;
4409 strlcpy(teamClone
->name
, team
->Name(), sizeof(teamClone
->name
));
4410 teamClone
->group_id
= team
->group_id
;
4411 teamClone
->session_id
= team
->session_id
;
4412 teamClone
->real_uid
= team
->real_uid
;
4413 teamClone
->real_gid
= team
->real_gid
;
4414 teamClone
->effective_uid
= team
->effective_uid
;
4415 teamClone
->effective_gid
= team
->effective_gid
;
4417 // also fetch a reference to the I/O context
4418 ioContext
= team
->io_context
;
4419 vfs_get_io_context(ioContext
);
4421 CObjectDeleter
<io_context
> ioContextPutter(ioContext
,
4422 &vfs_put_io_context
);
4424 // add the basic data to the info message
4425 if (info
.AddInt32("id", teamClone
->id
) != B_OK
4426 || info
.AddString("name", teamClone
->name
) != B_OK
4427 || info
.AddInt32("process group", teamClone
->group_id
) != B_OK
4428 || info
.AddInt32("session", teamClone
->session_id
) != B_OK
4429 || info
.AddInt32("uid", teamClone
->real_uid
) != B_OK
4430 || info
.AddInt32("gid", teamClone
->real_gid
) != B_OK
4431 || info
.AddInt32("euid", teamClone
->effective_uid
) != B_OK
4432 || info
.AddInt32("egid", teamClone
->effective_gid
) != B_OK
) {
4436 // get the current working directory from the I/O context
4440 MutexLocker
ioContextLocker(ioContext
->io_mutex
);
4441 vfs_vnode_to_node_ref(ioContext
->cwd
, &cwdDevice
, &cwdDirectory
);
4444 if (info
.AddInt32("cwd device", cwdDevice
) != B_OK
4445 || info
.AddInt64("cwd directory", cwdDirectory
) != B_OK
) {
4450 // TODO: Support the other flags!
4452 // copy the needed size and, if it fits, the message back to userland
4453 size_t sizeNeeded
= info
.ContentSize();
4454 if (user_memcpy(_sizeNeeded
, &sizeNeeded
, sizeof(sizeNeeded
)) != B_OK
)
4455 return B_BAD_ADDRESS
;
4457 if (sizeNeeded
> size
)
4458 return B_BUFFER_OVERFLOW
;
4460 if (user_memcpy(buffer
, info
.Buffer(), sizeNeeded
) != B_OK
)
4461 return B_BAD_ADDRESS
;