/*
 * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
/*! Threading routines */


#include <sys/resource.h>

#include <util/AutoLock.h>

#include <arch/debug.h>
#include <boot/kernel_args.h>
#include <condition_variable.h>
#include <kscheduler.h>
#include <Notifications.h>
#include <real_time_clock.h>
#include <slab/Slab.h>
#include <syscall_restart.h>
#include <user_runtime.h>
#include <user_thread.h>
#include <vm/VMAddressSpace.h>
#include <wait_for_objects.h>

#include "TeamThreadTables.h"
//#define TRACE_THREAD
#ifdef TRACE_THREAD
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


#define THREAD_MAX_MESSAGE_SIZE		65536
// #pragma mark - ThreadHashTable


typedef BKernel::TeamThreadTable<Thread> ThreadHashTable;


static Thread sIdleThreads[SMP_MAX_CPUS];
static ThreadHashTable sThreadHash;
static spinlock sThreadHashLock = B_SPINLOCK_INITIALIZER;
static thread_id sNextThreadID = 2;
	// ID 1 is allocated for the kernel by Team::Team() behind our back

// Some arbitrarily chosen limits -- should probably depend on the available
// memory (the limit is not yet enforced).
static int32 sMaxThreads = 4096;
static int32 sUsedThreads = 0;

spinlock gThreadCreationLock = B_SPINLOCK_INITIALIZER;
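
// Dead threads are handed over to a dedicated "undertaker" kernel thread for
// final cleanup (see undertaker() below); UndertakerEntry is the queue
// element used for that hand-off.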
struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
	Thread*	thread;
	team_id	teamID;

	UndertakerEntry(Thread* thread, team_id teamID)
		:
		thread(thread),
		teamID(teamID)
	{
	}
};
struct ThreadEntryArguments {
	status_t	(*kernelFunction)(void* argument);
	void*		argument;
	bool		enterUserland;
};

struct UserThreadEntryArguments : ThreadEntryArguments {
	addr_t			userlandEntry;
	void*			userlandArgument1;
	void*			userlandArgument2;
	pthread_t		pthread;
	arch_fork_arg*	forkArgs;
	uint32			flags;
};
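
// Note: an entry-arguments struct is copied onto the new thread's kernel
// stack by init_thread_kernel_stack() and handed to common_thread_entry(),
// so it should remain plain data without external references.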
class ThreadNotificationService : public DefaultNotificationService {
public:
	ThreadNotificationService()
		: DefaultNotificationService("threads")
	{
	}

	void Notify(uint32 eventCode, team_id teamID, thread_id threadID,
		Thread* thread = NULL)
	{
		char eventBuffer[180];
		KMessage event;
		event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
		event.AddInt32("event", eventCode);
		event.AddInt32("team", teamID);
		event.AddInt32("thread", threadID);
		if (thread != NULL)
			event.AddPointer("threadStruct", thread);

		DefaultNotificationService::Notify(event, eventCode);
	}

	void Notify(uint32 eventCode, Thread* thread)
	{
		return Notify(eventCode, thread->id, thread->team->id, thread);
	}
};
static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
static spinlock sUndertakerLock = B_SPINLOCK_INITIALIZER;
static ConditionVariable sUndertakerCondition;
static ThreadNotificationService sNotificationService;


// object cache to allocate thread structures from
static object_cache* sThreadCache;
// #pragma mark - Thread


/*!	Constructs a thread.

	\param name The thread's name.
	\param threadID The ID to be assigned to the new thread. If
		\code < 0 \endcode a fresh one is allocated.
	\param cpu The CPU the thread shall be assigned.
*/
Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
	:
	sigsuspend_original_unblocked_mask(0),
	user_signal_context(NULL),
	signal_stack_base(0),
	signal_stack_size(0),
	signal_stack_enabled(false),
	page_faults_allowed(1),
	kernel_stack_area(-1),
	kernel_stack_base(0),
	user_local_storage(0),
	post_interrupt_callback(NULL),
	post_interrupt_data(NULL)
{
	id = threadID >= 0 ? threadID : allocate_thread_id();

	char lockName[32];
	snprintf(lockName, sizeof(lockName), "Thread:%" B_PRId32, id);
	mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);

	B_INITIALIZE_SPINLOCK(&time_lock);
	B_INITIALIZE_SPINLOCK(&scheduler_lock);
	B_INITIALIZE_RW_SPINLOCK(&team_lock);

	if (name != NULL)
		strlcpy(this->name, name, B_OS_NAME_LENGTH);
	else
		strcpy(this->name, "unnamed thread");

	list_init(&exit.waiters);

	// add to thread table -- yet invisible
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
	sThreadHash.Insert(this);
}
Thread::~Thread()
{
	// Delete resources that should actually be deleted by the thread itself,
	// when it exited, but that might still exist, if the thread was never run.

	if (user_stack_area >= 0)
		delete_area(user_stack_area);

	DeleteUserTimers(false);

	// delete the resources that may remain in either case

	if (kernel_stack_area >= 0)
		delete_area(kernel_stack_area);

	fPendingSignals.Clear();

	if (exit.sem >= 0)
		delete_sem(exit.sem);
	if (msg.write_sem >= 0)
		delete_sem(msg.write_sem);
	if (msg.read_sem >= 0)
		delete_sem(msg.read_sem);

	scheduler_on_thread_destroy(this);

	mutex_destroy(&fLock);

	// remove from thread table
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
	sThreadHash.Remove(this);
}
/*static*/ status_t
Thread::Create(const char* name, Thread*& _thread)
{
	Thread* thread = new Thread(name, -1, NULL);
	if (thread == NULL)
		return B_NO_MEMORY;

	status_t error = thread->Init(false);
	if (error != B_OK) {
		delete thread;
		return error;
	}

	_thread = thread;
	return B_OK;
}
/*static*/ Thread*
Thread::Get(thread_id id)
{
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
	Thread* thread = sThreadHash.Lookup(id);
	if (thread != NULL)
		thread->AcquireReference();
	return thread;
}
/*static*/ Thread*
Thread::GetAndLock(thread_id id)
{
	// look it up and acquire a reference
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
	Thread* thread = sThreadHash.Lookup(id);
	if (thread == NULL)
		return NULL;

	thread->AcquireReference();
	threadHashLocker.Unlock();

	// lock and check, if it is still in the hash table
	thread->Lock();
	threadHashLocker.Lock();

	if (sThreadHash.Lookup(id) == thread)
		return thread;

	threadHashLocker.Unlock();

	// nope, the thread is no longer in the hash table
	thread->UnlockAndReleaseReference();

	return NULL;
}
/*static*/ Thread*
Thread::GetDebug(thread_id id)
{
	return sThreadHash.Lookup(id, false);
}


/*static*/ bool
Thread::IsAlive(thread_id id)
{
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
	return sThreadHash.Lookup(id) != NULL;
}
void*
Thread::operator new(size_t size)
{
	return object_cache_alloc(sThreadCache, 0);
}


void*
Thread::operator new(size_t, void* pointer)
{
	return pointer;
}


void
Thread::operator delete(void* pointer, size_t size)
{
	object_cache_free(sThreadCache, pointer, 0);
}
status_t
Thread::Init(bool idleThread)
{
	status_t error = scheduler_on_thread_create(this, idleThread);
	if (error != B_OK)
		return error;

	char temp[64];
	snprintf(temp, sizeof(temp), "thread_%" B_PRId32 "_retcode_sem", id);
	exit.sem = create_sem(0, temp);
	if (exit.sem < 0)
		return exit.sem;

	snprintf(temp, sizeof(temp), "%s send", name);
	msg.write_sem = create_sem(1, temp);
	if (msg.write_sem < 0)
		return msg.write_sem;

	snprintf(temp, sizeof(temp), "%s receive", name);
	msg.read_sem = create_sem(0, temp);
	if (msg.read_sem < 0)
		return msg.read_sem;

	error = arch_thread_init_thread_struct(this);
	if (error != B_OK)
		return error;

	return B_OK;
}
/*!	Checks whether the thread is still in the thread hash table.
*/
bool
Thread::IsAlive() const
{
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);

	return sThreadHash.Lookup(id) != NULL;
}
void
Thread::ResetSignalsOnExec()
{
	// We are supposed to keep the pending signals and the signal mask. Only
	// the signal stack, if set, shall be unset.

	sigsuspend_original_unblocked_mask = 0;
	user_signal_context = NULL;
	signal_stack_base = 0;
	signal_stack_size = 0;
	signal_stack_enabled = false;
}
/*!	Adds the given user timer to the thread and, if user-defined, assigns it
	an ID.

	The caller must hold the thread's lock.

	\param timer The timer to be added. If it doesn't have an ID yet, it is
		considered user-defined and will be assigned an ID.
	\return \c B_OK, if the timer was added successfully, another error code
		otherwise.
*/
status_t
Thread::AddUserTimer(UserTimer* timer)
{
	// If the timer is user-defined, check timer limit and increment
	// user-defined count.
	if (timer->ID() < 0 && !team->CheckAddUserDefinedTimer())
		return EAGAIN;

	fUserTimers.AddTimer(timer);

	return B_OK;
}
/*!	Removes the given user timer from the thread.

	The caller must hold the thread's lock.

	\param timer The timer to be removed.
*/
void
Thread::RemoveUserTimer(UserTimer* timer)
{
	fUserTimers.RemoveTimer(timer);

	if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
		team->UserDefinedTimersRemoved(1);
}
/*!	Deletes all (or all user-defined) user timers of the thread.

	The caller must hold the thread's lock.

	\param userDefinedOnly If \c true, only the user-defined timers are
		deleted, otherwise all timers are deleted.
*/
void
Thread::DeleteUserTimers(bool userDefinedOnly)
{
	int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
	if (count > 0)
		team->UserDefinedTimersRemoved(count);
}
void
Thread::DeactivateCPUTimeUserTimers()
{
	while (ThreadTimeUserTimer* timer = fCPUTimeUserTimers.Head())
		timer->Deactivate();
}
// #pragma mark - ThreadListIterator


ThreadListIterator::ThreadListIterator()
{
	InterruptsSpinLocker locker(sThreadHashLock);
	sThreadHash.InsertIteratorEntry(&fEntry);
}


ThreadListIterator::~ThreadListIterator()
{
	InterruptsSpinLocker locker(sThreadHashLock);
	sThreadHash.RemoveIteratorEntry(&fEntry);
}


Thread*
ThreadListIterator::Next()
{
	// get the next thread -- if there is one, get a reference for it
	InterruptsSpinLocker locker(sThreadHashLock);
	Thread* thread = sThreadHash.NextElement(&fEntry);
	if (thread != NULL)
		thread->AcquireReference();

	return thread;
}
// #pragma mark - ThreadCreationAttributes


ThreadCreationAttributes::ThreadCreationAttributes(thread_func function,
	const char* name, int32 priority, void* arg, team_id team,
	Thread* thread)
{
	this->entry = NULL;
	this->name = name;
	this->priority = priority;
	this->args1 = NULL;
	this->args2 = NULL;
	this->stack_address = NULL;
	this->stack_size = 0;
	this->guard_size = 0;
	this->pthread = NULL;
	this->flags = 0;
	this->team = team >= 0 ? team : team_get_kernel_team()->id;
	this->thread = thread;
	this->signal_mask = 0;
	this->additional_stack_size = 0;
	this->kernelEntry = function;
	this->kernelArgument = arg;
	this->forkArgs = NULL;
}
/*!	Initializes the structure from a userland structure.
	\param userAttributes The userland structure (must be a userland address).
	\param nameBuffer A character array of at least size B_OS_NAME_LENGTH,
		which will be used for the \c name field, if the userland structure has
		a name. The buffer must remain valid as long as this structure is in
		use afterwards (or until it is reinitialized).
	\return \c B_OK, if the initialization went fine, another error code
		otherwise.
*/
status_t
ThreadCreationAttributes::InitFromUserAttributes(
	const thread_creation_attributes* userAttributes, char* nameBuffer)
{
	if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
		|| user_memcpy((thread_creation_attributes*)this, userAttributes,
				sizeof(thread_creation_attributes)) != B_OK) {
		return B_BAD_ADDRESS;
	}

	if (stack_size != 0
		&& (stack_size < MIN_USER_STACK_SIZE
			|| stack_size > MAX_USER_STACK_SIZE)) {
		return B_BAD_VALUE;
	}

	if (entry == NULL || !IS_USER_ADDRESS(entry)
		|| (stack_address != NULL && !IS_USER_ADDRESS(stack_address))
		|| (name != NULL && (!IS_USER_ADDRESS(name)
			|| user_strlcpy(nameBuffer, name, B_OS_NAME_LENGTH) < 0))) {
		return B_BAD_ADDRESS;
	}

	name = name != NULL ? nameBuffer : "user thread";

	// kernel only attributes (not in thread_creation_attributes):
	Thread* currentThread = thread_get_current_thread();
	team = currentThread->team->id;
	thread = NULL;
	signal_mask = currentThread->sig_block_mask;
		// inherit the current thread's signal mask
	additional_stack_size = 0;
	kernelEntry = NULL;
	kernelArgument = NULL;
	forkArgs = NULL;

	return B_OK;
}
// #pragma mark - private functions


/*!	Inserts a thread into a team.
	The caller must hold the team's lock, the thread's lock, and the scheduler
	lock.
*/
static void
insert_thread_into_team(Team* team, Thread* thread)
{
	thread->team_next = team->thread_list;
	team->thread_list = thread;
	team->num_threads++;

	if (team->num_threads == 1) {
		// this was the first thread
		team->main_thread = thread;
	}
	thread->team = team;
}
/*!	Removes a thread from a team.
	The caller must hold the team's lock, the thread's lock, and the scheduler
	lock.
*/
static void
remove_thread_from_team(Team* team, Thread* thread)
{
	Thread* temp;
	Thread* last = NULL;

	for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
		if (temp == thread) {
			if (last == NULL)
				team->thread_list = temp->team_next;
			else
				last->team_next = temp->team_next;

			team->num_threads--;
			break;
		}
		last = temp;
	}
}
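
/*!	Prepares the current thread for its first userland execution: initializes
	TLS and the thread's user_thread structure, restores the fork frame for
	fork()ed threads, and finally jumps to the userland entry point.
	Only returns in case of error.
*/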
static status_t
enter_userspace(Thread* thread, UserThreadEntryArguments* args)
{
	status_t error = arch_thread_init_tls(thread);
	if (error != B_OK) {
		dprintf("Failed to init TLS for new userland thread \"%s\" (%" B_PRId32
			")\n", thread->name, thread->id);
		free(args->forkArgs);
		return error;
	}

	user_debug_update_new_thread_flags(thread);

	// init the thread's user_thread
	user_thread* userThread = thread->user_thread;
	userThread->pthread = args->pthread;
	userThread->flags = 0;
	userThread->wait_status = B_OK;
	userThread->defer_signals
		= (args->flags & THREAD_CREATION_FLAG_DEFER_SIGNALS) != 0 ? 1 : 0;
	userThread->pending_signals = 0;

	if (args->forkArgs != NULL) {
		// This is a fork()ed thread. Copy the fork args onto the stack and
		// free them.
		arch_fork_arg archArgs = *args->forkArgs;
		free(args->forkArgs);

		arch_restore_fork_frame(&archArgs);
			// this one won't return here
		return B_ERROR;
	}

	// Jump to the entry point in user space. Only returns, if something fails.
	return arch_thread_enter_userspace(thread, args->userlandEntry,
		args->userlandArgument1, args->userlandArgument2);
}
status_t
thread_enter_userspace_new_team(Thread* thread, addr_t entryFunction,
	void* argument1, void* argument2)
{
	UserThreadEntryArguments entryArgs;
	entryArgs.kernelFunction = NULL;
	entryArgs.argument = NULL;
	entryArgs.enterUserland = true;
	entryArgs.userlandEntry = (addr_t)entryFunction;
	entryArgs.userlandArgument1 = argument1;
	entryArgs.userlandArgument2 = argument2;
	entryArgs.pthread = NULL;
	entryArgs.forkArgs = NULL;
	entryArgs.flags = 0;

	return enter_userspace(thread, &entryArgs);
}
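
/*!	Entry function of every new thread, set up by init_thread_kernel_stack().
	Finishes the scheduler bookkeeping for the first schedule, runs the kernel
	function (if any), and enters userland if requested. Does not return
	normally; the thread terminates via thread_exit().
*/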
static status_t
common_thread_entry(void* _args)
{
	Thread* thread = thread_get_current_thread();

	// The thread is new and has been scheduled the first time.

	scheduler_new_thread_entry(thread);

	// unlock the scheduler lock and enable interrupts
	release_spinlock(&thread->scheduler_lock);
	enable_interrupts();

	// call the kernel function, if any
	ThreadEntryArguments* args = (ThreadEntryArguments*)_args;
	if (args->kernelFunction != NULL)
		args->kernelFunction(args->argument);

	// If requested, enter userland, now.
	if (args->enterUserland) {
		enter_userspace(thread, (UserThreadEntryArguments*)args);
			// only returns in case of error

		// If that's the team's main thread, init the team exit info.
		if (thread == thread->team->main_thread)
			team_init_exit_info_on_error(thread->team);
	}

	// we're done
	thread_exit();

	return B_OK;
}
/*!	Prepares the given thread's kernel stack for executing its entry function.

	The data pointed to by \a data of size \a dataSize are copied to the
	thread's kernel stack. A pointer to the copy's data is passed to the entry
	function. The entry function is common_thread_entry().

	\param thread The thread.
	\param data Pointer to data to be copied to the thread's stack and passed
		to the entry function.
	\param dataSize The size of \a data.
*/
static void
init_thread_kernel_stack(Thread* thread, const void* data, size_t dataSize)
{
	uint8* stack = (uint8*)thread->kernel_stack_base;
	uint8* stackTop = (uint8*)thread->kernel_stack_top;

	// clear (or rather invalidate) the kernel stack contents, if compiled with
	// debugging
#if KDEBUG > 0
#	if defined(DEBUG_KERNEL_STACKS) && defined(STACK_GROWS_DOWNWARDS)
	memset((void*)(stack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE), 0xcc,
		KERNEL_STACK_SIZE);
#	else
	memset(stack, 0xcc, KERNEL_STACK_SIZE);
#	endif
#endif

	// copy the data onto the stack, with 16-byte alignment to be on the safe
	// side
	void* clonedData;
#ifdef STACK_GROWS_DOWNWARDS
	clonedData = (void*)ROUNDDOWN((addr_t)stackTop - dataSize, 16);
	stackTop = (uint8*)clonedData;
#else
	clonedData = (void*)ROUNDUP((addr_t)stack, 16);
	stack = (uint8*)clonedData + ROUNDUP(dataSize, 16);
#endif

	memcpy(clonedData, data, dataSize);

	arch_thread_init_kthread_stack(thread, stack, stackTop,
		&common_thread_entry, clonedData);
}
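
/*!	Creates (or adopts) the userland stack for a new thread and stores its
	base, size, and area in the Thread structure. If \a _stackBase is NULL, a
	new stack area (including guard pages and TLS space) is allocated in the
	team's address space.
*/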
static status_t
create_thread_user_stack(Team* team, Thread* thread, void* _stackBase,
	size_t stackSize, size_t additionalSize, size_t guardSize,
	char* nameBuffer)
{
	area_id stackArea = -1;
	uint8* stackBase = (uint8*)_stackBase;

	if (stackBase != NULL) {
		// A stack has been specified. It must be large enough to hold the
		// TLS space at least. Guard pages are ignored for existing stacks.
		STATIC_ASSERT(TLS_SIZE < MIN_USER_STACK_SIZE);
		if (stackSize < MIN_USER_STACK_SIZE)
			return B_BAD_VALUE;

		stackSize -= TLS_SIZE;
	} else {
		// No user-defined stack -- allocate one. For non-main threads the
		// stack will be between USER_STACK_REGION and the main thread stack
		// area. For a main thread the position is fixed.

		guardSize = PAGE_ALIGN(guardSize);

		if (stackSize == 0) {
			// Use the default size (a different one for a main thread).
			stackSize = thread->id == team->id
				? USER_MAIN_THREAD_STACK_SIZE : USER_STACK_SIZE;
		} else {
			// Verify that the given stack size is large enough.
			if (stackSize < MIN_USER_STACK_SIZE)
				return B_BAD_VALUE;

			stackSize = PAGE_ALIGN(stackSize);
		}

		size_t areaSize = PAGE_ALIGN(guardSize + stackSize + TLS_SIZE
			+ additionalSize);

		snprintf(nameBuffer, B_OS_NAME_LENGTH, "%s_%" B_PRId32 "_stack",
			thread->name, thread->id);

		stackBase = (uint8*)USER_STACK_REGION;

		virtual_address_restrictions virtualRestrictions = {};
		virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
		virtualRestrictions.address = (void*)stackBase;

		physical_address_restrictions physicalRestrictions = {};

		stackArea = create_area_etc(team->id, nameBuffer,
			areaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA,
			0, guardSize, &virtualRestrictions, &physicalRestrictions,
			(void**)&stackBase);
		if (stackArea < 0)
			return stackArea;
	}

	// set the stack
	ThreadLocker threadLocker(thread);
#ifdef STACK_GROWS_DOWNWARDS
	thread->user_stack_base = (addr_t)stackBase + guardSize;
#else
	thread->user_stack_base = (addr_t)stackBase;
#endif
	thread->user_stack_size = stackSize;
	thread->user_stack_area = stackArea;

	return B_OK;
}
status_t
thread_create_user_stack(Team* team, Thread* thread, void* stackBase,
	size_t stackSize, size_t additionalSize)
{
	char nameBuffer[B_OS_NAME_LENGTH];
	return create_thread_user_stack(team, thread, stackBase, stackSize,
		additionalSize, USER_STACK_GUARD_SIZE, nameBuffer);
}
/*!	Creates a new thread.

	\param attributes The thread creation attributes, specifying the team in
		which to create the thread, as well as a whole bunch of other arguments.
	\param kernel \c true, if a kernel-only thread shall be created, \c false,
		if the thread shall also be able to run in userland.
	\return The ID of the newly created thread (>= 0) or an error code on
		failure.
*/
thread_id
thread_create_thread(const ThreadCreationAttributes& attributes, bool kernel)
{
	status_t status = B_OK;

	TRACE(("thread_create_thread(%s, thread = %p, %s)\n", attributes.name,
		attributes.thread, kernel ? "kernel" : "user"));

	// get the team
	Team* team = Team::Get(attributes.team);
	if (team == NULL)
		return B_BAD_TEAM_ID;
	BReference<Team> teamReference(team, true);

	// If a thread object is given, acquire a reference to it, otherwise create
	// a new thread object with the given attributes.
	Thread* thread = attributes.thread;
	if (thread != NULL) {
		thread->AcquireReference();
	} else {
		status = Thread::Create(attributes.name, thread);
		if (status != B_OK)
			return status;
	}
	BReference<Thread> threadReference(thread, true);

	thread->team = team;
		// set already, so, if something goes wrong, the team pointer is
		// available for deinitialization
	thread->priority = attributes.priority == -1
		? B_NORMAL_PRIORITY : attributes.priority;
	thread->priority = std::max(thread->priority,
		(int32)THREAD_MIN_SET_PRIORITY);
	thread->priority = std::min(thread->priority,
		(int32)THREAD_MAX_SET_PRIORITY);
	thread->state = B_THREAD_SUSPENDED;

	thread->sig_block_mask = attributes.signal_mask;

	// init debug structure
	init_thread_debug_info(&thread->debug_info);

	// create the kernel stack
	char stackName[B_OS_NAME_LENGTH];
	snprintf(stackName, B_OS_NAME_LENGTH, "%s_%" B_PRId32 "_kstack",
		thread->name, thread->id);
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};

	thread->kernel_stack_area = create_area_etc(B_SYSTEM_TEAM, stackName,
		KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
		B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA
			| B_KERNEL_STACK_AREA, 0, KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
		&virtualRestrictions, &physicalRestrictions,
		(void**)&thread->kernel_stack_base);

	if (thread->kernel_stack_area < 0) {
		// we're not yet part of a team, so we can just bail out
		status = thread->kernel_stack_area;

		dprintf("create_thread: error creating kernel stack: %s!\n",
			strerror(status));

		return status;
	}

	thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
		+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;

	if (kernel) {
		// Init the thread's kernel stack. It will start executing
		// common_thread_entry() with the arguments we prepare here.
		ThreadEntryArguments entryArgs;
		entryArgs.kernelFunction = attributes.kernelEntry;
		entryArgs.argument = attributes.kernelArgument;
		entryArgs.enterUserland = false;

		init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));
	} else {
		// create the userland stack, if the thread doesn't have one yet
		if (thread->user_stack_base == 0) {
			status = create_thread_user_stack(team, thread,
				attributes.stack_address, attributes.stack_size,
				attributes.additional_stack_size, attributes.guard_size,
				stackName);
			if (status != B_OK)
				return status;
		}

		// Init the thread's kernel stack. It will start executing
		// common_thread_entry() with the arguments we prepare here.
		UserThreadEntryArguments entryArgs;
		entryArgs.kernelFunction = attributes.kernelEntry;
		entryArgs.argument = attributes.kernelArgument;
		entryArgs.enterUserland = true;
		entryArgs.userlandEntry = (addr_t)attributes.entry;
		entryArgs.userlandArgument1 = attributes.args1;
		entryArgs.userlandArgument2 = attributes.args2;
		entryArgs.pthread = attributes.pthread;
		entryArgs.forkArgs = attributes.forkArgs;
		entryArgs.flags = attributes.flags;

		init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));

		// create the pre-defined thread timers
		status = user_timer_create_thread_timers(team, thread);
		if (status != B_OK)
			return status;
	}

	// lock the team and see, if it is still alive
	TeamLocker teamLocker(team);
	if (team->state >= TEAM_STATE_SHUTDOWN)
		return B_BAD_TEAM_ID;

	bool debugNewThread = false;
	if (!kernel) {
		// allocate the user_thread structure, if not already allocated
		if (thread->user_thread == NULL) {
			thread->user_thread = team_allocate_user_thread(team);
			if (thread->user_thread == NULL)
				return B_NO_MEMORY;
		}

		// If the new thread belongs to the same team as the current thread, it
		// may inherit some of the thread debug flags.
		Thread* currentThread = thread_get_current_thread();
		if (currentThread != NULL && currentThread->team == team) {
			// inherit all user flags...
			int32 debugFlags = atomic_get(&currentThread->debug_info.flags)
				& B_THREAD_DEBUG_USER_FLAG_MASK;

			// ... save the syscall tracing flags, unless explicitly specified
			if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
				debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
					| B_THREAD_DEBUG_POST_SYSCALL);
			}

			thread->debug_info.flags = debugFlags;

			// stop the new thread, if desired
			debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
		}
	}

	// We're going to make the thread live, now. The thread itself will take
	// over a reference to its Thread object. We'll acquire another reference
	// for our own use (and threadReference remains armed).

	ThreadLocker threadLocker(thread);

	InterruptsSpinLocker threadCreationLocker(gThreadCreationLock);
	SpinLocker threadHashLocker(sThreadHashLock);

	// check the thread limit
	if (sUsedThreads >= sMaxThreads) {
		// Clean up the user_thread structure. It's a bit unfortunate that the
		// Thread destructor cannot do that, so we have to do that explicitly.
		threadHashLocker.Unlock();
		threadCreationLocker.Unlock();

		user_thread* userThread = thread->user_thread;
		thread->user_thread = NULL;

		threadLocker.Unlock();

		if (userThread != NULL)
			team_free_user_thread(team, userThread);

		return B_NO_MORE_THREADS;
	}

	sUsedThreads++;

	// make thread visible in global hash/list
	thread->visible = true;

	scheduler_on_thread_init(thread);

	thread->AcquireReference();

	// Debug the new thread, if the parent thread required that (see above),
	// or the respective global team debug flag is set. But only, if a
	// debugger is installed for the team.
	if (!kernel) {
		int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
		debugNewThread |= (teamDebugFlags & B_TEAM_DEBUG_STOP_NEW_THREADS) != 0;
		if (debugNewThread
			&& (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) != 0) {
			thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
		}
	}

	{
		SpinLocker signalLocker(team->signal_lock);
		SpinLocker timeLocker(team->time_lock);

		// insert thread into team
		insert_thread_into_team(team, thread);
	}

	threadHashLocker.Unlock();
	threadCreationLocker.Unlock();
	threadLocker.Unlock();
	teamLocker.Unlock();

	// notify listeners
	sNotificationService.Notify(THREAD_ADDED, thread);

	return thread->id;
}
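
/*!	Entry function of the "undertaker" kernel thread: waits for dead threads
	to be queued on sUndertakerEntries, removes each from the kernel team, and
	releases the last reference to its Thread object, which frees it.
*/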
static status_t
undertaker(void* /*args*/)
{
	while (true) {
		// wait for a thread to bury
		InterruptsSpinLocker locker(sUndertakerLock);

		while (sUndertakerEntries.IsEmpty()) {
			ConditionVariableEntry conditionEntry;
			sUndertakerCondition.Add(&conditionEntry);
			locker.Unlock();

			conditionEntry.Wait();

			locker.Lock();
		}

		UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
		locker.Unlock();

		UndertakerEntry entry = *_entry;
			// we need a copy, since the original entry is on the thread's stack

		// we've got an entry
		Thread* thread = entry.thread;

		// make sure the thread isn't running anymore
		InterruptsSpinLocker schedulerLocker(thread->scheduler_lock);
		ASSERT(thread->state == THREAD_STATE_FREE_ON_RESCHED);
		schedulerLocker.Unlock();

		// remove this thread from the kernel team -- this makes it
		// inaccessible
		Team* kernelTeam = team_get_kernel_team();
		TeamLocker kernelTeamLocker(kernelTeam);
		thread->Lock();

		InterruptsSpinLocker threadCreationLocker(gThreadCreationLock);
		SpinLocker signalLocker(kernelTeam->signal_lock);
		SpinLocker timeLocker(kernelTeam->time_lock);

		remove_thread_from_team(kernelTeam, thread);

		timeLocker.Unlock();
		signalLocker.Unlock();
		threadCreationLocker.Unlock();

		kernelTeamLocker.Unlock();

		// free the thread structure
		thread->UnlockAndReleaseReference();
	}

	// can never get here
	return 0;
}
/*!	Returns the semaphore the thread is currently waiting on.

	The return value is purely informative.
	The caller must hold the scheduler lock.

	\param thread The thread.
	\return The ID of the semaphore the thread is currently waiting on or \c -1,
		if it isn't waiting on a semaphore.
*/
static sem_id
get_thread_wait_sem(Thread* thread)
{
	if (thread->state == B_THREAD_WAITING
		&& thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
		return (sem_id)(addr_t)thread->wait.object;
	}
	return -1;
}
/*!	Fills the thread_info structure with information from the specified thread.
	The caller must hold the thread's lock and the scheduler lock.
*/
static void
fill_thread_info(Thread* thread, thread_info* info, size_t size)
{
	info->thread = thread->id;
	info->team = thread->team->id;

	strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);

	if (thread->state == B_THREAD_WAITING) {
		info->state = B_THREAD_WAITING;

		switch (thread->wait.type) {
			case THREAD_BLOCK_TYPE_SNOOZE:
				info->state = B_THREAD_ASLEEP;
				break;

			case THREAD_BLOCK_TYPE_SEMAPHORE:
			{
				sem_id sem = (sem_id)(addr_t)thread->wait.object;
				if (sem == thread->msg.read_sem)
					info->state = B_THREAD_RECEIVING;
				break;
			}

			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
			default:
				break;
		}
	} else
		info->state = (thread_state)thread->state;

	info->priority = thread->priority;
	info->stack_base = (void *)thread->user_stack_base;
	info->stack_end = (void *)(thread->user_stack_base
		+ thread->user_stack_size);

	InterruptsSpinLocker threadTimeLocker(thread->time_lock);
	info->user_time = thread->user_time;
	info->kernel_time = thread->kernel_time;
}
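
/*!	Implements the send_data() semantics: copies the message into a kernel
	buffer and stores it in the target thread's message slot, serialized by
	the msg.write_sem/msg.read_sem semaphore pair.
*/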
static status_t
send_data_etc(thread_id id, int32 code, const void* buffer, size_t bufferSize,
	int32 flags)
{
	Thread* target = Thread::Get(id);
	if (target == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> targetReference(target, true);

	// get the write semaphore
	ThreadLocker targetLocker(target);
	sem_id cachedSem = target->msg.write_sem;
	targetLocker.Unlock();

	if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
		return E2BIG;

	status_t status = acquire_sem_etc(cachedSem, 1, flags, 0);
	if (status == B_INTERRUPTED) {
		// we got interrupted by a signal
		return status;
	}
	if (status != B_OK) {
		// Any other acquisition problems may be due to thread deletion
		return B_BAD_THREAD_ID;
	}

	void* data;
	if (bufferSize > 0) {
		data = malloc(bufferSize);
		if (data == NULL)
			return B_NO_MEMORY;
		if (user_memcpy(data, buffer, bufferSize) != B_OK) {
			free(data);
			return B_BAD_ADDRESS;
		}
	} else
		data = NULL;

	targetLocker.Lock();

	// The target thread could have been deleted at this point.
	if (!target->IsAlive()) {
		targetLocker.Unlock();
		free(data);
		return B_BAD_THREAD_ID;
	}

	// Save message information
	target->msg.sender = thread_get_current_thread()->id;
	target->msg.code = code;
	target->msg.size = bufferSize;
	target->msg.buffer = data;
	cachedSem = target->msg.read_sem;

	targetLocker.Unlock();

	release_sem(cachedSem);
	return B_OK;
}
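
/*!	Counterpart of send_data_etc(): blocks on the calling thread's
	msg.read_sem, copies the stored message to \a buffer, and releases
	msg.write_sem so the next sender may proceed. Returns the message code.
*/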
static int32
receive_data_etc(thread_id* _sender, void* buffer, size_t bufferSize,
	int32 flags)
{
	Thread* thread = thread_get_current_thread();
	size_t size;
	int32 code;

	status_t status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
	if (status != B_OK) {
		// Actually, we're not supposed to return error codes
		// but since the only reason this can fail is that we
		// were killed, it's probably okay to do so (but also
		// meaningless).
		return status;
	}

	if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
		size = min_c(bufferSize, thread->msg.size);
		status = user_memcpy(buffer, thread->msg.buffer, size);
		if (status != B_OK) {
			free(thread->msg.buffer);
			release_sem(thread->msg.write_sem);
			return status;
		}
	}

	*_sender = thread->msg.sender;
	code = thread->msg.code;

	free(thread->msg.buffer);
	release_sem(thread->msg.write_sem);

	return code;
}
static status_t
common_getrlimit(int resource, struct rlimit* rlp)
{
	if (!rlp)
		return B_BAD_ADDRESS;

	switch (resource) {
		case RLIMIT_NOFILE:
		case RLIMIT_NOVMON:
			return vfs_getrlimit(resource, rlp);

		case RLIMIT_STACK:
		{
			rlp->rlim_cur = USER_MAIN_THREAD_STACK_SIZE;
			rlp->rlim_max = USER_MAIN_THREAD_STACK_SIZE;
			return 0;
		}

		default:
			return EINVAL;
	}
}


static status_t
common_setrlimit(int resource, const struct rlimit* rlp)
{
	if (!rlp)
		return B_BAD_ADDRESS;

	switch (resource) {
		case RLIMIT_NOFILE:
		case RLIMIT_NOVMON:
			return vfs_setrlimit(resource, rlp);

		case RLIMIT_CORE:
			// We don't support core files, so allow setting to 0/0 only.
			if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
				return EINVAL;
			return 0;

		default:
			return EINVAL;
	}
}
static status_t
common_snooze_etc(bigtime_t timeout, clockid_t clockID, uint32 flags,
	bigtime_t* _remainingTime)
{
	switch (clockID) {
		case CLOCK_REALTIME:
			// make sure the B_TIMEOUT_REAL_TIME_BASE flag is set and fall
			// through
			flags |= B_TIMEOUT_REAL_TIME_BASE;
		case CLOCK_MONOTONIC:
		{
			// Store the start time, for the case that we get interrupted and
			// need to return the remaining time. For absolute timeouts we can
			// still get the time later, if needed.
			bigtime_t startTime
				= _remainingTime != NULL && (flags & B_RELATIVE_TIMEOUT) != 0
					? system_time() : 0;

			Thread* thread = thread_get_current_thread();

			thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE,
				NULL);
			status_t status = thread_block_with_timeout(flags, timeout);

			if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
				return B_OK;

			// If interrupted, compute the remaining time, if requested.
			if (status == B_INTERRUPTED && _remainingTime != NULL) {
				if ((flags & B_RELATIVE_TIMEOUT) != 0) {
					*_remainingTime = std::max(
						startTime + timeout - system_time(), (bigtime_t)0);
				} else {
					bigtime_t now = (flags & B_TIMEOUT_REAL_TIME_BASE) != 0
						? real_time_clock_usecs() : system_time();
					*_remainingTime = std::max(timeout - now, (bigtime_t)0);
				}
			}

			return status;
		}

		case CLOCK_THREAD_CPUTIME_ID:
			// Waiting for ourselves to do something isn't particularly
			// productive.
			return B_BAD_VALUE;

		case CLOCK_PROCESS_CPUTIME_ID:
		default:
			// We don't have to support those, but we are allowed to. Could be
			// done by creating a UserTimer on the fly with a custom UserEvent
			// that would just wake us up.
			return B_UNSUPPORTED;
	}
}
// #pragma mark - debugger calls


static int
make_thread_unreal(int argc, char** argv)
{
	int32 id = -1;

	if (argc > 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc > 1)
		id = strtoul(argv[1], NULL, 0);

	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		if (id != -1 && thread->id != id)
			continue;

		if (thread->priority > B_DISPLAY_PRIORITY) {
			scheduler_set_thread_priority(thread, B_NORMAL_PRIORITY);
			kprintf("thread %" B_PRId32 " made unreal\n", thread->id);
		}
	}

	return 0;
}
static int
set_thread_prio(int argc, char** argv)
{
	int32 id;
	int32 prio;

	if (argc > 3 || argc < 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	prio = strtoul(argv[1], NULL, 0);
	if (prio > THREAD_MAX_SET_PRIORITY)
		prio = THREAD_MAX_SET_PRIORITY;
	if (prio < THREAD_MIN_SET_PRIORITY)
		prio = THREAD_MIN_SET_PRIORITY;

	if (argc > 2)
		id = strtoul(argv[2], NULL, 0);
	else
		id = thread_get_current_thread()->id;

	bool found = false;
	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		if (thread->id != id)
			continue;
		scheduler_set_thread_priority(thread, prio);
		kprintf("thread %" B_PRId32 " set to priority %" B_PRId32 "\n", id, prio);
		found = true;
		break;
	}
	if (!found)
		kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);

	return 0;
}
static int
make_thread_suspended(int argc, char** argv)
{
	int32 id;

	if (argc > 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc == 1)
		id = thread_get_current_thread()->id;
	else
		id = strtoul(argv[1], NULL, 0);

	bool found = false;
	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		if (thread->id != id)
			continue;

		Signal signal(SIGSTOP, SI_USER, B_OK, team_get_kernel_team()->id);
		send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);

		kprintf("thread %" B_PRId32 " suspended\n", id);
		found = true;
		break;
	}
	if (!found)
		kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);

	return 0;
}
static int
make_thread_resumed(int argc, char** argv)
{
	int32 id;

	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	// force user to enter a thread id, as using
	// the current thread is usually not intended
	id = strtoul(argv[1], NULL, 0);

	bool found = false;
	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		if (thread->id != id)
			continue;

		if (thread->state == B_THREAD_SUSPENDED) {
			scheduler_enqueue_in_run_queue(thread);
			kprintf("thread %" B_PRId32 " resumed\n", thread->id);
		}
		found = true;
		break;
	}
	if (!found)
		kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);

	return 0;
}
static int
drop_into_debugger(int argc, char** argv)
{
	status_t err;
	int32 id;

	if (argc > 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	if (argc == 1)
		id = thread_get_current_thread()->id;
	else
		id = strtoul(argv[1], NULL, 0);

	err = _user_debug_thread(id);
		// TODO: This is a non-trivial syscall doing some locking, so this is
		// really nasty and may go seriously wrong.
	if (err)
		kprintf("drop failed\n");
	else
		kprintf("thread %" B_PRId32 " dropped into user debugger\n", id);

	return 0;
}
/*!	Returns a user-readable string for a thread state.
	Only for use in the kernel debugger.
*/
static const char*
state_to_text(Thread* thread, int32 state)
{
	switch (state) {
		case B_THREAD_READY:
			return "ready";

		case B_THREAD_RUNNING:
			return "running";

		case B_THREAD_WAITING:
		{
			if (thread != NULL) {
				switch (thread->wait.type) {
					case THREAD_BLOCK_TYPE_SNOOZE:
						return "zzz";

					case THREAD_BLOCK_TYPE_SEMAPHORE:
					{
						sem_id sem = (sem_id)(addr_t)thread->wait.object;
						if (sem == thread->msg.read_sem)
							return "receive";
						break;
					}
				}
			}

			return "waiting";
		}

		case B_THREAD_SUSPENDED:
			return "suspended";

		case THREAD_STATE_FREE_ON_RESCHED:
			return "death";

		default:
			return "UNKNOWN";
	}
}
static void
print_thread_list_table_head()
{
	kprintf("%-*s id state wait for %-*s cpu pri %-*s team "
		"name\n",
		B_PRINTF_POINTER_WIDTH, "thread", B_PRINTF_POINTER_WIDTH, "object",
		B_PRINTF_POINTER_WIDTH, "stack");
}
static void
_dump_thread_info(Thread* thread, bool shortInfo)
{
	if (shortInfo) {
		kprintf("%p %6" B_PRId32 " %-10s", thread, thread->id,
			state_to_text(thread, thread->state));

		// does it block on a semaphore or a condition variable?
		if (thread->state == B_THREAD_WAITING) {
			switch (thread->wait.type) {
				case THREAD_BLOCK_TYPE_SEMAPHORE:
				{
					sem_id sem = (sem_id)(addr_t)thread->wait.object;
					if (sem == thread->msg.read_sem)
						kprintf("%*s", B_PRINTF_POINTER_WIDTH + 15, "");
					else {
						kprintf("sem %-*" B_PRId32,
							B_PRINTF_POINTER_WIDTH + 5, sem);
					}
					break;
				}

				case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
					kprintf("cvar %p ", thread->wait.object);
					break;

				case THREAD_BLOCK_TYPE_SNOOZE:
					kprintf("%*s", B_PRINTF_POINTER_WIDTH + 15, "");
					break;

				case THREAD_BLOCK_TYPE_SIGNAL:
					kprintf("signal%*s", B_PRINTF_POINTER_WIDTH + 9, "");
					break;

				case THREAD_BLOCK_TYPE_MUTEX:
					kprintf("mutex %p ", thread->wait.object);
					break;

				case THREAD_BLOCK_TYPE_RW_LOCK:
					kprintf("rwlock %p ", thread->wait.object);
					break;

				case THREAD_BLOCK_TYPE_OTHER:
					kprintf("other%*s", B_PRINTF_POINTER_WIDTH + 10, "");
					break;

				default:
					kprintf("??? %p ", thread->wait.object);
					break;
			}
		} else
			kprintf("-%*s", B_PRINTF_POINTER_WIDTH + 14, "");

		// on which CPU does it run?
		if (thread->cpu)
			kprintf("%2d", thread->cpu->cpu_num);
		else
			kprintf(" -");

		kprintf("%4" B_PRId32 " %p%5" B_PRId32 " %s\n", thread->priority,
			(void *)thread->kernel_stack_base, thread->team->id,
			thread->name != NULL ? thread->name : "<NULL>");

		return;
	}

	// print the long info

	struct thread_death_entry* death = NULL;

	kprintf("THREAD: %p\n", thread);
	kprintf("id: %" B_PRId32 " (%#" B_PRIx32 ")\n", thread->id,
		thread->id);
	kprintf("serial_number: %" B_PRId64 "\n", thread->serial_number);
	kprintf("name: \"%s\"\n", thread->name);
	kprintf("hash_next: %p\nteam_next: %p\n",
		thread->hash_next, thread->team_next);
	kprintf("priority: %" B_PRId32 " (I/O: %" B_PRId32 ")\n",
		thread->priority, thread->io_priority);
	kprintf("state: %s\n", state_to_text(thread, thread->state));
	kprintf("cpu: %p ", thread->cpu);
	if (thread->cpu)
		kprintf("(%d)\n", thread->cpu->cpu_num);
	else
		kprintf("\n");
	kprintf("sig_pending: %#" B_PRIx64 " (blocked: %#" B_PRIx64
		", before sigsuspend(): %#" B_PRIx64 ")\n",
		(int64)thread->ThreadPendingSignals(),
		(int64)thread->sig_block_mask,
		(int64)thread->sigsuspend_original_unblocked_mask);
	kprintf("in_kernel: %d\n", thread->in_kernel);

	if (thread->state == B_THREAD_WAITING) {
		kprintf("waiting for: ");

		switch (thread->wait.type) {
			case THREAD_BLOCK_TYPE_SEMAPHORE:
			{
				sem_id sem = (sem_id)(addr_t)thread->wait.object;
				if (sem == thread->msg.read_sem)
					kprintf("data\n");
				else
					kprintf("semaphore %" B_PRId32 "\n", sem);
				break;
			}

			case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
				kprintf("condition variable %p\n", thread->wait.object);
				break;

			case THREAD_BLOCK_TYPE_SNOOZE:
				kprintf("snooze()\n");
				break;

			case THREAD_BLOCK_TYPE_SIGNAL:
				kprintf("signal\n");
				break;

			case THREAD_BLOCK_TYPE_MUTEX:
				kprintf("mutex %p\n", thread->wait.object);
				break;

			case THREAD_BLOCK_TYPE_RW_LOCK:
				kprintf("rwlock %p\n", thread->wait.object);
				break;

			case THREAD_BLOCK_TYPE_OTHER:
				kprintf("other (%s)\n", (char*)thread->wait.object);
				break;

			default:
				kprintf("unknown (%p)\n", thread->wait.object);
				break;
		}
	}

	kprintf("fault_handler: %p\n", (void *)thread->fault_handler);
	kprintf("team: %p, \"%s\"\n", thread->team,
		thread->team->Name());
	kprintf("  exit.sem: %" B_PRId32 "\n", thread->exit.sem);
	kprintf("  exit.status: %#" B_PRIx32 " (%s)\n", thread->exit.status,
		strerror(thread->exit.status));
	kprintf("  exit.waiters:\n");
	while ((death = (struct thread_death_entry*)list_get_next_item(
			&thread->exit.waiters, death)) != NULL) {
		kprintf("\t%p (thread %" B_PRId32 ")\n", death, death->thread);
	}

	kprintf("kernel_stack_area: %" B_PRId32 "\n", thread->kernel_stack_area);
	kprintf("kernel_stack_base: %p\n", (void *)thread->kernel_stack_base);
	kprintf("user_stack_area: %" B_PRId32 "\n", thread->user_stack_area);
	kprintf("user_stack_base: %p\n", (void *)thread->user_stack_base);
	kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
	kprintf("user_thread: %p\n", (void *)thread->user_thread);
	kprintf("kernel_errno: %#x (%s)\n", thread->kernel_errno,
		strerror(thread->kernel_errno));
	kprintf("kernel_time: %" B_PRId64 "\n", thread->kernel_time);
	kprintf("user_time: %" B_PRId64 "\n", thread->user_time);
	kprintf("flags: 0x%" B_PRIx32 "\n", thread->flags);
	kprintf("architecture dependent section:\n");
	arch_thread_dump_info(&thread->arch_info);
	kprintf("scheduler data:\n");
	scheduler_dump_thread_data(thread);
}
static int
dump_thread_info(int argc, char** argv)
{
	bool shortInfo = false;
	int argi = 1;
	if (argi < argc && strcmp(argv[argi], "-s") == 0) {
		shortInfo = true;
		print_thread_list_table_head();
		argi++;
	}

	if (argi == argc) {
		_dump_thread_info(thread_get_current_thread(), shortInfo);
		return 0;
	}

	for (; argi < argc; argi++) {
		const char *name = argv[argi];
		ulong arg = strtoul(name, NULL, 0);

		if (IS_KERNEL_ADDRESS(arg)) {
			_dump_thread_info((Thread*)arg, shortInfo);
			continue;
		}

		// walk through the thread list, trying to match name or id
		bool found = false;
		for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
				Thread* thread = it.Next();) {
			if (!strcmp(name, thread->name) || thread->id == (thread_id)arg) {
				_dump_thread_info(thread, shortInfo);
				found = true;
				break;
			}
		}

		if (!found)
			kprintf("thread \"%s\" (%" B_PRId32 ") doesn't exist!\n", name, (thread_id)arg);
	}

	return 0;
}
static int
dump_thread_list(int argc, char** argv)
{
	bool realTimeOnly = false;
	bool calling = false;
	const char *callSymbol = NULL;
	addr_t callStart = 0;
	addr_t callEnd = 0;
	int32 requiredState = 0;
	team_id team = -1;
	sem_id sem = -1;

	if (!strcmp(argv[0], "realtime"))
		realTimeOnly = true;
	else if (!strcmp(argv[0], "ready"))
		requiredState = B_THREAD_READY;
	else if (!strcmp(argv[0], "running"))
		requiredState = B_THREAD_RUNNING;
	else if (!strcmp(argv[0], "waiting")) {
		requiredState = B_THREAD_WAITING;

		if (argc > 1) {
			sem = strtoul(argv[1], NULL, 0);
			if (sem == 0)
				kprintf("ignoring invalid semaphore argument.\n");
		}
	} else if (!strcmp(argv[0], "calling")) {
		if (argc < 2) {
			kprintf("Need to give a symbol name or start and end arguments.\n");
			return 0;
		} else if (argc == 3) {
			callStart = parse_expression(argv[1]);
			callEnd = parse_expression(argv[2]);
		} else
			callSymbol = argv[1];

		calling = true;
	} else if (argc > 1) {
		team = strtoul(argv[1], NULL, 0);
		if (team == 0)
			kprintf("ignoring invalid team argument.\n");
	}

	print_thread_list_table_head();

	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		// filter out threads not matching the search criteria
		if ((requiredState && thread->state != requiredState)
			|| (calling && !arch_debug_contains_call(thread, callSymbol,
					callStart, callEnd))
			|| (sem > 0 && get_thread_wait_sem(thread) != sem)
			|| (team > 0 && thread->team->id != team)
			|| (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
			continue;

		_dump_thread_info(thread, true);
	}
	return 0;
}
1918 Thread
* thread
= thread_get_current_thread();
1919 Team
* team
= thread
->team
;
1920 Team
* kernelTeam
= team_get_kernel_team();
1922 struct thread_debug_info debugInfo
;
1923 team_id teamID
= team
->id
;
1925 TRACE(("thread %" B_PRId32
" exiting w/return code %#" B_PRIx32
"\n",
1926 thread
->id
, thread
->exit
.status
));
1928 if (!are_interrupts_enabled())
1929 panic("thread_exit() called with interrupts disabled!\n");
1931 // boost our priority to get this over with
1932 scheduler_set_thread_priority(thread
, B_URGENT_DISPLAY_PRIORITY
);
1934 if (team
!= kernelTeam
) {
1935 // Delete all user timers associated with the thread.
1936 ThreadLocker
threadLocker(thread
);
1937 thread
->DeleteUserTimers(false);
1939 // detach the thread's user thread
1940 user_thread
* userThread
= thread
->user_thread
;
1941 thread
->user_thread
= NULL
;
1943 threadLocker
.Unlock();
1945 // Delete the thread's user thread, if it's not the main thread. If it
1946 // is, we can save the work, since it will be deleted with the team's
1948 if (thread
!= team
->main_thread
)
1949 team_free_user_thread(team
, userThread
);
1952 // remember the user stack area -- we will delete it below
1953 area_id userStackArea
= -1;
1954 if (team
->address_space
!= NULL
&& thread
->user_stack_area
>= 0) {
1955 userStackArea
= thread
->user_stack_area
;
1956 thread
->user_stack_area
= -1;
1959 struct job_control_entry
*death
= NULL
;
1960 struct thread_death_entry
* threadDeathEntry
= NULL
;
1961 bool deleteTeam
= false;
1962 port_id debuggerPort
= -1;
1964 if (team
!= kernelTeam
) {
1965 user_debug_thread_exiting(thread
);
1967 if (team
->main_thread
== thread
) {
1968 // The main thread is exiting. Shut down the whole team.
1971 // kill off all other threads and the user debugger facilities
1972 debuggerPort
= team_shutdown_team(team
);
1974 // acquire necessary locks, which are: process group lock, kernel
1975 // team lock, parent team lock, and the team lock
1976 team
->LockProcessGroup();
1978 team
->LockTeamAndParent(true);
1981 = (thread_death_entry
*)malloc(sizeof(thread_death_entry
));
1983 // acquire necessary locks, which are: kernel team lock and the team
1989 ThreadLocker
threadLocker(thread
);
1991 state
= disable_interrupts();
1993 // swap address spaces, to make sure we're running on the kernel's pgdir
1994 vm_swap_address_space(team
->address_space
, VMAddressSpace::Kernel());
1996 WriteSpinLocker
teamLocker(thread
->team_lock
);
1997 SpinLocker
threadCreationLocker(gThreadCreationLock
);
1998 // removing the thread and putting its death entry to the parent
1999 // team needs to be an atomic operation
2001 // remember how long this thread lasted
2002 bigtime_t now
= system_time();
2004 InterruptsSpinLocker
signalLocker(kernelTeam
->signal_lock
);
2005 SpinLocker
teamTimeLocker(kernelTeam
->time_lock
);
2006 SpinLocker
threadTimeLocker(thread
->time_lock
);
2008 thread
->kernel_time
+= now
- thread
->last_time
;
2009 thread
->last_time
= now
;
2011 team
->dead_threads_kernel_time
+= thread
->kernel_time
;
2012 team
->dead_threads_user_time
+= thread
->user_time
;
2014 // stop/update thread/team CPU time user timers
2015 if (thread
->HasActiveCPUTimeUserTimers()
2016 || team
->HasActiveCPUTimeUserTimers()) {
2017 user_timer_stop_cpu_timers(thread
, NULL
);
2020 // deactivate CPU time user timers for the thread
2021 if (thread
->HasActiveCPUTimeUserTimers())
2022 thread
->DeactivateCPUTimeUserTimers();
2024 threadTimeLocker
.Unlock();
2026 // put the thread into the kernel team until it dies
2027 remove_thread_from_team(team
, thread
);
2028 insert_thread_into_team(kernelTeam
, thread
);
2030 teamTimeLocker
.Unlock();
2031 signalLocker
.Unlock();
2033 teamLocker
.Unlock();
2035 if (team
->death_entry
!= NULL
) {
2036 if (--team
->death_entry
->remaining_threads
== 0)
2037 team
->death_entry
->condition
.NotifyOne();
2041 Team
* parent
= team
->parent
;
2043 // Set the team job control state to "dead" and detach the job
2044 // control entry from our team struct.
2045 team_set_job_control_state(team
, JOB_CONTROL_STATE_DEAD
, NULL
);
2046 death
= team
->job_control_entry
;
2047 team
->job_control_entry
= NULL
;
2049 if (death
!= NULL
) {
2050 death
->InitDeadState();
2052 // team_set_job_control_state() already moved our entry
2053 // into the parent's list. We just check the soft limit of
2055 if (parent
->dead_children
.count
> MAX_DEAD_CHILDREN
) {
2056 death
= parent
->dead_children
.entries
.RemoveHead();
2057 parent
->dead_children
.count
--;
2062 threadCreationLocker
.Unlock();
2063 restore_interrupts(state
);
2065 threadLocker
.Unlock();
2067 // Get a temporary reference to the team's process group
2068 // -- team_remove_team() removes the team from the group, which
2069 // might destroy it otherwise and we wouldn't be able to unlock it.
2070 ProcessGroup
* group
= team
->group
;
2071 group
->AcquireReference();
2073 pid_t foregroundGroupToSignal
;
2074 team_remove_team(team
, foregroundGroupToSignal
);
2076 // unlock everything but the parent team
2078 if (parent
!= kernelTeam
)
2079 kernelTeam
->Unlock();
2081 group
->ReleaseReference();
2083 // Send SIGCHLD to the parent as long as we still have its lock.
2084 // This makes job control state change + signalling atomic.
2085 Signal
childSignal(SIGCHLD
, team
->exit
.reason
, B_OK
, team
->id
);
2086 if (team
->exit
.reason
== CLD_EXITED
) {
2087 childSignal
.SetStatus(team
->exit
.status
);
2089 childSignal
.SetStatus(team
->exit
.signal
);
2090 childSignal
.SetSendingUser(team
->exit
.signaling_user
);
2092 send_signal_to_team(parent
, childSignal
, B_DO_NOT_RESCHEDULE
);
2094 // also unlock the parent
2097 // If the team was a session leader with controlling TTY, we have
2098 // to send SIGHUP to the foreground process group.
2099 if (foregroundGroupToSignal
>= 0) {
2100 Signal
groupSignal(SIGHUP
, SI_USER
, B_OK
, team
->id
);
2101 send_signal_to_process_group(foregroundGroupToSignal
,
2102 groupSignal
, B_DO_NOT_RESCHEDULE
);
2105 // The thread is not the main thread. We store a thread death entry
2106 // for it, unless someone is already waiting for it.
2107 if (threadDeathEntry
!= NULL
2108 && list_is_empty(&thread
->exit
.waiters
)) {
2109 threadDeathEntry
->thread
= thread
->id
;
2110 threadDeathEntry
->status
= thread
->exit
.status
;
2112 // add entry -- remove an old one, if we hit the limit
2113 list_add_item(&team
->dead_threads
, threadDeathEntry
);
2114 team
->dead_threads_count
++;
2115 threadDeathEntry
= NULL
;
2117 if (team
->dead_threads_count
> MAX_DEAD_THREADS
) {
2119 = (thread_death_entry
*)list_remove_head_item(
2120 &team
->dead_threads
);
2121 team
->dead_threads_count
--;
2125 threadCreationLocker
.Unlock();
2126 restore_interrupts(state
);
2128 threadLocker
.Unlock();
2130 kernelTeam
->Unlock();
2133 TRACE(("thread_exit: thread %" B_PRId32
" now a kernel thread!\n",
2137 free(threadDeathEntry
);
2139 // delete the team if we're its main thread
2141 team_delete_team(team
, debuggerPort
);
2143 // we need to delete any death entry that made it to here
2147 ThreadLocker
threadLocker(thread
);
2149 state
= disable_interrupts();
2150 SpinLocker
threadCreationLocker(gThreadCreationLock
);
2152 // mark invisible in global hash/list, so it's no longer accessible
2153 SpinLocker
threadHashLocker(sThreadHashLock
);
2154 thread
->visible
= false;
2156 threadHashLocker
.Unlock();
2158 // Stop debugging for this thread
2159 SpinLocker
threadDebugInfoLocker(thread
->debug_info
.lock
);
2160 debugInfo
= thread
->debug_info
;
2161 clear_thread_debug_info(&thread
->debug_info
, true);
2162 threadDebugInfoLocker
.Unlock();
2164 // Remove the select infos. We notify them a little later.
2165 select_info
* selectInfos
= thread
->select_infos
;
2166 thread
->select_infos
= NULL
;
2168 threadCreationLocker
.Unlock();
2169 restore_interrupts(state
);
2171 threadLocker
.Unlock();
2173 destroy_thread_debug_info(&debugInfo
);
2175 // notify select infos
2176 select_info
* info
= selectInfos
;
2177 while (info
!= NULL
) {
2178 select_sync
* sync
= info
->sync
;
2180 notify_select_events(info
, B_EVENT_INVALID
);
2182 put_select_sync(sync
);
2186 sNotificationService
.Notify(THREAD_REMOVED
, thread
);
2188 // shutdown the thread messaging
2190 status
= acquire_sem_etc(thread
->msg
.write_sem
, 1, B_RELATIVE_TIMEOUT
, 0);
2191 if (status
== B_WOULD_BLOCK
) {
2192 // there is data waiting for us, so let us eat it
2195 delete_sem(thread
->msg
.write_sem
);
2196 // first, let's remove all possibly waiting writers
2197 receive_data_etc(&sender
, NULL
, 0, B_RELATIVE_TIMEOUT
);
2199 // we probably own the semaphore here, and we're the last to do so
2200 delete_sem(thread
->msg
.write_sem
);
2202 // now we can safely remove the msg.read_sem
2203 delete_sem(thread
->msg
.read_sem
);
2205 // fill all death entries and delete the sem that others will use to wait
2208 sem_id cachedExitSem
= thread
->exit
.sem
;
2210 ThreadLocker
threadLocker(thread
);
2212 // make sure no one will grab this semaphore again
2213 thread
->exit
.sem
= -1;
2215 // fill all death entries
2216 thread_death_entry
* entry
= NULL
;
2217 while ((entry
= (thread_death_entry
*)list_get_next_item(
2218 &thread
->exit
.waiters
, entry
)) != NULL
) {
2219 entry
->status
= thread
->exit
.status
;
2222 threadLocker
.Unlock();
2224 delete_sem(cachedExitSem
);
2227 // delete the user stack, if this was a user thread
2228 if (!deleteTeam
&& userStackArea
>= 0) {
2229 // We postponed deleting the user stack until now, since this way all
2230 // notifications for the thread's death are out already and all other
2231 // threads waiting for this thread's death and some object on its stack
2232 // will wake up before we (try to) delete the stack area. Of most
2233 // relevance is probably the case where this is the main thread and
2234 // other threads use objects on its stack -- so we want them terminated
2236 // When the team is deleted, all areas are deleted anyway, so we don't
2237 // need to do that explicitly in that case.
2238 vm_delete_area(teamID
, userStackArea
, true);
2241 // notify the debugger
2242 if (teamID
!= kernelTeam
->id
)
2243 user_debug_thread_deleted(teamID
, thread
->id
);
2245 // enqueue in the undertaker list and reschedule for the last time
2246 UndertakerEntry
undertakerEntry(thread
, teamID
);
2248 disable_interrupts();
2250 SpinLocker
schedulerLocker(thread
->scheduler_lock
);
2252 SpinLocker
undertakerLocker(sUndertakerLock
);
2253 sUndertakerEntries
.Add(&undertakerEntry
);
2254 sUndertakerCondition
.NotifyOne();
2255 undertakerLocker
.Unlock();
2257 scheduler_reschedule(THREAD_STATE_FREE_ON_RESCHED
);
2259 panic("never can get here\n");
/*!	Called in the interrupt handler code when a thread enters
	the kernel for any reason.
	Only tracks time for now.
	Interrupts are disabled.
*/
void
thread_at_kernel_entry(bigtime_t now)
{
	Thread* thread = thread_get_current_thread();

	TRACE(("thread_at_kernel_entry: entry thread %" B_PRId32 "\n", thread->id));

	// track user time
	SpinLocker threadTimeLocker(thread->time_lock);
	thread->user_time += now - thread->last_time;
	thread->last_time = now;
	thread->in_kernel = true;
	threadTimeLocker.Unlock();
}
/*!	Called whenever a thread exits kernel space to user space.
	Tracks time, handles signals, ...
	Interrupts must be enabled. When the function returns, interrupts will be
	disabled.
	The function may not return. This e.g. happens when the thread has received
	a deadly signal.
*/
void
thread_at_kernel_exit(void)
{
	Thread* thread = thread_get_current_thread();

	TRACE(("thread_at_kernel_exit: exit thread %" B_PRId32 "\n", thread->id));

	handle_signals(thread);

	disable_interrupts();

	// track kernel time
	bigtime_t now = system_time();
	SpinLocker threadTimeLocker(thread->time_lock);
	thread->in_kernel = false;
	thread->kernel_time += now - thread->last_time;
	thread->last_time = now;
}
/*!	The quick version of thread_kernel_exit(), in case no signals are pending
	and no debugging shall be done.
	Interrupts must be disabled.
*/
void
thread_at_kernel_exit_no_signals(void)
{
	Thread* thread = thread_get_current_thread();

	TRACE(("thread_at_kernel_exit_no_signals: exit thread %" B_PRId32 "\n",
		thread->id));

	// track kernel time
	bigtime_t now = system_time();
	SpinLocker threadTimeLocker(thread->time_lock);
	thread->in_kernel = false;
	thread->kernel_time += now - thread->last_time;
	thread->last_time = now;
}
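
/*!	Resets the calling thread's state for an exec(): deletes its user-defined
	timers, detaches user_thread and user stack, resets signal state, and
	restarts the thread's CPU time clock.
*/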
void
thread_reset_for_exec(void)
{
	Thread* thread = thread_get_current_thread();

	ThreadLocker threadLocker(thread);

	// delete user-defined timers
	thread->DeleteUserTimers(true);

	// cancel pre-defined timer
	if (UserTimer* timer = thread->UserTimerFor(USER_TIMER_REAL_TIME_ID))
		timer->Cancel();

	// reset user_thread and user stack
	thread->user_thread = NULL;
	thread->user_stack_area = -1;
	thread->user_stack_base = 0;
	thread->user_stack_size = 0;

	// reset signals
	thread->ResetSignalsOnExec();

	// reset thread CPU time clock
	InterruptsSpinLocker timeLocker(thread->time_lock);
	thread->cpu_clock_offset = -thread->CPUTime(false);
}


thread_id
allocate_thread_id()
{
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);

	// find the next unused ID
	thread_id id;
	do {
		id = sNextThreadID++;

		// deal with integer overflow
		if (sNextThreadID < 0)
			sNextThreadID = 2;

		// check whether the ID is already in use
	} while (sThreadHash.Lookup(id, false) != NULL);

	return id;
}


thread_id
peek_next_thread_id()
{
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
	return sNextThreadID;
}


/*!	Yield the CPU to other threads.
	The thread will continue to run if there's no other thread in ready
	state, and if it has a higher priority than the other ready threads it
	still has a good chance to continue.
*/
void
thread_yield(void)
{
	Thread *thread = thread_get_current_thread();
	if (thread == NULL)
		return;

	InterruptsSpinLocker _(thread->scheduler_lock);

	thread->has_yielded = true;
	scheduler_reschedule(B_THREAD_READY);
}


void
thread_map(void (*function)(Thread* thread, void* data), void* data)
{
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);

	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		function(thread, data);
	}
}
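

// A minimal usage sketch for thread_map() (illustrative only -- the callback
// and counter below are not part of this file):
//
//	static void
//	count_visible_threads(Thread* thread, void* data)
//	{
//		if (thread->visible)
//			(*(int32*)data)++;
//	}
//
//	int32 count = 0;
//	thread_map(&count_visible_threads, &count);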


/*!	Kernel private thread creation function.
*/
thread_id
spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
	void *arg, team_id team)
{
	return thread_create_thread(
		ThreadCreationAttributes(function, name, priority, arg, team),
		true);
}


status_t
wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
	status_t *_returnCode)
{
	if (id < 0)
		return B_BAD_THREAD_ID;

	// get the thread, queue our death entry, and fetch the semaphore we have to
	// wait on
	sem_id exitSem = B_BAD_THREAD_ID;
	struct thread_death_entry death;

	Thread* thread = Thread::GetAndLock(id);
	if (thread != NULL) {
		// remember the semaphore we have to wait on and place our death entry
		exitSem = thread->exit.sem;
		if (exitSem >= 0)
			list_add_link_to_head(&thread->exit.waiters, &death);

		thread->UnlockAndReleaseReference();

		if (exitSem < 0)
			return B_BAD_THREAD_ID;
	} else {
		// we couldn't find this thread -- maybe it's already gone, and we'll
		// find its death entry in our team
		Team* team = thread_get_current_thread()->team;
		TeamLocker teamLocker(team);

		// check the child death entries first (i.e. main threads of child
		// teams)
		bool deleteEntry;
		job_control_entry* freeDeath
			= team_get_death_entry(team, id, &deleteEntry);
		if (freeDeath != NULL) {
			death.status = freeDeath->status;
			if (deleteEntry)
				delete freeDeath;
		} else {
			// check the thread death entries of the team (non-main threads)
			thread_death_entry* threadDeathEntry = NULL;
			while ((threadDeathEntry = (thread_death_entry*)list_get_next_item(
					&team->dead_threads, threadDeathEntry)) != NULL) {
				if (threadDeathEntry->thread == id) {
					list_remove_item(&team->dead_threads, threadDeathEntry);
					team->dead_threads_count--;
					death.status = threadDeathEntry->status;
					free(threadDeathEntry);
					break;
				}
			}

			if (threadDeathEntry == NULL)
				return B_BAD_THREAD_ID;
		}

		// we found the thread's death entry in our team
		if (_returnCode)
			*_returnCode = death.status;

		return B_OK;
	}

	// we need to wait for the death of the thread

	resume_thread(id);
		// make sure we don't wait forever on a suspended thread

	status_t status = acquire_sem_etc(exitSem, 1, flags, timeout);

	if (status == B_OK) {
		// this should never happen as the thread deletes the semaphore on exit
		panic("could acquire exit_sem for thread %" B_PRId32 "\n", id);
	} else if (status == B_BAD_SEM_ID) {
		// this is the way the thread normally exits
		status = B_OK;
	} else {
		// We were probably interrupted or the timeout occurred; we need to
		// remove our death entry now.
		thread = Thread::GetAndLock(id);
		if (thread != NULL) {
			list_remove_link(&death);
			thread->UnlockAndReleaseReference();
		} else {
			// The thread is already gone, so we need to wait uninterruptibly
			// for its exit semaphore to make sure our death entry stays valid.
			// It won't take long, since the thread is apparently already in the
			// middle of the cleanup.
			acquire_sem(exitSem);
			status = B_OK;
		}
	}

	if (status == B_OK && _returnCode != NULL)
		*_returnCode = death.status;

	return status;
}
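

// Usage sketch for wait_for_thread_etc() (illustrative; "id" stands for a
// valid thread ID): wait at most one second for the thread to exit and fetch
// its return code.
//
//	status_t returnCode;
//	status_t error = wait_for_thread_etc(id, B_RELATIVE_TIMEOUT, 1000000,
//		&returnCode);
//	if (error == B_OK)
//		dprintf("thread exited with %" B_PRId32 "\n", returnCode);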


status_t
select_thread(int32 id, struct select_info* info, bool kernel)
{
	// get and lock the thread
	Thread* thread = Thread::GetAndLock(id);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// We support only B_EVENT_INVALID at the moment.
	info->selected_events &= B_EVENT_INVALID;

	// add info to list
	if (info->selected_events != 0) {
		info->next = thread->select_infos;
		thread->select_infos = info;

		// we need a sync reference
		atomic_add(&info->sync->ref_count, 1);
	}

	return B_OK;
}


status_t
deselect_thread(int32 id, struct select_info* info, bool kernel)
{
	// get and lock the thread
	Thread* thread = Thread::GetAndLock(id);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// remove info from list
	select_info** infoLocation = &thread->select_infos;
	while (*infoLocation != NULL && *infoLocation != info)
		infoLocation = &(*infoLocation)->next;

	if (*infoLocation != info)
		return B_OK;

	*infoLocation = info->next;

	threadLocker.Unlock();

	// surrender sync reference
	put_select_sync(info->sync);

	return B_OK;
}


int32
thread_max_threads(void)
{
	return sMaxThreads;
}


int32
thread_used_threads(void)
{
	InterruptsSpinLocker threadHashLocker(sThreadHashLock);
	return sUsedThreads;
}


/*!	Returns a user-readable string for a thread state.
	Only for use in the kernel debugger.
*/
const char*
thread_state_to_text(Thread* thread, int32 state)
{
	return state_to_text(thread, state);
}


int32
thread_get_io_priority(thread_id id)
{
	Thread* thread = Thread::GetAndLock(id);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	int32 priority = thread->io_priority;
	if (priority < 0) {
		// negative I/O priority means using the (CPU) priority
		priority = thread->priority;
	}

	return priority;
}


void
thread_set_io_priority(int32 priority)
{
	Thread* thread = thread_get_current_thread();
	ThreadLocker threadLocker(thread);

	thread->io_priority = priority;
}


status_t
thread_init(kernel_args *args)
{
	TRACE(("thread_init: entry\n"));

	// create the thread hash table
	new(&sThreadHash) ThreadHashTable();
	if (sThreadHash.Init(128) != B_OK)
		panic("thread_init(): failed to init thread hash table!");

	// create the thread structure object cache
	sThreadCache = create_object_cache("threads", sizeof(Thread), 16, NULL,
		NULL, NULL);
		// Note: The x86 port requires 16 byte alignment of thread structures.
	if (sThreadCache == NULL)
		panic("thread_init(): failed to allocate thread object cache!");

	if (arch_thread_init(args) < B_OK)
		panic("arch_thread_init() failed!\n");

	// skip all thread IDs including B_SYSTEM_TEAM, which is reserved
	sNextThreadID = B_SYSTEM_TEAM + 1;

	// create an idle thread for each cpu
	for (uint32 i = 0; i < args->num_cpus; i++) {
		Thread* thread;
		area_info info;
		char name[64];

		sprintf(name, "idle thread %" B_PRIu32, i + 1);
		thread = new(&sIdleThreads[i]) Thread(name,
			i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
		if (thread == NULL || thread->Init(true) != B_OK) {
			panic("error creating idle thread struct\n");
			return B_NO_MEMORY;
		}

		gCPU[i].running_thread = thread;

		thread->team = team_get_kernel_team();
		thread->priority = B_IDLE_PRIORITY;
		thread->state = B_THREAD_RUNNING;
		sprintf(name, "idle thread %" B_PRIu32 " kstack", i + 1);
		thread->kernel_stack_area = find_area(name);

		if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
			panic("error finding idle kstack area\n");

		thread->kernel_stack_base = (addr_t)info.address;
		thread->kernel_stack_top = thread->kernel_stack_base + info.size;

		thread->visible = true;
		insert_thread_into_team(thread->team, thread);

		scheduler_on_thread_init(thread);
	}
	sUsedThreads = args->num_cpus;

	// init the notification service
	new(&sNotificationService) ThreadNotificationService();

	sNotificationService.Register();

	// start the undertaker thread
	new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
	sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");

	thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
		B_DISPLAY_PRIORITY, NULL);
	if (undertakerThread < 0)
		panic("Failed to create undertaker thread!");
	resume_thread(undertakerThread);

	// set up some debugger commands
	add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
		"[ <team> ]\n"
		"Prints a list of all existing threads, or, if a team ID is given,\n"
		"all threads of the specified team.\n"
		"  <team>  - The ID of the team whose threads shall be listed.\n", 0);
	add_debugger_command_etc("ready", &dump_thread_list,
		"List all ready threads",
		"\n"
		"Prints a list of all threads in ready state.\n", 0);
	add_debugger_command_etc("running", &dump_thread_list,
		"List all running threads",
		"\n"
		"Prints a list of all threads in running state.\n", 0);
	add_debugger_command_etc("waiting", &dump_thread_list,
		"List all waiting threads (optionally for a specific semaphore)",
		"[ <sem> ]\n"
		"Prints a list of all threads in waiting state. If a semaphore is\n"
		"specified, only the threads waiting on that semaphore are listed.\n"
		"  <sem>  - ID of the semaphore.\n", 0);
	add_debugger_command_etc("realtime", &dump_thread_list,
		"List all realtime threads",
		"\n"
		"Prints a list of all threads with realtime priority.\n", 0);
	add_debugger_command_etc("thread", &dump_thread_info,
		"Dump info about a particular thread",
		"[ -s ] ( <id> | <address> | <name> )*\n"
		"Prints information about the specified thread. If no argument is\n"
		"given the current thread is selected.\n"
		"  -s         - Print info in compact table form (like \"threads\").\n"
		"  <id>       - The ID of the thread.\n"
		"  <address>  - The address of the thread structure.\n"
		"  <name>     - The thread's name.\n", 0);
	add_debugger_command_etc("calling", &dump_thread_list,
		"Show all threads that have a specific address in their call chain",
		"{ <symbol-pattern> | <start> <end> }\n", 0);
	add_debugger_command_etc("unreal", &make_thread_unreal,
		"Set realtime priority threads to normal priority",
		"[ <id> ]\n"
		"Sets the priority of all realtime threads or, if given, the one\n"
		"with the specified ID to \"normal\" priority.\n"
		"  <id>  - The ID of the thread.\n", 0);
	add_debugger_command_etc("suspend", &make_thread_suspended,
		"Suspend a thread",
		"[ <id> ]\n"
		"Suspends the thread with the given ID. If no ID argument is given\n"
		"the current thread is selected.\n"
		"  <id>  - The ID of the thread.\n", 0);
	add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
		"<id>\n"
		"Resumes the specified thread, if it is currently suspended.\n"
		"  <id>  - The ID of the thread.\n", 0);
	add_debugger_command_etc("drop", &drop_into_debugger,
		"Drop a thread into the userland debugger",
		"<id>\n"
		"Drops the specified (userland) thread into the userland debugger\n"
		"after leaving the kernel debugger.\n"
		"  <id>  - The ID of the thread.\n", 0);
	add_debugger_command_etc("priority", &set_thread_prio,
		"Set a thread's priority",
		"<priority> [ <id> ]\n"
		"Sets the priority of the thread with the specified ID to the given\n"
		"priority. If no thread ID is given, the current thread is selected.\n"
		"  <priority>  - The thread's new priority (0 - 120)\n"
		"  <id>        - The ID of the thread.\n", 0);

	return B_OK;
}


status_t
thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
{
	// set up the cpu pointer in the not yet initialized per-cpu idle thread
	// so that get_current_cpu and friends will work, which is crucial for
	// a lot of low level routines
	sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
	arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
	return B_OK;
}


// #pragma mark - thread blocking API


static int32
thread_block_timeout(timer* timer)
{
	Thread* thread = (Thread*)timer->user_data;
	thread_unblock(thread, B_TIMED_OUT);

	return B_HANDLED_INTERRUPT;
}


/*!	Blocks the current thread.

	The thread is blocked until someone else unblocks it. Must be called after a
	call to thread_prepare_to_block(). If the thread has already been unblocked
	after the previous call to thread_prepare_to_block(), this function will
	return immediately. Cf. the documentation of thread_prepare_to_block() for
	more details.

	The caller must hold the scheduler lock.

	\param thread The current thread.
	\return The error code passed to the unblocking function. thread_interrupt()
		uses \c B_INTERRUPTED. By convention \c B_OK means that the wait was
		successful while another error code indicates a failure (what that means
		depends on the client code).
*/
static inline status_t
thread_block_locked(Thread* thread)
{
	if (thread->wait.status == 1) {
		// check for signals, if interruptible
		if (thread_is_interrupted(thread, thread->wait.flags)) {
			thread->wait.status = B_INTERRUPTED;
		} else
			scheduler_reschedule(B_THREAD_WAITING);
	}

	return thread->wait.status;
}
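

// The intended blocking protocol, as per the documentation above (sketch;
// "object" and the condition check are illustrative, and
// thread_prepare_to_block() is defined elsewhere in the kernel):
//
//	Thread* thread = thread_get_current_thread();
//	thread_prepare_to_block(thread, B_CAN_INTERRUPT, THREAD_BLOCK_TYPE_OTHER,
//		object);
//	status_t status = B_OK;
//	if (!conditionAlreadyMet)
//		status = thread_block();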


/*!	Blocks the current thread.

	The function acquires the scheduler lock and calls thread_block_locked().
	See there for more information.
*/
status_t
thread_block()
{
	InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock);
	return thread_block_locked(thread_get_current_thread());
}


/*!	Blocks the current thread with a timeout.

	The thread is blocked until someone else unblocks it or the specified timeout
	occurs. Must be called after a call to thread_prepare_to_block(). If the
	thread has already been unblocked after the previous call to
	thread_prepare_to_block(), this function will return immediately. See
	thread_prepare_to_block() for more details.

	The caller must not hold the scheduler lock.

	\param thread The current thread.
	\param timeoutFlags The standard timeout flags:
		- \c B_RELATIVE_TIMEOUT: \a timeout specifies the time to wait.
		- \c B_ABSOLUTE_TIMEOUT: \a timeout specifies the absolute end time when
			the timeout shall occur.
		- \c B_TIMEOUT_REAL_TIME_BASE: Only relevant when \c B_ABSOLUTE_TIMEOUT
			is specified, too. Specifies that \a timeout is a real time, not a
			system time.
		If neither \c B_RELATIVE_TIMEOUT nor \c B_ABSOLUTE_TIMEOUT are
		specified, an infinite timeout is implied and the function behaves like
		thread_block_locked().
	\return The error code passed to the unblocking function. thread_interrupt()
		uses \c B_INTERRUPTED. When the timeout occurred, \c B_TIMED_OUT is
		returned. By convention \c B_OK means that the wait was successful while
		another error code indicates a failure (what that means depends on the
		client code).
*/
status_t
thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
{
	Thread* thread = thread_get_current_thread();

	InterruptsSpinLocker locker(thread->scheduler_lock);

	if (thread->wait.status != 1)
		return thread->wait.status;

	bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT))
		&& timeout != B_INFINITE_TIMEOUT;
	if (useTimer) {
		// Timer flags: absolute/relative.
		uint32 timerFlags;
		if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
			timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
		} else {
			timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
			if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
				timerFlags |= B_TIMER_REAL_TIME_BASE;
		}

		// install the timer
		thread->wait.unblock_timer.user_data = thread;
		add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
			timerFlags);
	}

	// block
	status_t error = thread_block_locked(thread);

	locker.Unlock();

	// cancel timer, if it didn't fire
	if (error != B_TIMED_OUT && useTimer)
		cancel_timer(&thread->wait.unblock_timer);

	return error;
}
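

// Example (sketch): after thread_prepare_to_block(), wait at most 100 ms; a
// B_TIMED_OUT return value means the unblock timer installed above fired
// first.
//
//	status_t status = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 100000);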


/*!	Unblocks a thread.

	Acquires the scheduler lock and calls thread_unblock_locked().
	See there for more information.
*/
void
thread_unblock(Thread* thread, status_t status)
{
	InterruptsSpinLocker locker(thread->scheduler_lock);
	thread_unblock_locked(thread, status);
}
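

// The waking side simply passes the result code the blocked thread shall see
// (sketch; "waitingThread" is illustrative):
//
//	thread_unblock(waitingThread, B_OK);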


/*!	Unblocks a userland-blocked thread.
	The caller must not hold any locks.
*/
static status_t
user_unblock_thread(thread_id threadID, status_t status)
{
	// get the thread
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	if (thread->user_thread == NULL)
		return B_NOT_ALLOWED;

	InterruptsSpinLocker locker(thread->scheduler_lock);

	if (thread->user_thread->wait_status > 0) {
		thread->user_thread->wait_status = status;
		thread_unblock_locked(thread, status);
	}

	return B_OK;
}
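

// Note on the user_thread->wait_status convention used above and in
// _user_block_thread() below: a value > 0 means "still blocked", while a
// value <= 0 is the final result code stored by the unblocking side.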


// #pragma mark - public kernel API


void
exit_thread(status_t returnValue)
{
	Thread *thread = thread_get_current_thread();
	Team* team = thread->team;

	thread->exit.status = returnValue;

	// if called from a kernel thread, we don't deliver the signal,
	// we just exit directly to keep the user space behaviour of
	// this function
	if (team != team_get_kernel_team()) {
		// If this is its main thread, set the team's exit status.
		if (thread == team->main_thread) {
			TeamLocker teamLocker(team);

			if (!team->exit.initialized) {
				team->exit.reason = CLD_EXITED;
				team->exit.signal = 0;
				team->exit.signaling_user = 0;
				team->exit.status = returnValue;
				team->exit.initialized = true;
			}

			teamLocker.Unlock();
		}

		Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
		send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
	} else
		thread_exit();
}


status_t
kill_thread(thread_id id)
{
	if (id <= 0)
		return B_BAD_VALUE;

	Thread* currentThread = thread_get_current_thread();

	Signal signal(SIGKILLTHR, SI_USER, B_OK, currentThread->team->id);
	return send_signal_to_thread_id(id, signal, 0);
}


status_t
send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
{
	return send_data_etc(thread, code, buffer, bufferSize, 0);
}


int32
receive_data(thread_id *sender, void *buffer, size_t bufferSize)
{
	return receive_data_etc(sender, buffer, bufferSize, 0);
}


bool
has_data(thread_id thread)
{
	// TODO: The thread argument is ignored.
	int32 count;

	if (get_sem_count(thread_get_current_thread()->msg.read_sem,
			&count) != B_OK)
		return false;

	return count == 0 ? false : true;
}
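

// Usage sketch for the simple thread messaging API above (illustrative;
// "target" stands for a valid thread ID):
//
//	int32 value = 42;
//	send_data(target, 'DATA', &value, sizeof(value));
//
//	// ... and in the receiving thread:
//	thread_id sender;
//	int32 received;
//	int32 code = receive_data(&sender, &received, sizeof(received));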


status_t
_get_thread_info(thread_id id, thread_info *info, size_t size)
{
	if (info == NULL || size != sizeof(thread_info) || id < B_OK)
		return B_BAD_VALUE;

	// get the thread
	Thread* thread = Thread::GetAndLock(id);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// fill the info -- also requires the scheduler lock to be held
	InterruptsSpinLocker locker(thread->scheduler_lock);

	fill_thread_info(thread, info, size);

	return B_OK;
}


status_t
_get_next_thread_info(team_id teamID, int32 *_cookie, thread_info *info,
	size_t size)
{
	if (info == NULL || size != sizeof(thread_info) || teamID < 0)
		return B_BAD_VALUE;

	int32 lastID = *_cookie;

	// get the team
	Team* team = Team::GetAndLock(teamID);
	if (team == NULL)
		return B_BAD_VALUE;
	BReference<Team> teamReference(team, true);
	TeamLocker teamLocker(team, true);

	Thread* thread = NULL;

	if (lastID == 0) {
		// We start with the main thread
		thread = team->main_thread;
	} else {
		// Find the one thread with an ID greater than ours (as long as the IDs
		// don't wrap they are always sorted from highest to lowest).
		// TODO: That is broken not only when the IDs wrap, but also for the
		// kernel team, to which threads are added when they are dying.
		for (Thread* next = team->thread_list; next != NULL;
				next = next->team_next) {
			if (next->id <= lastID)
				break;

			thread = next;
		}
	}

	if (thread == NULL)
		return B_BAD_VALUE;

	lastID = thread->id;
	*_cookie = lastID;

	ThreadLocker threadLocker(thread);
	InterruptsSpinLocker locker(thread->scheduler_lock);

	fill_thread_info(thread, info, size);

	return B_OK;
}


thread_id
find_thread(const char* name)
{
	if (name == NULL)
		return thread_get_current_thread_id();

	InterruptsSpinLocker threadHashLocker(sThreadHashLock);

	// TODO: Scanning the whole hash with the thread hash lock held isn't
	// exactly cheap -- although this function is probably used very rarely.

	for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
			Thread* thread = it.Next();) {
		if (!thread->visible)
			continue;

		if (strcmp(thread->name, name) == 0)
			return thread->id;
	}

	return B_NAME_NOT_FOUND;
}


status_t
rename_thread(thread_id id, const char* name)
{
	if (name == NULL)
		return B_BAD_VALUE;

	// get the thread
	Thread* thread = Thread::GetAndLock(id);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// check whether the operation is allowed
	if (thread->team != thread_get_current_thread()->team)
		return B_NOT_ALLOWED;

	strlcpy(thread->name, name, B_OS_NAME_LENGTH);

	team_id teamID = thread->team->id;

	threadLocker.Unlock();

	// notify listeners
	sNotificationService.Notify(THREAD_NAME_CHANGED, teamID, id);
		// don't pass the thread structure, as it's unsafe, if it isn't ours

	return B_OK;
}


status_t
set_thread_priority(thread_id id, int32 priority)
{
	// make sure the passed in priority is within bounds
	if (priority > THREAD_MAX_SET_PRIORITY)
		priority = THREAD_MAX_SET_PRIORITY;
	if (priority < THREAD_MIN_SET_PRIORITY)
		priority = THREAD_MIN_SET_PRIORITY;

	// get the thread
	Thread* thread = Thread::GetAndLock(id);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// check whether the change is allowed
	if (thread_is_idle_thread(thread))
		return B_NOT_ALLOWED;

	return scheduler_set_thread_priority(thread, priority);
}


status_t
snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
{
	return common_snooze_etc(timeout, timebase, flags, NULL);
}


/*!	snooze() for internal kernel use only; doesn't interrupt on signals. */
status_t
snooze(bigtime_t timeout)
{
	return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
}


/*!	snooze_until() for internal kernel use only; doesn't interrupt on
	signals.
*/
status_t
snooze_until(bigtime_t timeout, int timebase)
{
	return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
}
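

// Example (sketch): sleep for 100 ms of system time; as noted above, these
// kernel variants are not interruptible by signals.
//
//	snooze(100000);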


status_t
wait_for_thread(thread_id thread, status_t *_returnCode)
{
	return wait_for_thread_etc(thread, 0, 0, _returnCode);
}


status_t
suspend_thread(thread_id id)
{
	if (id <= 0)
		return B_BAD_VALUE;

	Thread* currentThread = thread_get_current_thread();

	Signal signal(SIGSTOP, SI_USER, B_OK, currentThread->team->id);
	return send_signal_to_thread_id(id, signal, 0);
}


status_t
resume_thread(thread_id id)
{
	if (id <= 0)
		return B_BAD_VALUE;

	Thread* currentThread = thread_get_current_thread();

	// Using the kernel internal SIGNAL_CONTINUE_THREAD signal retains
	// compatibility to BeOS which documents the combination of suspend_thread()
	// and resume_thread() to interrupt threads waiting on semaphores.
	Signal signal(SIGNAL_CONTINUE_THREAD, SI_USER, B_OK,
		currentThread->team->id);
	return send_signal_to_thread_id(id, signal, 0);
}


thread_id
spawn_kernel_thread(thread_func function, const char *name, int32 priority,
	void *arg)
{
	return thread_create_thread(
		ThreadCreationAttributes(function, name, priority, arg),
		true);
}


status_t
getrlimit(int resource, struct rlimit* rlp)
{
	status_t error = common_getrlimit(resource, rlp);
	if (error != B_OK) {
		errno = error;
		return -1;
	}

	return 0;
}


status_t
setrlimit(int resource, const struct rlimit* rlp)
{
	status_t error = common_setrlimit(resource, rlp);
	if (error != B_OK) {
		errno = error;
		return -1;
	}

	return 0;
}


// #pragma mark - syscalls


void
_user_exit_thread(status_t returnValue)
{
	exit_thread(returnValue);
}


status_t
_user_kill_thread(thread_id thread)
{
	// TODO: Don't allow kernel threads to be killed!
	return kill_thread(thread);
}


status_t
_user_cancel_thread(thread_id threadID, void (*cancelFunction)(int))
{
	// check the cancel function
	if (cancelFunction == NULL || !IS_USER_ADDRESS(cancelFunction))
		return B_BAD_VALUE;

	// get and lock the thread
	Thread* thread = Thread::GetAndLock(threadID);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
	ThreadLocker threadLocker(thread, true);

	// only threads of the same team can be canceled
	if (thread->team != thread_get_current_thread()->team)
		return B_NOT_ALLOWED;

	// set the cancel function
	thread->cancel_function = cancelFunction;

	// send the cancellation signal to the thread
	InterruptsReadSpinLocker teamLocker(thread->team_lock);
	SpinLocker locker(thread->team->signal_lock);
	return send_signal_to_thread_locked(thread, SIGNAL_CANCEL_THREAD, NULL, 0);
}


status_t
_user_resume_thread(thread_id thread)
{
	// TODO: Don't allow kernel threads to be resumed!
	return resume_thread(thread);
}


status_t
_user_suspend_thread(thread_id thread)
{
	// TODO: Don't allow kernel threads to be suspended!
	return suspend_thread(thread);
}


status_t
_user_rename_thread(thread_id thread, const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	if (!IS_USER_ADDRESS(userName)
		|| userName == NULL
		|| user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
		return B_BAD_ADDRESS;

	// TODO: Don't allow kernel threads to be renamed!
	return rename_thread(thread, name);
}


status_t
_user_set_thread_priority(thread_id thread, int32 newPriority)
{
	// TODO: Don't allow setting priority of kernel threads!
	return set_thread_priority(thread, newPriority);
}


thread_id
_user_spawn_thread(thread_creation_attributes* userAttributes)
{
	// copy the userland structure to the kernel
	char nameBuffer[B_OS_NAME_LENGTH];
	ThreadCreationAttributes attributes;
	status_t error = attributes.InitFromUserAttributes(userAttributes,
		nameBuffer);
	if (error != B_OK)
		return error;

	// create the thread
	thread_id threadID = thread_create_thread(attributes, false);

	if (threadID >= 0)
		user_debug_thread_created(threadID);

	return threadID;
}


status_t
_user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags,
	bigtime_t* userRemainingTime)
{
	// We need to store more syscall restart parameters than usual and need a
	// somewhat different handling. Hence we can't use
	// syscall_restart_handle_timeout_pre() but do the job ourselves.
	struct restart_parameters {
		bigtime_t	timeout;
		clockid_t	timebase;
		uint32		flags;
	};

	Thread* thread = thread_get_current_thread();

	if ((thread->flags & THREAD_FLAGS_SYSCALL_RESTARTED) != 0) {
		// The syscall was restarted. Fetch the parameters from the stored
		// restart parameters.
		restart_parameters* restartParameters
			= (restart_parameters*)thread->syscall_restart.parameters;
		timeout = restartParameters->timeout;
		timebase = restartParameters->timebase;
		flags = restartParameters->flags;
	} else {
		// convert relative timeouts to absolute ones
		if ((flags & B_RELATIVE_TIMEOUT) != 0) {
			// not restarted yet and the flags indicate a relative timeout

			// Make sure we use the system time base, so real-time clock changes
			// won't affect our wait.
			flags &= ~(uint32)B_TIMEOUT_REAL_TIME_BASE;
			if (timebase == CLOCK_REALTIME)
				timebase = CLOCK_MONOTONIC;

			// get the current time and make the timeout absolute
			bigtime_t now;
			status_t error = user_timer_get_clock(timebase, now);
			if (error != B_OK)
				return error;

			timeout += now;

			// deal with overflow
			if (timeout < 0)
				timeout = B_INFINITE_TIMEOUT;

			flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT;
		} else
			flags |= B_ABSOLUTE_TIMEOUT;
	}

	// snooze, if necessary
	bigtime_t remainingTime;
	status_t error = common_snooze_etc(timeout, timebase,
		flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION,
		userRemainingTime != NULL ? &remainingTime : NULL);

	// If interrupted, copy the remaining time back to userland and prepare the
	// syscall restart.
	if (error == B_INTERRUPTED) {
		if (userRemainingTime != NULL
			&& (!IS_USER_ADDRESS(userRemainingTime)
				|| user_memcpy(userRemainingTime, &remainingTime,
					sizeof(remainingTime)) != B_OK)) {
			return B_BAD_ADDRESS;
		}

		// store the normalized values in the restart parameters
		restart_parameters* restartParameters
			= (restart_parameters*)thread->syscall_restart.parameters;
		restartParameters->timeout = timeout;
		restartParameters->timebase = timebase;
		restartParameters->flags = flags;

		// restart the syscall, if possible
		atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
	}

	return error;
}


void
_user_thread_yield(void)
{
	thread_yield();
}


status_t
_user_get_thread_info(thread_id id, thread_info *userInfo)
{
	thread_info info;
	status_t status;

	if (!IS_USER_ADDRESS(userInfo))
		return B_BAD_ADDRESS;

	status = _get_thread_info(id, &info, sizeof(thread_info));

	if (status >= B_OK
		&& user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


status_t
_user_get_next_thread_info(team_id team, int32 *userCookie,
	thread_info *userInfo)
{
	status_t status;
	thread_info info;
	int32 cookie;

	if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
		|| user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
		return B_BAD_ADDRESS;

	status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
	if (status < B_OK)
		return status;

	if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
		|| user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
		return B_BAD_ADDRESS;

	return status;
}


thread_id
_user_find_thread(const char *userName)
{
	char name[B_OS_NAME_LENGTH];

	if (userName == NULL)
		return find_thread(NULL);

	if (!IS_USER_ADDRESS(userName)
		|| user_strlcpy(name, userName, sizeof(name)) < B_OK)
		return B_BAD_ADDRESS;

	return find_thread(name);
}


status_t
_user_wait_for_thread(thread_id id, status_t *userReturnCode)
{
	status_t returnCode;
	status_t status;

	if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
		return B_BAD_ADDRESS;

	status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);

	if (status == B_OK && userReturnCode != NULL
		&& user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
		return B_BAD_ADDRESS;
	}

	return syscall_restart_handle_post(status);
}


bool
_user_has_data(thread_id thread)
{
	return has_data(thread);
}


status_t
_user_send_data(thread_id thread, int32 code, const void *buffer,
	size_t bufferSize)
{
	if (!IS_USER_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	return send_data_etc(thread, code, buffer, bufferSize,
		B_KILL_CAN_INTERRUPT);
		// supports userland buffers
}


status_t
_user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
{
	thread_id sender;
	status_t code;

	if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
		|| !IS_USER_ADDRESS(buffer))
		return B_BAD_ADDRESS;

	code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
		// supports userland buffers

	if (_userSender != NULL) {
		if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
			return B_BAD_ADDRESS;
	}

	return code;
}


status_t
_user_block_thread(uint32 flags, bigtime_t timeout)
{
	syscall_restart_handle_timeout_pre(flags, timeout);
	flags |= B_CAN_INTERRUPT;

	Thread* thread = thread_get_current_thread();
	ThreadLocker threadLocker(thread);

	// check, if already done
	if (thread->user_thread->wait_status <= 0)
		return thread->user_thread->wait_status;

	// nope, so wait
	thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_OTHER, "user");

	threadLocker.Unlock();

	status_t status = thread_block_with_timeout(flags, timeout);

	threadLocker.Lock();

	// Interruptions or timeouts can race with other threads unblocking us.
	// Favor a wake-up by another thread, i.e. if someone changed the wait
	// status, use that.
	status_t oldStatus = thread->user_thread->wait_status;
	if (oldStatus > 0)
		thread->user_thread->wait_status = status;
	else
		status = oldStatus;

	threadLocker.Unlock();

	return syscall_restart_handle_timeout_post(status, timeout);
}
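

// _user_block_thread() above and _user_unblock_thread()/_user_unblock_threads()
// below form the kernel half of a userland blocking primitive: the blocked
// thread parks here, and a peer stores the wake-up status in its user_thread
// area before unblocking it (see user_unblock_thread() further up).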


status_t
_user_unblock_thread(thread_id threadID, status_t status)
{
	status_t error = user_unblock_thread(threadID, status);

	if (error == B_OK)
		scheduler_reschedule_if_necessary();

	return error;
}


status_t
_user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
{
	enum {
		MAX_USER_THREADS_TO_UNBLOCK	= 128
	};

	if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
		return B_BAD_ADDRESS;
	if (count > MAX_USER_THREADS_TO_UNBLOCK)
		return B_BAD_VALUE;

	thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
	if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
		return B_BAD_ADDRESS;

	for (uint32 i = 0; i < count; i++)
		user_unblock_thread(threads[i], status);

	scheduler_reschedule_if_necessary();

	return B_OK;
}


// TODO: the following two functions don't belong here


int
_user_getrlimit(int resource, struct rlimit *urlp)
{
	struct rlimit rl;
	int ret;

	if (urlp == NULL)
		return EINVAL;

	if (!IS_USER_ADDRESS(urlp))
		return B_BAD_ADDRESS;

	ret = common_getrlimit(resource, &rl);

	if (ret == 0) {
		ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
		if (ret < 0)
			return ret;

		return 0;
	}

	return ret;
}


int
_user_setrlimit(int resource, const struct rlimit *userResourceLimit)
{
	struct rlimit resourceLimit;

	if (userResourceLimit == NULL)
		return EINVAL;

	if (!IS_USER_ADDRESS(userResourceLimit)
		|| user_memcpy(&resourceLimit, userResourceLimit,
			sizeof(struct rlimit)) < B_OK)
		return B_BAD_ADDRESS;

	return common_setrlimit(resource, &resourceLimit);