[haiku.git] / headers / private / kernel / thread_types.h
/*
 * Copyright 2004-2016, Haiku, Inc.
 * Distributed under the terms of the MIT License.
 *
 * Thread definition and structures
 */
#ifndef _KERNEL_THREAD_TYPES_H
#define _KERNEL_THREAD_TYPES_H


#ifndef _ASSEMBLER

#include <pthread.h>

#include <arch/thread_types.h>
#include <condition_variable.h>
#include <heap.h>
#include <ksignal.h>
#include <lock.h>
#include <smp.h>
#include <thread_defs.h>
#include <timer.h>
#include <UserTimer.h>
#include <user_debugger.h>
#include <util/DoublyLinkedList.h>
#include <util/KernelReferenceable.h>
#include <util/list.h>
enum additional_thread_state {
	THREAD_STATE_FREE_ON_RESCHED = 7,	// free the thread structure upon reschedule
//	THREAD_STATE_BIRTH	// thread is being created
};

#define THREAD_MIN_SET_PRIORITY		B_LOWEST_ACTIVE_PRIORITY
#define THREAD_MAX_SET_PRIORITY		B_REAL_TIME_PRIORITY
enum team_state {
	TEAM_STATE_NORMAL,		// normal state
	TEAM_STATE_BIRTH,		// being constructed
	TEAM_STATE_SHUTDOWN,	// still lives, but is going down
	TEAM_STATE_DEATH		// only the Team object still exists, threads are gone
};

#define	TEAM_FLAG_EXEC_DONE		0x01
	// team has executed exec*()
#define	TEAM_FLAG_DUMP_CORE		0x02
	// a core dump is in progress
typedef enum job_control_state {
	JOB_CONTROL_STATE_NONE,
	JOB_CONTROL_STATE_STOPPED,
	JOB_CONTROL_STATE_CONTINUED,
	JOB_CONTROL_STATE_DEAD
} job_control_state;
struct cpu_ent;
struct image;					// defined in image.c
struct io_context;
struct realtime_sem_context;	// defined in realtime_sem.cpp
struct select_info;
struct user_thread;				// defined in libroot/user_thread.h
struct VMAddressSpace;
struct xsi_sem_context;			// defined in xsi_semaphore.cpp
namespace Scheduler {
	struct ThreadData;
}

namespace BKernel {
	struct Team;
	struct Thread;
	struct ProcessGroup;
}
struct thread_death_entry {
	struct list_link	link;
	thread_id			thread;
	status_t			status;
};

struct team_loading_info {
	Thread*				thread;	// the waiting thread
	status_t			result;	// the result of the loading
	bool				done;	// set when loading is done/aborted
};

struct team_watcher {
	struct list_link	link;
	void				(*hook)(team_id team, void *data);
	void				*data;
};

#define MAX_DEAD_CHILDREN	32
	// this is a soft limit for the number of child death entries in a team
#define MAX_DEAD_THREADS	32
	// this is a soft limit for the number of thread death entries in a team
struct job_control_entry : DoublyLinkedListLinkImpl<job_control_entry> {
	job_control_state	state;		// current team job control state
	thread_id			thread;		// main thread ID == team ID
	uint16				signal;		// signal causing the current state
	bool				has_group_ref;
	uid_t				signaling_user;

	// valid while state != JOB_CONTROL_STATE_DEAD
	BKernel::Team*		team;

	// valid when state == JOB_CONTROL_STATE_DEAD
	pid_t				group_id;
	status_t			status;
	uint16				reason;		// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>
	bigtime_t			user_time;
	bigtime_t			kernel_time;

	job_control_entry();
	~job_control_entry();

	void InitDeadState();

	job_control_entry& operator=(const job_control_entry& other);
};

typedef DoublyLinkedList<job_control_entry> JobControlEntryList;
struct team_job_control_children {
	JobControlEntryList		entries;
};

struct team_dead_children : team_job_control_children {
	ConditionVariable	condition_variable;
	uint32				count;
	bigtime_t			kernel_time;
	bigtime_t			user_time;
};
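// Illustrative sketch (not part of this header): a team's dead children are
// kept in the JobControlEntryList above and can be walked with the
// DoublyLinkedList iterator. The helper function and its dprintf() output are
// hypothetical; the team's fLock is assumed to be held, since dead_children
// is documented below as protected by it.
#if 0
static void
example_list_dead_children(BKernel::Team* team)
{
	JobControlEntryList::Iterator iterator
		= team->dead_children.entries.GetIterator();
	while (job_control_entry* entry = iterator.Next()) {
		// each entry describes one dead child team (thread == team ID)
		dprintf("dead child %" B_PRId32 ": status %" B_PRId32 "\n",
			entry->thread, entry->status);
	}
}
#endif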
struct team_death_entry {
	int32				remaining_threads;
	ConditionVariable	condition;
};

struct free_user_thread {
	struct free_user_thread*	next;
	struct user_thread*			thread;
};
class AssociatedDataOwner;

class AssociatedData : public BReferenceable,
	public DoublyLinkedListLinkImpl<AssociatedData> {
public:
								AssociatedData();
	virtual						~AssociatedData();

			AssociatedDataOwner* Owner() const
									{ return fOwner; }
			void				SetOwner(AssociatedDataOwner* owner)
									{ fOwner = owner; }

	virtual	void				OwnerDeleted(AssociatedDataOwner* owner);

private:
			AssociatedDataOwner* fOwner;
};

class AssociatedDataOwner {
public:
								AssociatedDataOwner();
								~AssociatedDataOwner();

			bool				AddData(AssociatedData* data);
			bool				RemoveData(AssociatedData* data);

			void				PrepareForDeletion();

private:
			typedef DoublyLinkedList<AssociatedData> DataList;

private:
			mutex				fLock;
			DataList			fList;
};

typedef int32 (*thread_entry_func)(thread_func, void *);
namespace BKernel {


template<typename IDType>
struct TeamThreadIteratorEntry
	: DoublyLinkedListLinkImpl<TeamThreadIteratorEntry<IDType> > {
	typedef IDType	id_type;
	typedef TeamThreadIteratorEntry<id_type> iterator_type;

	id_type		id;			// -1 for iterator entries, >= 0 for actual elements
	bool		visible;	// the entry is publicly visible
};
struct Team : TeamThreadIteratorEntry<team_id>, KernelReferenceable,
		AssociatedDataOwner {
	DoublyLinkedListLink<Team> global_list_link;
	Team			*hash_next;		// next in hash
	Team			*siblings_next;	// next in parent's list; protected by
									// parent's fLock
	Team			*parent;		// write-protected by both parent (if any)
									// and this team's fLock
	Team			*children;		// protected by this team's fLock;
									// adding/removing a child also requires
									// the child's fLock
	Team			*group_next;	// protected by the group's lock

	int64			serial_number;	// immutable after adding team to hash

	// process group info -- write-protected by both the group's lock, the
	// team's lock, and the team's parent's lock
	pid_t			group_id;
	pid_t			session_id;
	ProcessGroup	*group;

	int				num_threads;	// number of threads in this team
	int				state;			// current team state, see above
	int32			flags;
	struct io_context *io_context;
	struct realtime_sem_context *realtime_sem_context;
	struct xsi_sem_context *xsi_sem_context;
	struct team_death_entry *death_entry;	// protected by fLock
	struct list		dead_threads;
	int				dead_threads_count;

	// protected by the team's fLock
	team_dead_children dead_children;
	team_job_control_children stopped_children;
	team_job_control_children continued_children;

	// protected by the parent team's fLock
	struct job_control_entry* job_control_entry;

	VMAddressSpace	*address_space;
	Thread			*main_thread;	// protected by fLock, immutable
									// after first set
	Thread			*thread_list;	// protected by fLock, signal_lock and
									// gThreadCreationLock
	struct team_loading_info *loading_info;	// protected by fLock
	struct list		image_list;		// protected by sImageMutex
	struct list		watcher_list;
	struct list		sem_list;		// protected by sSemsSpinlock
	struct list		port_list;		// protected by sPortsLock
	struct arch_team arch_info;

	addr_t			user_data;
	area_id			user_data_area;
	size_t			user_data_size;
	size_t			used_user_data;
	struct free_user_thread* free_user_threads;

	void*			commpage_address;

	struct team_debug_info debug_info;

	// protected by time_lock
	bigtime_t		dead_threads_kernel_time;
	bigtime_t		dead_threads_user_time;
	bigtime_t		cpu_clock_offset;
	spinlock		time_lock;

	// user group information; protected by fLock
	uid_t			saved_set_uid;
	uid_t			real_uid;
	uid_t			effective_uid;
	gid_t			saved_set_gid;
	gid_t			real_gid;
	gid_t			effective_gid;
	gid_t*			supplementary_groups;
	int				supplementary_group_count;

	// Exit status information. Set when the first terminal event occurs,
	// immutable afterwards. Protected by fLock.
	struct {
		uint16		reason;			// reason for the team's demise, one of the
									// CLD_* values defined in <signal.h>
		uint16		signal;			// signal killing the team
		uid_t		signaling_user;	// real UID of the signal sender
		status_t	status;			// exit status, if normal team exit
		bool		initialized;	// true when the state has been initialized
	} exit;

	spinlock		signal_lock;
public:
								~Team();

	static	Team*				Create(team_id id, const char* name,
									bool kernel);
	static	Team*				Get(team_id id);
	static	Team*				GetAndLock(team_id id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			void				LockTeamAndParent(bool dontLockParentIfKernel);
			void				UnlockTeamAndParent();
			void				LockTeamAndProcessGroup();
			void				UnlockTeamAndProcessGroup();
			void				LockTeamParentAndProcessGroup();
			void				UnlockTeamParentAndProcessGroup();
			void				LockProcessGroup()
									{ LockTeamAndProcessGroup(); Unlock(); }

			const char*			Name() const	{ return fName; }
			void				SetName(const char* name);

			const char*			Args() const	{ return fArgs; }
			void				SetArgs(const char* args);
			void				SetArgs(const char* path,
									const char* const* otherArgs,
									int otherArgCount);

			BKernel::QueuedSignalsCounter* QueuedSignalsCounter() const
									{ return fQueuedSignalsCounter; }
			sigset_t			PendingSignals() const
									{ return fPendingSignals.AllSignals(); }

			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			struct sigaction&	SignalActionFor(int32 signal)
									{ return fSignalActions[signal - 1]; }
			void				InheritSignalActions(Team* parent);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			bool				CheckAddUserDefinedTimer();
			void				UserDefinedTimersRemoved(int32 count);

			void				UserTimerActivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerActivated(TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(TeamTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				UserTimerDeactivated(
									TeamUserTimeUserTimer* timer)
									{ fUserTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
									// both total and user CPU timers
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			bool				HasActiveUserTimeUserTimers() const
									{ return !fUserTimeUserTimers.IsEmpty(); }
			TeamTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }
	inline	TeamUserTimeUserTimerList::ConstIterator
									UserTimeUserTimerIterator() const;

			bigtime_t			CPUTime(bool ignoreCurrentRun,
									Thread* lockedThread = NULL) const;
			bigtime_t			UserCPUTime() const;

			ConditionVariable*	CoreDumpCondition() const
									{ return fCoreDumpCondition; }
			void				SetCoreDumpCondition(
									ConditionVariable* condition)
									{ fCoreDumpCondition = condition; }

private:
								Team(team_id id, bool kernel);

private:
			mutex				fLock;
			char				fName[B_OS_NAME_LENGTH];
			char				fArgs[64];
									// contents for the team_info::args field

			BKernel::QueuedSignalsCounter* fQueuedSignalsCounter;
			BKernel::PendingSignals	fPendingSignals;
									// protected by signal_lock
			struct sigaction	fSignalActions[MAX_SIGNAL_NUMBER];
									// indexed signal - 1, protected by fLock

			UserTimerList		fUserTimers;	// protected by fLock
			TeamTimeUserTimerList fCPUTimeUserTimers;
									// protected by scheduler lock
			TeamUserTimeUserTimerList fUserTimeUserTimers;
			int32				fUserDefinedTimerCount;	// accessed atomically

			ConditionVariable*	fCoreDumpCondition;
									// protected by fLock
};
struct Thread : TeamThreadIteratorEntry<thread_id>, KernelReferenceable {
	int32			flags;			// summary of events relevant in interrupt
									// handlers (signals pending, user debugging
									// enabled, etc.)
	int64			serial_number;	// immutable after adding thread to hash
	Thread			*hash_next;		// protected by thread hash lock
	Thread			*team_next;		// protected by team lock and fLock
	char			name[B_OS_NAME_LENGTH];	// protected by fLock
	bool			going_to_suspend;	// protected by scheduler lock
	int32			priority;		// protected by scheduler lock
	int32			io_priority;	// protected by fLock
	int32			state;			// protected by scheduler lock
	struct cpu_ent	*cpu;			// protected by scheduler lock
	struct cpu_ent	*previous_cpu;	// protected by scheduler lock
	int32			pinned_to_cpu;	// only accessed by this thread or in the
									// scheduler, when thread is not running
	spinlock		scheduler_lock;

	sigset_t		sig_block_mask;	// protected by team->signal_lock,
									// only modified by the thread itself
	sigset_t		sigsuspend_original_unblocked_mask;
		// non-0 after a return from _user_sigsuspend(), containing the
		// inverted original signal mask, reset in handle_signals(); only
		// accessed by this thread
	ucontext_t*		user_signal_context;	// only accessed by this thread
	addr_t			signal_stack_base;		// only accessed by this thread
	size_t			signal_stack_size;		// only accessed by this thread
	bool			signal_stack_enabled;	// only accessed by this thread

	bool			in_kernel;		// protected by time_lock, only written by
									// this thread
	bool			has_yielded;	// protected by scheduler lock
	Scheduler::ThreadData*	scheduler_data;	// protected by scheduler lock

	struct user_thread*	user_thread;	// write-protected by fLock, only
										// modified by the thread itself and
										// thus freely readable by it

	void			(*cancel_function)(int);

	struct {
		uint8		parameters[SYSCALL_RESTART_PARAMETER_SIZE];
	} syscall_restart;

	struct {
		status_t	status;			// current wait status
		uint32		flags;			// interruptible flags
		uint32		type;			// type of the object waited on
		const void*	object;			// pointer to the object waited on
		timer		unblock_timer;	// timer for block with timeout
	} wait;

	struct PrivateConditionVariableEntry *condition_variable_entry;

	struct {
		sem_id		write_sem;		// acquired by writers before writing
		sem_id		read_sem;		// released by writers after writing,
									// acquired by this thread when reading
		thread_id	sender;
		int32		code;
		size_t		size;
		void*		buffer;
	} msg;	// write_sem/read_sem are protected by fLock when accessed by
			// others, the other fields are protected by write_sem/read_sem

	void			(*fault_handler)(void);
	jmp_buf			fault_handler_state;
	int32			page_faults_allowed;
		/* this field may only stay in debug builds in the future */

	BKernel::Team	*team;			// protected by team lock, thread lock,
									// scheduler lock, team_lock
	rw_spinlock		team_lock;

	struct {
		sem_id		sem;			// immutable after thread creation
		status_t	status;			// accessed only by this thread
		struct list	waiters;		// protected by fLock
	} exit;

	struct select_info *select_infos;	// protected by fLock

	struct thread_debug_info debug_info;

	// stack
	area_id			kernel_stack_area;	// immutable after thread creation
	addr_t			kernel_stack_base;	// immutable after thread creation
	addr_t			kernel_stack_top;	// immutable after thread creation
	area_id			user_stack_area;	// protected by thread lock
	addr_t			user_stack_base;	// protected by thread lock
	size_t			user_stack_size;	// protected by thread lock

	addr_t			user_local_storage;
		// usually allocated at the safe side of the stack
	int				kernel_errno;
		// kernel "errno" differs from its userspace alter ego

	// user_time, kernel_time, and last_time are only written by the thread
	// itself, so they can be read by the thread without lock. Holding the
	// scheduler lock and checking that the thread does not run also guarantees
	// that the times will not change.
	spinlock		time_lock;
	bigtime_t		user_time;			// protected by time_lock
	bigtime_t		kernel_time;		// protected by time_lock
	bigtime_t		last_time;			// protected by time_lock
	bigtime_t		cpu_clock_offset;	// protected by time_lock

	void			(*post_interrupt_callback)(void*);
	void*			post_interrupt_data;

	// architecture dependent section
	struct arch_thread arch_info;
public:
								Thread() {}
									// dummy for the idle threads
								Thread(const char *name, thread_id threadID,
									struct cpu_ent *cpu);
								~Thread();

	static	status_t			Create(const char* name, Thread*& _thread);

	static	Thread*				Get(thread_id id);
	static	Thread*				GetAndLock(thread_id id);
	static	Thread*				GetDebug(thread_id id);
									// in kernel debugger only

	static	bool				IsAlive(thread_id id);

			void*				operator new(size_t size);
			void*				operator new(size_t, void* pointer);
			void				operator delete(void* pointer, size_t size);

			status_t			Init(bool idleThread);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			void				UnlockAndReleaseReference()
									{ Unlock(); ReleaseReference(); }

			bool				IsAlive() const;

			bool				IsRunning() const
									{ return cpu != NULL; }
									// scheduler lock must be held

			sigset_t			ThreadPendingSignals() const
									{ return fPendingSignals.AllSignals(); }
	inline	sigset_t			AllPendingSignals() const;
			void				AddPendingSignal(int signal)
									{ fPendingSignals.AddSignal(signal); }
			void				AddPendingSignal(Signal* signal)
									{ fPendingSignals.AddSignal(signal); }
			void				RemovePendingSignal(int signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignal(Signal* signal)
									{ fPendingSignals.RemoveSignal(signal); }
			void				RemovePendingSignals(sigset_t mask)
									{ fPendingSignals.RemoveSignals(mask); }
			void				ResetSignalsOnExec();

	inline	int32				HighestPendingSignalPriority(
									sigset_t nonBlocked) const;
	inline	Signal*				DequeuePendingSignal(sigset_t nonBlocked,
									Signal& buffer);

			// user timers -- protected by fLock
			UserTimer*			UserTimerFor(int32 id) const
									{ return fUserTimers.TimerFor(id); }
			status_t			AddUserTimer(UserTimer* timer);
			void				RemoveUserTimer(UserTimer* timer);
			void				DeleteUserTimers(bool userDefinedOnly);

			void				UserTimerActivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Add(timer); }
			void				UserTimerDeactivated(ThreadTimeUserTimer* timer)
									{ fCPUTimeUserTimers.Remove(timer); }
			void				DeactivateCPUTimeUserTimers();
			bool				HasActiveCPUTimeUserTimers() const
									{ return !fCPUTimeUserTimers.IsEmpty(); }
			ThreadTimeUserTimerList::ConstIterator
									CPUTimeUserTimerIterator() const
									{ return fCPUTimeUserTimers.GetIterator(); }

	inline	bigtime_t			CPUTime(bool ignoreCurrentRun) const;

private:
			mutex				fLock;

			BKernel::PendingSignals	fPendingSignals;
									// protected by team->signal_lock

			UserTimerList		fUserTimers;	// protected by fLock
			ThreadTimeUserTimerList	fCPUTimeUserTimers;
									// protected by time_lock
};
struct ProcessSession : BReferenceable {
	pid_t				id;
	int32				controlling_tty;	// index of the controlling tty,
											// -1 if none
	pid_t				foreground_group;

public:
								ProcessSession(pid_t id);
								~ProcessSession();

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

private:
			mutex				fLock;
};
struct ProcessGroup : KernelReferenceable {
	struct ProcessGroup *next;		// next in hash
	pid_t				id;
	BKernel::Team		*teams;

public:
								ProcessGroup(pid_t id);
								~ProcessGroup();

	static	ProcessGroup*		Get(pid_t id);

			bool				Lock()
									{ mutex_lock(&fLock); return true; }
			bool				TryLock()
									{ return mutex_trylock(&fLock) == B_OK; }
			void				Unlock()
									{ mutex_unlock(&fLock); }

			ProcessSession*		Session() const
									{ return fSession; }
			void				Publish(ProcessSession* session);
			void				PublishLocked(ProcessSession* session);

			bool				IsOrphaned() const;

			void				ScheduleOrphanedCheck();
			void				UnsetOrphanedCheck();

public:
			SinglyLinkedListLink<ProcessGroup> fOrphanedCheckListLink;

private:
			mutex				fLock;
			ProcessSession*		fSession;
			bool				fInOrphanedCheckList;
									// protected by sOrphanedCheckLock
};

typedef SinglyLinkedList<ProcessGroup,
	SinglyLinkedListMemberGetLink<ProcessGroup,
		&ProcessGroup::fOrphanedCheckListLink> > ProcessGroupList;
/*!	\brief Allows iterating through all teams.
*/
struct TeamListIterator {
								TeamListIterator();
								~TeamListIterator();

			Team*				Next();

private:
			TeamThreadIteratorEntry<team_id> fEntry;
};


/*!	\brief Allows iterating through all threads.
*/
struct ThreadListIterator {
								ThreadListIterator();
								~ThreadListIterator();

			Thread*				Next();

private:
			TeamThreadIteratorEntry<thread_id> fEntry;
};
inline int32
Team::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Team::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}


inline TeamUserTimeUserTimerList::ConstIterator
Team::UserTimeUserTimerIterator() const
{
	return fUserTimeUserTimers.GetIterator();
}


inline sigset_t
Thread::AllPendingSignals() const
{
	return fPendingSignals.AllSignals() | team->PendingSignals();
}


inline int32
Thread::HighestPendingSignalPriority(sigset_t nonBlocked) const
{
	return fPendingSignals.HighestSignalPriority(nonBlocked);
}


inline Signal*
Thread::DequeuePendingSignal(sigset_t nonBlocked, Signal& buffer)
{
	return fPendingSignals.DequeueSignal(nonBlocked, buffer);
}
/*!	Returns the thread's current total CPU time (kernel + user + offset).

	The caller must hold \c time_lock.

	\param ignoreCurrentRun If \c true and the thread is currently running,
		don't add the time since the last time \c last_time was updated. Should
		be used in "thread unscheduled" scheduler callbacks, since although the
		thread is still running at that time, its time has already been stopped.
	\return The thread's current total CPU time.
*/
inline bigtime_t
Thread::CPUTime(bool ignoreCurrentRun) const
{
	bigtime_t time = user_time + kernel_time + cpu_clock_offset;

	// If currently running, also add the time since the last check, unless
	// requested otherwise.
	if (!ignoreCurrentRun && last_time != 0)
		time += system_time() - last_time;

	return time;
}
}	// namespace BKernel


using BKernel::Team;
using BKernel::TeamListIterator;
using BKernel::Thread;
using BKernel::ThreadListIterator;
using BKernel::ProcessSession;
using BKernel::ProcessGroup;
using BKernel::ProcessGroupList;
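// Illustrative sketch (not part of this header's API surface): Team::GetAndLock()
// hands back the team with a reference acquired and its fLock held, so callers
// typically pair it with UnlockAndReleaseReference(). The helper function below
// and its name are hypothetical.
#if 0
static status_t
example_rename_team(team_id id, const char* newName)
{
	Team* team = Team::GetAndLock(id);
		// acquires a reference to the team and locks it
	if (team == NULL)
		return B_BAD_TEAM_ID;

	team->SetName(newName);
		// operate on the team while it is locked

	team->UnlockAndReleaseReference();
		// drops fLock and the reference acquired above
	return B_OK;
}
#endif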
#endif	// !_ASSEMBLER


// bits for the thread::flags field
#define	THREAD_FLAGS_SIGNALS_PENDING		0x0001
	// unblocked signals are pending (computed flag for optimization purposes)
#define	THREAD_FLAGS_DEBUG_THREAD			0x0002
	// forces the thread into the debugger as soon as possible (set by
	// debug_thread())
#define	THREAD_FLAGS_SINGLE_STEP			0x0004
	// indicates that the thread is in single-step mode (in userland)
#define	THREAD_FLAGS_DEBUGGER_INSTALLED		0x0008
	// a debugger is installed for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_DEFINED	0x0010
	// hardware breakpoints are defined for the current team (computed flag for
	// optimization purposes)
#define	THREAD_FLAGS_BREAKPOINTS_INSTALLED	0x0020
	// breakpoints are currently installed for the thread (i.e. the hardware is
	// actually set up to trigger debug events for them)
#define	THREAD_FLAGS_64_BIT_SYSCALL_RETURN	0x0040
	// set by 64 bit return value syscalls
#define	THREAD_FLAGS_RESTART_SYSCALL		0x0080
	// set by handle_signals(), if the current syscall shall be restarted
#define	THREAD_FLAGS_DONT_RESTART_SYSCALL	0x0100
	// explicitly disables automatic syscall restarts (e.g. resume_thread())
#define	THREAD_FLAGS_ALWAYS_RESTART_SYSCALL	0x0200
	// force syscall restart, even if a signal handler without SA_RESTART was
	// invoked (e.g. sigwait())
#define	THREAD_FLAGS_SYSCALL_RESTARTED		0x0400
	// the current syscall has been restarted
#define	THREAD_FLAGS_SYSCALL				0x0800
	// the thread is currently in a syscall; set/reset only for certain
	// functions (e.g. ioctl()) to allow inner functions to discriminate
	// whether e.g. parameters were passed from userland or kernel
#define	THREAD_FLAGS_TRAP_FOR_CORE_DUMP		0x1000
	// core dump in progress; the thread shall not exit the kernel to userland,
	// but shall invoke core_dump_trap_thread() instead.
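// Illustrative sketch (not actual kernel code): thread::flags is an int32 that
// interrupt handlers may inspect concurrently, so the bits are best read and
// updated with the atomic_*() primitives. The helper below is hypothetical.
#if 0
static inline void
example_mark_signals_pending(Thread* thread)
{
	// set the computed "unblocked signals are pending" bit
	atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);

	// test whether the thread was asked to enter the debugger ASAP
	if ((atomic_get(&thread->flags) & THREAD_FLAGS_DEBUG_THREAD) != 0) {
		// ... hand the thread over to the debugger on its way to userland
	}
}
#endif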
#endif	/* _KERNEL_THREAD_TYPES_H */