/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */
5 #ifndef KERNEL_SCHEDULER_THREAD_H
6 #define KERNEL_SCHEDULER_THREAD_H
#include <thread.h>
#include <util/AutoLock.h>

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_locking.h"
#include "scheduler_profiler.h"
21 struct ThreadData
: public DoublyLinkedListLinkImpl
<ThreadData
>,
22 RunQueueLinkImpl
<ThreadData
> {
24 inline void _InitBase();
26 inline int32
_GetMinimalPriority() const;
28 inline CoreEntry
* _ChooseCore() const;
29 inline CPUEntry
* _ChooseCPU(CoreEntry
* core
,
30 bool& rescheduleNeeded
) const;
33 ThreadData(Thread
* thread
);
36 void Init(CoreEntry
* core
);
40 inline int32
GetPriority() const { return fThread
->priority
; }
41 inline Thread
* GetThread() const { return fThread
; }
43 inline bool IsRealTime() const;
44 inline bool IsIdle() const;
46 inline bool HasCacheExpired() const;
47 inline CoreEntry
* Rebalance() const;
49 inline int32
GetEffectivePriority() const;
51 inline void StartCPUTime();
52 inline void StopCPUTime();
54 inline void CancelPenalty();
55 inline bool ShouldCancelPenalty() const;
57 bool ChooseCoreAndCPU(CoreEntry
*& targetCore
,
58 CPUEntry
*& targetCPU
);
60 inline void SetLastInterruptTime(bigtime_t interruptTime
)
61 { fLastInterruptTime
= interruptTime
; }
62 inline void SetStolenInterruptTime(bigtime_t interruptTime
);
64 bigtime_t
ComputeQuantum() const;
65 inline bigtime_t
GetQuantumLeft();
66 inline void StartQuantum();
67 inline bool HasQuantumEnded(bool wasPreempted
, bool hasYielded
);
69 inline void Continues();
70 inline void GoesAway();
73 inline bigtime_t
WentSleep() const { return fWentSleep
; }
74 inline bigtime_t
WentSleepActive() const { return fWentSleepActive
; }
76 inline void PutBack();
77 inline void Enqueue();
78 inline bool Dequeue();
80 inline void UpdateActivity(bigtime_t active
);
82 inline bool IsEnqueued() const { return fEnqueued
; }
83 inline void SetDequeued() { fEnqueued
= false; }
85 inline int32
GetLoad() const { return fNeededLoad
; }
87 inline CoreEntry
* Core() const { return fCore
; }
88 void UnassignCore(bool running
= false);
90 static void ComputeQuantumLengths();
93 inline void _IncreasePenalty();
94 inline int32
_GetPenalty() const;
96 void _ComputeNeededLoad();
98 void _ComputeEffectivePriority() const;
100 static bigtime_t
_ScaleQuantum(bigtime_t maxQuantum
,
101 bigtime_t minQuantum
, int32 maxPriority
,
102 int32 minPriority
, int32 priority
);
104 bigtime_t fStolenTime
;
105 bigtime_t fQuantumStart
;
106 bigtime_t fLastInterruptTime
;
108 bigtime_t fWentSleep
;
109 bigtime_t fWentSleepActive
;
116 int32 fPriorityPenalty
;
117 int32 fAdditionalPenalty
;
119 mutable int32 fEffectivePriority
;
120 mutable bigtime_t fBaseQuantum
;
124 bigtime_t fMeasureAvailableActiveTime
;
125 bigtime_t fMeasureAvailableTime
;
126 bigtime_t fLastMeasureAvailableTime
;
129 uint32 fLoadMeasurementEpoch
;
134 class ThreadProcessing
{
136 virtual ~ThreadProcessing();
138 virtual void operator()(ThreadData
* thread
) = 0;
143 ThreadData::_GetMinimalPriority() const
145 SCHEDULER_ENTER_FUNCTION();
147 const int32 kDivisor
= 5;
149 const int32 kMaximalPriority
= 25;
150 const int32 kMinimalPriority
= B_LOWEST_ACTIVE_PRIORITY
;
152 int32 priority
= GetPriority() / kDivisor
;
153 return std::max(std::min(priority
, kMaximalPriority
), kMinimalPriority
);
158 ThreadData::IsRealTime() const
160 return GetPriority() >= B_FIRST_REAL_TIME_PRIORITY
;
165 ThreadData::IsIdle() const
167 return GetPriority() == B_IDLE_PRIORITY
;
172 ThreadData::HasCacheExpired() const
174 SCHEDULER_ENTER_FUNCTION();
175 return gCurrentMode
->has_cache_expired(this);
180 ThreadData::Rebalance() const
182 SCHEDULER_ENTER_FUNCTION();
184 ASSERT(!gSingleCore
);
185 return gCurrentMode
->rebalance(this);
190 ThreadData::GetEffectivePriority() const
192 SCHEDULER_ENTER_FUNCTION();
193 return fEffectivePriority
;
198 ThreadData::_IncreasePenalty()
200 SCHEDULER_ENTER_FUNCTION();
202 if (IsIdle() || IsRealTime())
205 TRACE("increasing thread %ld penalty\n", fThread
->id
);
207 int32 oldPenalty
= fPriorityPenalty
++;
208 const int kMinimalPriority
= _GetMinimalPriority();
209 if (GetPriority() - oldPenalty
<= kMinimalPriority
)
210 fPriorityPenalty
= oldPenalty
;
212 _ComputeEffectivePriority();
217 ThreadData::StartCPUTime()
219 SCHEDULER_ENTER_FUNCTION();
221 SpinLocker
threadTimeLocker(fThread
->time_lock
);
222 fThread
->last_time
= system_time();
227 ThreadData::StopCPUTime()
229 SCHEDULER_ENTER_FUNCTION();
231 // User time is tracked in thread_at_kernel_entry()
232 SpinLocker
threadTimeLocker(fThread
->time_lock
);
233 fThread
->kernel_time
+= system_time() - fThread
->last_time
;
234 fThread
->last_time
= 0;
235 threadTimeLocker
.Unlock();
237 // If the old thread's team has user time timers, check them now.
238 Team
* team
= fThread
->team
;
239 SpinLocker
teamTimeLocker(team
->time_lock
);
240 if (team
->HasActiveUserTimeUserTimers())
241 user_timer_check_team_user_timers(team
);
246 ThreadData::CancelPenalty()
248 SCHEDULER_ENTER_FUNCTION();
250 int32 oldPenalty
= fPriorityPenalty
;
251 fPriorityPenalty
= 0;
253 if (oldPenalty
!= 0) {
254 TRACE("cancelling thread %ld penalty\n", fThread
->id
);
255 _ComputeEffectivePriority();
261 ThreadData::ShouldCancelPenalty() const
263 SCHEDULER_ENTER_FUNCTION();
267 return system_time() - fWentSleep
> gCurrentMode
->base_quantum
/ 2;
272 ThreadData::SetStolenInterruptTime(bigtime_t interruptTime
)
274 SCHEDULER_ENTER_FUNCTION();
276 interruptTime
-= fLastInterruptTime
;
277 fStolenTime
+= interruptTime
;
282 ThreadData::GetQuantumLeft()
284 SCHEDULER_ENTER_FUNCTION();
286 bigtime_t stolenTime
= std::min(fStolenTime
, gCurrentMode
->minimal_quantum
);
287 ASSERT(stolenTime
>= 0);
288 fStolenTime
-= stolenTime
;
290 bigtime_t quantum
= ComputeQuantum() - fTimeUsed
;
291 quantum
+= stolenTime
;
292 quantum
= std::max(quantum
, gCurrentMode
->minimal_quantum
);
299 ThreadData::StartQuantum()
301 SCHEDULER_ENTER_FUNCTION();
302 fQuantumStart
= system_time();
307 ThreadData::HasQuantumEnded(bool wasPreempted
, bool hasYielded
)
309 SCHEDULER_ENTER_FUNCTION();
311 bigtime_t timeUsed
= system_time() - fQuantumStart
;
312 ASSERT(timeUsed
>= 0);
313 fTimeUsed
+= timeUsed
;
315 bigtime_t timeLeft
= ComputeQuantum() - fTimeUsed
;
316 timeLeft
= std::max(bigtime_t(0), timeLeft
);
318 // too little time left, it's better make the next quantum a bit longer
319 bigtime_t skipTime
= gCurrentMode
->minimal_quantum
/ 2;
320 if (hasYielded
|| wasPreempted
|| timeLeft
<= skipTime
) {
321 fStolenTime
+= timeLeft
;
326 fAdditionalPenalty
++;
337 ThreadData::Continues()
339 SCHEDULER_ENTER_FUNCTION();
343 _ComputeNeededLoad();
348 ThreadData::GoesAway()
350 SCHEDULER_ENTER_FUNCTION();
354 if (!HasQuantumEnded(false, false)) {
355 fAdditionalPenalty
++;
356 _ComputeEffectivePriority();
359 fLastInterruptTime
= 0;
361 fWentSleep
= system_time();
362 fWentSleepActive
= fCore
->GetActiveTime();
365 fLoadMeasurementEpoch
= fCore
->RemoveLoad(fNeededLoad
, false);
373 SCHEDULER_ENTER_FUNCTION();
377 fCore
->RemoveLoad(fNeededLoad
, true);
383 ThreadData::PutBack()
385 SCHEDULER_ENTER_FUNCTION();
387 int32 priority
= GetEffectivePriority();
389 if (fThread
->pinned_to_cpu
> 0) {
390 ASSERT(fThread
->cpu
!= NULL
);
391 CPUEntry
* cpu
= CPUEntry::GetCPU(fThread
->cpu
->cpu_num
);
393 CPURunQueueLocker
_(cpu
);
397 cpu
->PushFront(this, priority
);
399 CoreRunQueueLocker
_(fCore
);
403 fCore
->PushFront(this, priority
);
409 ThreadData::Enqueue()
411 SCHEDULER_ENTER_FUNCTION();
414 if (gTrackCoreLoad
) {
415 bigtime_t timeSlept
= system_time() - fWentSleep
;
416 bool updateLoad
= timeSlept
> 0;
418 fCore
->AddLoad(fNeededLoad
, fLoadMeasurementEpoch
, !updateLoad
);
420 fMeasureAvailableTime
+= timeSlept
;
421 _ComputeNeededLoad();
428 fThread
->state
= B_THREAD_READY
;
430 int32 priority
= GetEffectivePriority();
432 if (fThread
->pinned_to_cpu
> 0) {
433 ASSERT(fThread
->previous_cpu
!= NULL
);
434 CPUEntry
* cpu
= CPUEntry::GetCPU(fThread
->previous_cpu
->cpu_num
);
436 CPURunQueueLocker
_(cpu
);
440 cpu
->PushBack(this, priority
);
442 CoreRunQueueLocker
_(fCore
);
446 fCore
->PushBack(this, priority
);
452 ThreadData::Dequeue()
454 SCHEDULER_ENTER_FUNCTION();
456 if (fThread
->pinned_to_cpu
> 0) {
457 ASSERT(fThread
->previous_cpu
!= NULL
);
458 CPUEntry
* cpu
= CPUEntry::GetCPU(fThread
->previous_cpu
->cpu_num
);
460 CPURunQueueLocker
_(cpu
);
468 CoreRunQueueLocker
_(fCore
);
479 ThreadData::UpdateActivity(bigtime_t active
)
481 SCHEDULER_ENTER_FUNCTION();
486 fMeasureAvailableTime
+= active
;
487 fMeasureAvailableActiveTime
+= active
;
491 } // namespace Scheduler
494 #endif // KERNEL_SCHEDULER_THREAD_H