src/system/kernel/scheduler/scheduler_thread.h

/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_SCHEDULER_THREAD_H
#define KERNEL_SCHEDULER_THREAD_H


#include <thread.h>
#include <util/AutoLock.h>

#include "scheduler_common.h"
#include "scheduler_cpu.h"
#include "scheduler_locking.h"
#include "scheduler_profiler.h"


namespace Scheduler {


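// Per-thread scheduler state: priority penalties, quantum accounting, load
// estimates and the core/CPU the thread is currently assigned to.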
struct ThreadData : public DoublyLinkedListLinkImpl<ThreadData>,
    RunQueueLinkImpl<ThreadData> {
private:
    inline void _InitBase();

    inline int32 _GetMinimalPriority() const;

    inline CoreEntry* _ChooseCore() const;
    inline CPUEntry* _ChooseCPU(CoreEntry* core,
        bool& rescheduleNeeded) const;

public:
    ThreadData(Thread* thread);

    void Init();
    void Init(CoreEntry* core);

    void Dump() const;

    inline int32 GetPriority() const { return fThread->priority; }
    inline Thread* GetThread() const { return fThread; }

    inline bool IsRealTime() const;
    inline bool IsIdle() const;

    inline bool HasCacheExpired() const;
    inline CoreEntry* Rebalance() const;

    inline int32 GetEffectivePriority() const;

    inline void StartCPUTime();
    inline void StopCPUTime();

    inline void CancelPenalty();
    inline bool ShouldCancelPenalty() const;

    bool ChooseCoreAndCPU(CoreEntry*& targetCore,
        CPUEntry*& targetCPU);

    inline void SetLastInterruptTime(bigtime_t interruptTime)
        { fLastInterruptTime = interruptTime; }
    inline void SetStolenInterruptTime(bigtime_t interruptTime);

    bigtime_t ComputeQuantum() const;
    inline bigtime_t GetQuantumLeft();
    inline void StartQuantum();
    inline bool HasQuantumEnded(bool wasPreempted, bool hasYielded);

    inline void Continues();
    inline void GoesAway();
    inline void Dies();

    inline bigtime_t WentSleep() const { return fWentSleep; }
    inline bigtime_t WentSleepActive() const { return fWentSleepActive; }

    inline void PutBack();
    inline void Enqueue();
    inline bool Dequeue();

    inline void UpdateActivity(bigtime_t active);

    inline bool IsEnqueued() const { return fEnqueued; }
    inline void SetDequeued() { fEnqueued = false; }

    inline int32 GetLoad() const { return fNeededLoad; }

    inline CoreEntry* Core() const { return fCore; }
    void UnassignCore(bool running = false);

    static void ComputeQuantumLengths();

private:
    inline void _IncreasePenalty();
    inline int32 _GetPenalty() const;

    void _ComputeNeededLoad();

    void _ComputeEffectivePriority() const;

    static bigtime_t _ScaleQuantum(bigtime_t maxQuantum,
        bigtime_t minQuantum, int32 maxPriority,
        int32 minPriority, int32 priority);

    bigtime_t fStolenTime;
    bigtime_t fQuantumStart;
    bigtime_t fLastInterruptTime;

    bigtime_t fWentSleep;
    bigtime_t fWentSleepActive;

    bool fEnqueued;
    bool fReady;

    Thread* fThread;

    int32 fPriorityPenalty;
    int32 fAdditionalPenalty;

    mutable int32 fEffectivePriority;
    mutable bigtime_t fBaseQuantum;

    bigtime_t fTimeUsed;

    bigtime_t fMeasureAvailableActiveTime;
    bigtime_t fMeasureAvailableTime;
    bigtime_t fLastMeasureAvailableTime;

    int32 fNeededLoad;
    uint32 fLoadMeasurementEpoch;

    CoreEntry* fCore;
};


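// Abstract functor applied to a set of threads; presumably implemented by
// scheduler paths that need to visit each ThreadData in turn.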
class ThreadProcessing {
public:
    virtual ~ThreadProcessing();

    virtual void operator()(ThreadData* thread) = 0;
};


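// The minimal priority is a floor below which penalties may not push a
// thread: one fifth of its static priority, clamped to the range
// [B_LOWEST_ACTIVE_PRIORITY, 25].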
inline int32
ThreadData::_GetMinimalPriority() const
{
    SCHEDULER_ENTER_FUNCTION();

    const int32 kDivisor = 5;

    const int32 kMaximalPriority = 25;
    const int32 kMinimalPriority = B_LOWEST_ACTIVE_PRIORITY;

    int32 priority = GetPriority() / kDivisor;
    return std::max(std::min(priority, kMaximalPriority), kMinimalPriority);
}


inline bool
ThreadData::IsRealTime() const
{
    return GetPriority() >= B_FIRST_REAL_TIME_PRIORITY;
}


inline bool
ThreadData::IsIdle() const
{
    return GetPriority() == B_IDLE_PRIORITY;
}


inline bool
ThreadData::HasCacheExpired() const
{
    SCHEDULER_ENTER_FUNCTION();
    return gCurrentMode->has_cache_expired(this);
}


inline CoreEntry*
ThreadData::Rebalance() const
{
    SCHEDULER_ENTER_FUNCTION();

    ASSERT(!gSingleCore);
    return gCurrentMode->rebalance(this);
}


inline int32
ThreadData::GetEffectivePriority() const
{
    SCHEDULER_ENTER_FUNCTION();

    return fEffectivePriority;
}


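// Each penalty point lowers the effective priority by one. The penalty
// stops growing once it would take the thread below its minimal priority;
// idle and real-time threads are never penalized.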
inline void
ThreadData::_IncreasePenalty()
{
    SCHEDULER_ENTER_FUNCTION();

    if (IsIdle() || IsRealTime())
        return;

    TRACE("increasing thread %ld penalty\n", fThread->id);

    int32 oldPenalty = fPriorityPenalty++;
    const int kMinimalPriority = _GetMinimalPriority();
    if (GetPriority() - oldPenalty <= kMinimalPriority)
        fPriorityPenalty = oldPenalty;

    _ComputeEffectivePriority();
}


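// CPU time accounting: StartCPUTime() stamps the moment the thread gets the
// CPU, StopCPUTime() charges the elapsed time to the thread's kernel time.
// Both take the thread's time_lock.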
inline void
ThreadData::StartCPUTime()
{
    SCHEDULER_ENTER_FUNCTION();

    SpinLocker threadTimeLocker(fThread->time_lock);
    fThread->last_time = system_time();
}


inline void
ThreadData::StopCPUTime()
{
    SCHEDULER_ENTER_FUNCTION();

    // User time is tracked in thread_at_kernel_entry()
    SpinLocker threadTimeLocker(fThread->time_lock);
    fThread->kernel_time += system_time() - fThread->last_time;
    fThread->last_time = 0;
    threadTimeLocker.Unlock();

    // If the old thread's team has user time timers, check them now.
    Team* team = fThread->team;
    SpinLocker teamTimeLocker(team->time_lock);
    if (team->HasActiveUserTimeUserTimers())
        user_timer_check_team_user_timers(team);
}


inline void
ThreadData::CancelPenalty()
{
    SCHEDULER_ENTER_FUNCTION();

    int32 oldPenalty = fPriorityPenalty;
    fPriorityPenalty = 0;

    if (oldPenalty != 0) {
        TRACE("cancelling thread %ld penalty\n", fThread->id);
        _ComputeEffectivePriority();
    }
}


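// A thread that has slept for more than half the base quantum is deemed to
// have given up the CPU voluntarily and earns its penalty back.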
inline bool
ThreadData::ShouldCancelPenalty() const
{
    SCHEDULER_ENTER_FUNCTION();

    if (fCore == NULL)
        return false;
    return system_time() - fWentSleep > gCurrentMode->base_quantum / 2;
}


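// Time spent in interrupt handlers is accounted as stolen from the current
// thread's quantum, so that GetQuantumLeft() can repay it later.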
inline void
ThreadData::SetStolenInterruptTime(bigtime_t interruptTime)
{
    SCHEDULER_ENTER_FUNCTION();

    interruptTime -= fLastInterruptTime;
    fStolenTime += interruptTime;
}


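// Repays stolen time by extending the remaining quantum, but never by more
// than the mode's minimal quantum at once; the result is likewise never
// shorter than the minimal quantum.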
inline bigtime_t
ThreadData::GetQuantumLeft()
{
    SCHEDULER_ENTER_FUNCTION();

    bigtime_t stolenTime = std::min(fStolenTime,
        gCurrentMode->minimal_quantum);
    ASSERT(stolenTime >= 0);
    fStolenTime -= stolenTime;

    bigtime_t quantum = ComputeQuantum() - fTimeUsed;
    quantum += stolenTime;
    quantum = std::max(quantum, gCurrentMode->minimal_quantum);

    return quantum;
}


inline void
ThreadData::StartQuantum()
{
    SCHEDULER_ENTER_FUNCTION();

    fQuantumStart = system_time();
}


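// Decides whether the thread has used up its quantum. A preempted or
// yielding thread forfeits whatever is left (crediting it as stolen time),
// and a quantum that ends increases the thread's penalty.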
inline bool
ThreadData::HasQuantumEnded(bool wasPreempted, bool hasYielded)
{
    SCHEDULER_ENTER_FUNCTION();

    bigtime_t timeUsed = system_time() - fQuantumStart;
    ASSERT(timeUsed >= 0);
    fTimeUsed += timeUsed;

    bigtime_t timeLeft = ComputeQuantum() - fTimeUsed;
    timeLeft = std::max(bigtime_t(0), timeLeft);

    // If too little time is left, it is better to make the next quantum
    // a bit longer.
    bigtime_t skipTime = gCurrentMode->minimal_quantum / 2;
    if (hasYielded || wasPreempted || timeLeft <= skipTime) {
        fStolenTime += timeLeft;
        timeLeft = 0;
    }

    if (timeLeft == 0) {
        fAdditionalPenalty++;
        _IncreasePenalty();
        fTimeUsed = 0;
        return true;
    }

    return false;
}


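// Ready-state transitions: Continues() keeps a running thread's load
// estimate fresh, GoesAway() records when and how loaded the core was as
// the thread blocks, and Dies() removes the thread's load for good.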
inline void
ThreadData::Continues()
{
    SCHEDULER_ENTER_FUNCTION();

    ASSERT(fReady);
    if (gTrackCoreLoad)
        _ComputeNeededLoad();
}


inline void
ThreadData::GoesAway()
{
    SCHEDULER_ENTER_FUNCTION();

    ASSERT(fReady);

    if (!HasQuantumEnded(false, false)) {
        fAdditionalPenalty++;
        _ComputeEffectivePriority();
    }

    fLastInterruptTime = 0;

    fWentSleep = system_time();
    fWentSleepActive = fCore->GetActiveTime();

    if (gTrackCoreLoad)
        fLoadMeasurementEpoch = fCore->RemoveLoad(fNeededLoad, false);
    fReady = false;
}


inline void
ThreadData::Dies()
{
    SCHEDULER_ENTER_FUNCTION();

    ASSERT(fReady);
    if (gTrackCoreLoad)
        fCore->RemoveLoad(fNeededLoad, true);
    fReady = false;
}


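// PutBack() reinserts a preempted thread at the front of its run queue so
// it does not lose its turn; Enqueue() below appends a newly woken thread
// at the back. A thread pinned to a CPU goes to that CPU's own queue
// rather than the core's.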
inline void
ThreadData::PutBack()
{
    SCHEDULER_ENTER_FUNCTION();

    int32 priority = GetEffectivePriority();

    if (fThread->pinned_to_cpu > 0) {
        ASSERT(fThread->cpu != NULL);
        CPUEntry* cpu = CPUEntry::GetCPU(fThread->cpu->cpu_num);

        CPURunQueueLocker _(cpu);
        ASSERT(!fEnqueued);
        fEnqueued = true;

        cpu->PushFront(this, priority);
    } else {
        CoreRunQueueLocker _(fCore);
        ASSERT(!fEnqueued);
        fEnqueued = true;

        fCore->PushFront(this, priority);
    }
}


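// On wake-up the time spent sleeping counts as available time, so the
// needed-load estimate is refreshed before the thread is queued again.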
inline void
ThreadData::Enqueue()
{
    SCHEDULER_ENTER_FUNCTION();

    if (!fReady) {
        if (gTrackCoreLoad) {
            bigtime_t timeSlept = system_time() - fWentSleep;
            bool updateLoad = timeSlept > 0;

            fCore->AddLoad(fNeededLoad, fLoadMeasurementEpoch, !updateLoad);
            if (updateLoad) {
                fMeasureAvailableTime += timeSlept;
                _ComputeNeededLoad();
            }
        }

        fReady = true;
    }

    fThread->state = B_THREAD_READY;

    int32 priority = GetEffectivePriority();

    if (fThread->pinned_to_cpu > 0) {
        ASSERT(fThread->previous_cpu != NULL);
        CPUEntry* cpu = CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);

        CPURunQueueLocker _(cpu);
        ASSERT(!fEnqueued);
        fEnqueued = true;

        cpu->PushBack(this, priority);
    } else {
        CoreRunQueueLocker _(fCore);
        ASSERT(!fEnqueued);
        fEnqueued = true;

        fCore->PushBack(this, priority);
    }
}


inline bool
ThreadData::Dequeue()
{
    SCHEDULER_ENTER_FUNCTION();

    if (fThread->pinned_to_cpu > 0) {
        ASSERT(fThread->previous_cpu != NULL);
        CPUEntry* cpu = CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);

        CPURunQueueLocker _(cpu);
        if (!fEnqueued)
            return false;
        cpu->Remove(this);
        ASSERT(!fEnqueued);
        return true;
    }

    CoreRunQueueLocker _(fCore);
    if (!fEnqueued)
        return false;

    fCore->Remove(this);
    ASSERT(!fEnqueued);
    return true;
}


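// Active CPU time counts toward both the available and the active portion
// of the load measurement window.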
inline void
ThreadData::UpdateActivity(bigtime_t active)
{
    SCHEDULER_ENTER_FUNCTION();

    if (!gTrackCoreLoad)
        return;

    fMeasureAvailableTime += active;
    fMeasureAvailableActiveTime += active;
}


} // namespace Scheduler


#endif // KERNEL_SCHEDULER_THREAD_H