src/system/kernel/scheduler/scheduler_thread.cpp
/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */

#include "scheduler_thread.h"

using namespace Scheduler;

static bigtime_t sQuantumLengths[THREAD_MAX_SET_PRIORITY + 1];

const int32 kMaximumQuantumLengthsCount = 20;
static bigtime_t sMaximumQuantumLengths[kMaximumQuantumLengthsCount];
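// Resets the per-thread scheduler state (penalties, quantum accounting and
// load measurement) that is common to both Init() variants.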
void
ThreadData::_InitBase()
{
	fPriorityPenalty = 0;
	fAdditionalPenalty = 0;
	fEffectivePriority = GetPriority();
	fBaseQuantum = sQuantumLengths[GetEffectivePriority()];

	fTimeUsed = 0;
	fStolenTime = 0;

	fMeasureAvailableActiveTime = 0;
	fLastMeasureAvailableTime = 0;
	fMeasureAvailableTime = 0;

	fWentSleep = 0;
	fWentSleepActive = 0;

	fEnqueued = false;
	fReady = false;
}
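// Delegates core selection to the active scheduler mode (low latency or
// power saving); only called on multi-core systems.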
inline CoreEntry*
ThreadData::_ChooseCore() const
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gSingleCore);
	return gCurrentMode->choose_core(this);
}
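// Picks a CPU on the given core. The thread's previous CPU is preferred,
// preserving cache affinity, when it belongs to the core and currently runs
// something of lower priority; otherwise the core's lowest-priority CPU is
// taken from the priority heap.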
inline CPUEntry*
ThreadData::_ChooseCPU(CoreEntry* core, bool& rescheduleNeeded) const
{
	SCHEDULER_ENTER_FUNCTION();

	int32 threadPriority = GetEffectivePriority();

	if (fThread->previous_cpu != NULL) {
		CPUEntry* previousCPU
			= CPUEntry::GetCPU(fThread->previous_cpu->cpu_num);
		if (previousCPU->Core() == core && !fThread->previous_cpu->disabled) {
			CoreCPUHeapLocker _(core);
			if (CPUPriorityHeap::GetKey(previousCPU) < threadPriority) {
				previousCPU->UpdatePriority(threadPriority);
				rescheduleNeeded = true;
				return previousCPU;
			}
		}
	}

	CoreCPUHeapLocker _(core);
	CPUEntry* cpu = core->CPUHeap()->PeekRoot();
	ASSERT(cpu != NULL);

	if (CPUPriorityHeap::GetKey(cpu) < threadPriority) {
		cpu->UpdatePriority(threadPriority);
		rescheduleNeeded = true;
	} else
		rescheduleNeeded = false;

	return cpu;
}
ThreadData::ThreadData(Thread* thread)
	:
	fThread(thread)
{
}
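// Initializes scheduler data for a newly created thread. The load estimate
// and, for non-real-time threads, the accumulated penalties are inherited
// from the current thread, presumably so that spawning new threads cannot
// shed an already earned penalty.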
void
ThreadData::Init()
{
	_InitBase();
	fCore = NULL;

	Thread* currentThread = thread_get_current_thread();
	ThreadData* currentThreadData = currentThread->scheduler_data;
	fNeededLoad = currentThreadData->fNeededLoad;

	if (!IsRealTime()) {
		fPriorityPenalty = std::min(currentThreadData->fPriorityPenalty,
			std::max(GetPriority() - _GetMinimalPriority(), int32(0)));
		fAdditionalPenalty = currentThreadData->fAdditionalPenalty;

		_ComputeEffectivePriority();
	}
}
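// Initializes scheduler data for a thread that is pre-assigned to a given
// core and immediately marked ready; no load or penalties are inherited.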
void
ThreadData::Init(CoreEntry* core)
{
	_InitBase();

	fCore = core;
	fReady = true;
	fNeededLoad = 0;
}
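// Dumps this thread's scheduler state to the kernel debugger console.
// fNeededLoad is apparently kept on a 0-1000 scale, hence the division
// by 10 when printing it as a percentage.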
void
ThreadData::Dump() const
{
	kprintf("\tpriority_penalty:\t%" B_PRId32 "\n", fPriorityPenalty);

	int32 priority = GetPriority() - _GetPenalty();
	priority = std::max(priority, int32(1));
	kprintf("\tadditional_penalty:\t%" B_PRId32 " (%" B_PRId32 ")\n",
		fAdditionalPenalty % priority, fAdditionalPenalty);
	kprintf("\teffective_priority:\t%" B_PRId32 "\n", GetEffectivePriority());

	kprintf("\ttime_used:\t\t%" B_PRId64 " us (quantum: %" B_PRId64 " us)\n",
		fTimeUsed, ComputeQuantum());
	kprintf("\tstolen_time:\t\t%" B_PRId64 " us\n", fStolenTime);
	kprintf("\tquantum_start:\t\t%" B_PRId64 " us\n", fQuantumStart);
	kprintf("\tneeded_load:\t\t%" B_PRId32 "%%\n", fNeededLoad / 10);
	kprintf("\twent_sleep:\t\t%" B_PRId64 "\n", fWentSleep);
	kprintf("\twent_sleep_active:\t%" B_PRId64 "\n", fWentSleepActive);
	kprintf("\tcore:\t\t\t%" B_PRId32 "\n",
		fCore != NULL ? fCore->ID() : -1);
	if (fCore != NULL && HasCacheExpired())
		kprintf("\tcache affinity has expired\n");
}
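// Resolves the target core and CPU for this thread, filling in whichever of
// the two the caller left NULL. If the thread migrates to a different core,
// its load estimate moves along with it. Returns whether the chosen CPU
// needs to reschedule.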
bool
ThreadData::ChooseCoreAndCPU(CoreEntry*& targetCore, CPUEntry*& targetCPU)
{
	SCHEDULER_ENTER_FUNCTION();

	bool rescheduleNeeded = false;

	if (targetCore == NULL && targetCPU != NULL)
		targetCore = targetCPU->Core();
	else if (targetCore != NULL && targetCPU == NULL)
		targetCPU = _ChooseCPU(targetCore, rescheduleNeeded);
	else if (targetCore == NULL && targetCPU == NULL) {
		targetCore = _ChooseCore();
		targetCPU = _ChooseCPU(targetCore, rescheduleNeeded);
	}

	ASSERT(targetCore != NULL);
	ASSERT(targetCPU != NULL);

	if (fCore != targetCore) {
		fLoadMeasurementEpoch = targetCore->LoadMeasurementEpoch() - 1;
		if (fReady) {
			if (fCore != NULL)
				fCore->RemoveLoad(fNeededLoad, true);
			targetCore->AddLoad(fNeededLoad, fLoadMeasurementEpoch, true);
		}
	}

	fCore = targetCore;
	return rescheduleNeeded;
}
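// Computes the time slice the thread may run before preemption. Real-time
// threads always receive their full base quantum; for other threads the
// quantum is capped based on how many threads share each CPU of the core,
// so the mode's latency target still holds under load.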
bigtime_t
ThreadData::ComputeQuantum() const
{
	SCHEDULER_ENTER_FUNCTION();

	if (IsRealTime())
		return fBaseQuantum;

	int32 threadCount = fCore->ThreadCount();
	if (fCore->CPUCount() > 0)
		threadCount /= fCore->CPUCount();

	bigtime_t quantum = fBaseQuantum;
	if (threadCount < kMaximumQuantumLengthsCount)
		quantum = std::min(sMaximumQuantumLengths[threadCount], quantum);

	return quantum;
}
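// Unassigns the thread from its core. A thread that was running, or is
// still ready to run, loses its ready state first; the core pointer is
// cleared once the thread is no longer ready.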
void
ThreadData::UnassignCore(bool running)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fCore != NULL);
	if (running || fThread->state == B_THREAD_READY)
		fReady = false;
	if (!fReady)
		fCore = NULL;
}
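// Precomputes the per-priority base quantum table and the per-thread-count
// quantum caps for the current scheduler mode. Priorities at or above
// B_URGENT_DISPLAY_PRIORITY get the base quantum; lower priorities get
// progressively longer quanta interpolated by _ScaleQuantum(). The caps
// divide the mode's maximum latency among the threads on a core, but never
// drop below the mode's minimal quantum.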
/* static */ void
ThreadData::ComputeQuantumLengths()
{
	SCHEDULER_ENTER_FUNCTION();

	for (int32 priority = 0; priority <= THREAD_MAX_SET_PRIORITY; priority++) {
		const bigtime_t kQuantum0 = gCurrentMode->base_quantum;
		if (priority >= B_URGENT_DISPLAY_PRIORITY) {
			sQuantumLengths[priority] = kQuantum0;
			continue;
		}

		const bigtime_t kQuantum1
			= kQuantum0 * gCurrentMode->quantum_multipliers[0];
		if (priority > B_NORMAL_PRIORITY) {
			sQuantumLengths[priority] = _ScaleQuantum(kQuantum1, kQuantum0,
				B_URGENT_DISPLAY_PRIORITY, B_NORMAL_PRIORITY, priority);
			continue;
		}

		const bigtime_t kQuantum2
			= kQuantum0 * gCurrentMode->quantum_multipliers[1];
		sQuantumLengths[priority] = _ScaleQuantum(kQuantum2, kQuantum1,
			B_NORMAL_PRIORITY, B_IDLE_PRIORITY, priority);
	}

	for (int32 threadCount = 0; threadCount < kMaximumQuantumLengthsCount;
		threadCount++) {

		bigtime_t quantum = gCurrentMode->maximum_latency;
		if (threadCount != 0)
			quantum /= threadCount;
		quantum = std::max(quantum, gCurrentMode->minimal_quantum);
		sMaximumQuantumLengths[threadCount] = quantum;
	}
}
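// Returns the priority penalty the thread has accumulated.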
inline int32
ThreadData::_GetPenalty() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fPriorityPenalty;
}
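// Re-estimates the thread's load from the active time measured over the
// current interval and propagates any change to the core's load tally.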
void
ThreadData::_ComputeNeededLoad()
{
	SCHEDULER_ENTER_FUNCTION();
	ASSERT(!IsIdle());

	int32 oldLoad = compute_load(fLastMeasureAvailableTime,
		fMeasureAvailableActiveTime, fNeededLoad, fMeasureAvailableTime);
	if (oldLoad < 0 || oldLoad == fNeededLoad)
		return;

	fCore->ChangeLoad(fNeededLoad - oldLoad);
}
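// Recomputes the effective priority from the base priority and the two
// penalty counters. Idle and real-time threads keep a fixed priority;
// everything else is lowered by the priority penalty plus an additional
// penalty taken modulo the remaining priority, keeping the result within
// the active, non-real-time range. The base quantum is then refreshed to
// match the new effective priority.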
void
ThreadData::_ComputeEffectivePriority() const
{
	SCHEDULER_ENTER_FUNCTION();

	if (IsIdle())
		fEffectivePriority = B_IDLE_PRIORITY;
	else if (IsRealTime())
		fEffectivePriority = GetPriority();
	else {
		fEffectivePriority = GetPriority();
		fEffectivePriority -= _GetPenalty();
		if (fEffectivePriority > 0)
			fEffectivePriority -= fAdditionalPenalty % fEffectivePriority;

		ASSERT(fEffectivePriority < B_FIRST_REAL_TIME_PRIORITY);
		ASSERT(fEffectivePriority >= B_LOWEST_ACTIVE_PRIORITY);
	}

	fBaseQuantum = sQuantumLengths[GetEffectivePriority()];
}
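// Linearly interpolates a quantum for the given priority: a thread at
// maxPriority receives minQuantum, one at minPriority receives maxQuantum.
// For example (hypothetical values), with minQuantum = 2000us at
// maxPriority = 20 and maxQuantum = 6000us at minPriority = 10, a thread
// at priority 15 gets 6000 - (6000 - 2000) * (15 - 10) / (20 - 10)
// = 4000us.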
/* static */ bigtime_t
ThreadData::_ScaleQuantum(bigtime_t maxQuantum, bigtime_t minQuantum,
	int32 maxPriority, int32 minPriority, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(priority <= maxPriority);
	ASSERT(priority >= minPriority);

	bigtime_t result = (maxQuantum - minQuantum) * (priority - minPriority);
	result /= maxPriority - minPriority;
	return maxQuantum - result;
}
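// Empty out-of-line definition of the virtual destructor; presumably kept
// here to anchor ThreadProcessing's vtable in this translation unit.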
ThreadProcessing::~ThreadProcessing()
{
}