/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */


#include "scheduler_cpu.h"

#include <util/AutoLock.h>

#include <algorithm>

#include "scheduler_thread.h"
namespace Scheduler {


CPUEntry* gCPUEntries;

CoreEntry* gCoreEntries;
CoreLoadHeap gCoreLoadHeap;
CoreLoadHeap gCoreHighLoadHeap;
rw_spinlock gCoreHeapsLock = B_RW_SPINLOCK_INITIALIZER;
int32 gCoreCount;

PackageEntry* gPackageEntries;
IdlePackageList gIdlePackageList;
rw_spinlock gIdlePackageLock = B_RW_SPINLOCK_INITIALIZER;
int32 gPackageCount;


}	// namespace Scheduler

using namespace Scheduler;
class Scheduler::DebugDumper {
public:
	static	void		DumpCPURunQueue(CPUEntry* cpu);
	static	void		DumpCoreRunQueue(CoreEntry* core);
	static	void		DumpCoreLoadHeapEntry(CoreEntry* core);
	static	void		DumpIdleCoresInPackage(PackageEntry* package);

private:
	struct CoreThreadsData {
			CoreEntry*	fCore;
			int32		fLoad;
	};

	static	void		_AnalyzeCoreThreads(Thread* thread, void* data);
};


static CPUPriorityHeap sDebugCPUHeap;
static CoreLoadHeap sDebugCoreHeap;

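// Prints one line per queued thread with its static priority and the penalty
// currently applied to it (static minus effective priority).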
void
ThreadRunQueue::Dump() const
{
	ThreadRunQueue::ConstIterator iterator = GetConstIterator();
	if (!iterator.HasNext())
		kprintf("Run queue is empty.\n");
	else {
		kprintf("thread      id      priority penalty  name\n");
		while (iterator.HasNext()) {
			ThreadData* threadData = iterator.Next();
			Thread* thread = threadData->GetThread();

			kprintf("%p  %-7" B_PRId32 " %-8" B_PRId32 " %-8" B_PRId32 " %s\n",
				thread, thread->id, thread->priority,
				thread->priority - threadData->GetEffectivePriority(),
				thread->name);
		}
	}
}

CPUEntry::CPUEntry()
	:
	fLoad(0),
	fMeasureActiveTime(0),
	fMeasureTime(0),
	fUpdateLoadEvent(false)
{
	B_INITIALIZE_RW_SPINLOCK(&fSchedulerModeLock);
	B_INITIALIZE_SPINLOCK(&fQueueLock);
}

void
CPUEntry::Init(int32 id, CoreEntry* core)
{
	fCPUNumber = id;
	fCore = core;
}

void
CPUEntry::Start()
{
	fLoad = 0;
	fCore->AddCPU(this);
}

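// Called when this CPU is disabled: every IRQ currently assigned to it is
// handed back to the interrupt code (target CPU -1). The list lock is
// dropped around each re-assignment call.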
void
CPUEntry::Stop()
{
	cpu_ent* entry = &gCPU[fCPUNumber];

	// get rid of irqs
	SpinLocker locker(entry->irqs_lock);
	irq_assignment* irq
		= (irq_assignment*)list_get_first_item(&entry->irqs);
	while (irq != NULL) {
		locker.Unlock();

		assign_io_interrupt_to_cpu(irq->irq, -1);

		locker.Lock();
		irq = (irq_assignment*)list_get_first_item(&entry->irqs);
	}
	locker.Unlock();
}

void
CPUEntry::PushFront(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();
	fRunQueue.PushFront(thread, priority);
}


void
CPUEntry::PushBack(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();
	fRunQueue.PushBack(thread, priority);
}

void
CPUEntry::Remove(ThreadData* thread)
{
	SCHEDULER_ENTER_FUNCTION();
	ASSERT(thread->IsEnqueued());
	thread->SetDequeued();
	fRunQueue.Remove(thread);
}

inline ThreadData*
CoreEntry::PeekThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.PeekMaximum();
}


inline ThreadData*
CPUEntry::PeekThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.PeekMaximum();
}


ThreadData*
CPUEntry::PeekIdleThread() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fRunQueue.GetHead(B_IDLE_PRIORITY);
}

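// Propagates this CPU's new priority into the core's CPU heap and notifies
// the core when the CPU transitions into or out of the idle state.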
void
CPUEntry::UpdatePriority(int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!gCPU[fCPUNumber].disabled);

	int32 oldPriority = CPUPriorityHeap::GetKey(this);
	if (oldPriority == priority)
		return;
	fCore->CPUHeap()->ModifyKey(this, priority);

	if (oldPriority == B_IDLE_PRIORITY)
		fCore->CPUWakesUp(this);
	else if (priority == B_IDLE_PRIORITY)
		fCore->CPUGoesIdle(this);
}

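// Refreshes this CPU's load estimate from the active time accumulated since
// the last measurement; on very high load the current scheduler mode is
// asked to rebalance IRQs away from this CPU.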
void
CPUEntry::ComputeLoad()
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(gTrackCPULoad);
	ASSERT(!gCPU[fCPUNumber].disabled);
	ASSERT(fCPUNumber == smp_get_current_cpu());

	int oldLoad = compute_load(fMeasureTime, fMeasureActiveTime, fLoad,
		system_time());
	if (oldLoad < 0)
		return;

	if (fLoad > kVeryHighLoad)
		gCurrentMode->rebalance_irqs(false);
}

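// Chooses the thread to run next from three candidates: the thread that just
// ran, the highest-priority thread pinned to this CPU and the highest-priority
// thread in the core-wide run queue. Higher effective priority wins; the old
// thread also wins ties unless putAtBack requests round-robin rotation.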
ThreadData*
CPUEntry::ChooseNextThread(ThreadData* oldThread, bool putAtBack)
{
	SCHEDULER_ENTER_FUNCTION();

	int32 oldPriority = -1;
	if (oldThread != NULL)
		oldPriority = oldThread->GetEffectivePriority();

	CPURunQueueLocker cpuLocker(this);

	ThreadData* pinnedThread = fRunQueue.PeekMaximum();
	int32 pinnedPriority = -1;
	if (pinnedThread != NULL)
		pinnedPriority = pinnedThread->GetEffectivePriority();

	CoreRunQueueLocker coreLocker(fCore);

	ThreadData* sharedThread = fCore->PeekThread();
	ASSERT(sharedThread != NULL || pinnedThread != NULL || oldThread != NULL);

	int32 sharedPriority = -1;
	if (sharedThread != NULL)
		sharedPriority = sharedThread->GetEffectivePriority();

	int32 rest = std::max(pinnedPriority, sharedPriority);
	if (oldPriority > rest || (!putAtBack && oldPriority == rest))
		return oldThread;

	if (sharedPriority > pinnedPriority) {
		fCore->Remove(sharedThread);
		return sharedThread;
	}

	coreLocker.Unlock();

	Remove(pinnedThread);
	return pinnedThread;
}

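// Charges the CPU time consumed by the outgoing thread to this CPU, its core
// and the thread itself, then updates the load estimate and the requested
// performance level before the next thread starts running.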
void
CPUEntry::TrackActivity(ThreadData* oldThreadData, ThreadData* nextThreadData)
{
	SCHEDULER_ENTER_FUNCTION();

	cpu_ent* cpuEntry = &gCPU[fCPUNumber];

	Thread* oldThread = oldThreadData->GetThread();
	if (!thread_is_idle_thread(oldThread)) {
		bigtime_t active
			= (oldThread->kernel_time - cpuEntry->last_kernel_time)
				+ (oldThread->user_time - cpuEntry->last_user_time);

		WriteSequentialLocker locker(cpuEntry->active_time_lock);
		cpuEntry->active_time += active;
		locker.Unlock();

		fMeasureActiveTime += active;
		fCore->IncreaseActiveTime(active);

		oldThreadData->UpdateActivity(active);
	}

	if (gTrackCPULoad) {
		if (!cpuEntry->disabled)
			ComputeLoad();
		_RequestPerformanceLevel(nextThreadData);
	}

	Thread* nextThread = nextThreadData->GetThread();
	if (!thread_is_idle_thread(nextThread)) {
		cpuEntry->last_kernel_time = nextThread->kernel_time;
		cpuEntry->last_user_time = nextThread->user_time;

		nextThreadData->SetLastInterruptTime(cpuEntry->interrupt_time);
	}
}

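// Arms the per-CPU quantum timer: a reschedule event at the end of the next
// thread's quantum or, when the CPU goes idle and core load tracking is
// enabled, a one-shot event that keeps the core's load statistics fresh.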
void
CPUEntry::StartQuantumTimer(ThreadData* thread, bool wasPreempted)
{
	cpu_ent* cpu = &gCPU[ID()];

	if (!wasPreempted || fUpdateLoadEvent)
		cancel_timer(&cpu->quantum_timer);
	fUpdateLoadEvent = false;

	if (!thread->IsIdle()) {
		bigtime_t quantum = thread->GetQuantumLeft();
		add_timer(&cpu->quantum_timer, &CPUEntry::_RescheduleEvent, quantum,
			B_ONE_SHOT_RELATIVE_TIMER);
	} else if (gTrackCoreLoad) {
		add_timer(&cpu->quantum_timer, &CPUEntry::_UpdateLoadEvent,
			kLoadMeasureInterval * 2, B_ONE_SHOT_RELATIVE_TIMER);
		fUpdateLoadEvent = true;
	}
}

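// Requests a CPU performance level proportional to the distance between the
// current load and kTargetLoad: performance is decreased below the target and
// increased above it. A disabled CPU is simply forced to minimum performance.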
void
CPUEntry::_RequestPerformanceLevel(ThreadData* threadData)
{
	SCHEDULER_ENTER_FUNCTION();

	if (gCPU[fCPUNumber].disabled) {
		decrease_cpu_performance(kCPUPerformanceScaleMax);
		return;
	}

	int32 load = std::max(threadData->GetLoad(), fCore->GetLoad());
	ASSERT(load >= 0 && load <= kMaxLoad);

	if (load < kTargetLoad) {
		int32 delta = kTargetLoad - load;

		delta *= kTargetLoad;
		delta /= kCPUPerformanceScaleMax;

		decrease_cpu_performance(delta);
	} else {
		int32 delta = load - kTargetLoad;
		delta *= kMaxLoad - kTargetLoad;
		delta /= kCPUPerformanceScaleMax;

		increase_cpu_performance(delta);
	}
}

/* static */ int32
CPUEntry::_RescheduleEvent(timer* /* unused */)
{
	get_cpu_struct()->invoke_scheduler = true;
	get_cpu_struct()->preempted = true;
	return B_HANDLED_INTERRUPT;
}


/* static */ int32
CPUEntry::_UpdateLoadEvent(timer* /* unused */)
{
	CoreEntry::GetCore(smp_get_current_cpu())->ChangeLoad(0);
	CPUEntry::GetCPU(smp_get_current_cpu())->fUpdateLoadEvent = false;
	return B_HANDLED_INTERRUPT;
}

CPUPriorityHeap::CPUPriorityHeap(int32 cpuCount)
	:
	Heap<CPUEntry, int32>(cpuCount)
{
}

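// Dumps the heap in priority order. Since the heap can only be inspected by
// removing its root, entries are drained into sDebugCPUHeap while printing
// and then moved back, leaving the heap unchanged.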
void
CPUPriorityHeap::Dump()
{
	kprintf("cpu priority load\n");
	CPUEntry* entry = PeekRoot();
	while (entry) {
		int32 cpu = entry->ID();
		int32 key = GetKey(entry);
		kprintf("%3" B_PRId32 " %8" B_PRId32 " %3" B_PRId32 "%%\n", cpu, key,
			entry->GetLoad() / 10);

		RemoveRoot();
		sDebugCPUHeap.Insert(entry, key);

		entry = PeekRoot();
	}

	entry = sDebugCPUHeap.PeekRoot();
	while (entry) {
		int32 key = GetKey(entry);
		sDebugCPUHeap.RemoveRoot();
		Insert(entry, key);
		entry = sDebugCPUHeap.PeekRoot();
	}
}

CoreEntry::CoreEntry()
	:
	fCPUCount(0),
	fIdleCPUCount(0),
	fThreadCount(0),
	fActiveTime(0),
	fLoad(0),
	fCurrentLoad(0),
	fLoadMeasurementEpoch(0),
	fHighLoad(false),
	fLastLoadUpdate(0)
{
	B_INITIALIZE_SPINLOCK(&fCPULock);
	B_INITIALIZE_SPINLOCK(&fQueueLock);
	B_INITIALIZE_SEQLOCK(&fActiveTimeLock);
	B_INITIALIZE_RW_SPINLOCK(&fLoadLock);
}

void
CoreEntry::Init(int32 id, PackageEntry* package)
{
	fCoreID = id;
	fPackage = package;
}

void
CoreEntry::PushFront(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	fRunQueue.PushFront(thread, priority);
	atomic_add(&fThreadCount, 1);
}


void
CoreEntry::PushBack(ThreadData* thread, int32 priority)
{
	SCHEDULER_ENTER_FUNCTION();

	fRunQueue.PushBack(thread, priority);
	atomic_add(&fThreadCount, 1);
}


void
CoreEntry::Remove(ThreadData* thread)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(!thread->IsIdle());

	ASSERT(thread->IsEnqueued());
	thread->SetDequeued();

	fRunQueue.Remove(thread);
	atomic_add(&fThreadCount, -1);
}

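// AddCPU()/RemoveCPU() track the enabled CPUs of this core. Enabling the
// first CPU (or disabling the last) also inserts the core into (or removes
// it from) the global load heaps and its package's idle core list.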
void
CoreEntry::AddCPU(CPUEntry* cpu)
{
	ASSERT(fCPUCount >= 0);
	ASSERT(fIdleCPUCount >= 0);

	fIdleCPUCount++;
	if (fCPUCount++ == 0) {
		// core has been reenabled
		fLoad = 0;
		fCurrentLoad = 0;
		fHighLoad = false;
		gCoreLoadHeap.Insert(this, 0);

		fPackage->AddIdleCore(this);
	}

	fCPUHeap.Insert(cpu, B_IDLE_PRIORITY);
}

void
CoreEntry::RemoveCPU(CPUEntry* cpu, ThreadProcessing& threadPostProcessing)
{
	ASSERT(fCPUCount > 0);
	ASSERT(fIdleCPUCount > 0);

	fIdleCPUCount--;
	if (--fCPUCount == 0) {
		// unassign threads
		thread_map(CoreEntry::_UnassignThread, this);

		// core has been disabled
		if (fHighLoad) {
			gCoreHighLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreHighLoadHeap.PeekMinimum() == this);
			gCoreHighLoadHeap.RemoveMinimum();
		} else {
			gCoreLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreLoadHeap.PeekMinimum() == this);
			gCoreLoadHeap.RemoveMinimum();
		}

		fPackage->RemoveIdleCore(this);

		// get rid of threads
		while (fRunQueue.PeekMaximum() != NULL) {
			ThreadData* threadData = fRunQueue.PeekMaximum();

			Remove(threadData);

			ASSERT(threadData->Core() == NULL);
			threadPostProcessing(threadData);
		}

		fThreadCount = 0;
	}

	fCPUHeap.ModifyKey(cpu, -1);
	ASSERT(fCPUHeap.PeekRoot() == cpu);
	fCPUHeap.RemoveRoot();

	ASSERT(cpu->GetLoad() >= 0 && cpu->GetLoad() <= kMaxLoad);
	ASSERT(fLoad >= 0);
}

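// Recomputes the core load once per measurement interval and moves the core
// between gCoreLoadHeap and gCoreHighLoadHeap when the load crosses kHighLoad
// upwards or kMediumLoad downwards; the gap between the two thresholds acts
// as hysteresis.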
void
CoreEntry::_UpdateLoad(bool forceUpdate)
{
	SCHEDULER_ENTER_FUNCTION();

	if (fCPUCount <= 0)
		return;

	bigtime_t now = system_time();
	bool intervalEnded = now >= kLoadMeasureInterval + fLastLoadUpdate;
	bool intervalSkipped = now >= kLoadMeasureInterval * 2 + fLastLoadUpdate;

	if (!intervalEnded && !forceUpdate)
		return;

	WriteSpinLocker coreLocker(gCoreHeapsLock);

	int32 newKey;
	if (intervalEnded) {
		WriteSpinLocker locker(fLoadLock);

		newKey = intervalSkipped ? fCurrentLoad : GetLoad();

		ASSERT(fCurrentLoad >= 0);
		ASSERT(fLoad >= fCurrentLoad);

		fLoad = fCurrentLoad;
		fLoadMeasurementEpoch++;
		fLastLoadUpdate = now;
	} else
		newKey = GetLoad();

	int32 oldKey = CoreLoadHeap::GetKey(this);

	ASSERT(oldKey >= 0);
	ASSERT(newKey >= 0);

	if (oldKey == newKey)
		return;

	if (newKey > kHighLoad) {
		if (!fHighLoad) {
			gCoreLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreLoadHeap.PeekMinimum() == this);
			gCoreLoadHeap.RemoveMinimum();

			gCoreHighLoadHeap.Insert(this, newKey);

			fHighLoad = true;
		} else
			gCoreHighLoadHeap.ModifyKey(this, newKey);
	} else if (newKey < kMediumLoad) {
		if (fHighLoad) {
			gCoreHighLoadHeap.ModifyKey(this, -1);
			ASSERT(gCoreHighLoadHeap.PeekMinimum() == this);
			gCoreHighLoadHeap.RemoveMinimum();

			gCoreLoadHeap.Insert(this, newKey);

			fHighLoad = false;
		} else
			gCoreLoadHeap.ModifyKey(this, newKey);
	} else {
		if (fHighLoad)
			gCoreHighLoadHeap.ModifyKey(this, newKey);
		else
			gCoreLoadHeap.ModifyKey(this, newKey);
	}
}

/* static */ void
CoreEntry::_UnassignThread(Thread* thread, void* data)
{
	CoreEntry* core = static_cast<CoreEntry*>(data);
	ThreadData* threadData = thread->scheduler_data;

	if (threadData->Core() == core && thread->pinned_to_cpu == 0)
		threadData->UnassignCore();
}

CoreLoadHeap::CoreLoadHeap(int32 coreCount)
	:
	MinMaxHeap<CoreEntry, int32>(coreCount)
{
}

void
CoreLoadHeap::Dump()
{
	CoreEntry* entry = PeekMinimum();
	while (entry) {
		int32 key = GetKey(entry);

		DebugDumper::DumpCoreLoadHeapEntry(entry);

		RemoveMinimum();
		sDebugCoreHeap.Insert(entry, key);

		entry = PeekMinimum();
	}

	entry = sDebugCoreHeap.PeekMinimum();
	while (entry) {
		int32 key = GetKey(entry);
		sDebugCoreHeap.RemoveMinimum();
		Insert(entry, key);
		entry = sDebugCoreHeap.PeekMinimum();
	}
}

PackageEntry::PackageEntry()
	:
	fIdleCoreCount(0),
	fCoreCount(0)
{
	B_INITIALIZE_RW_SPINLOCK(&fCoreLock);
}

void
PackageEntry::Init(int32 id)
{
	fPackageID = id;
}

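// A package is linked into gIdlePackageList while at least one of its cores
// is idle, so the scheduler modes can quickly locate packages with idle
// cores.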
void
PackageEntry::AddIdleCore(CoreEntry* core)
{
	fCoreCount++;
	fIdleCoreCount++;
	fIdleCores.Add(core);

	if (fCoreCount == 1)
		gIdlePackageList.Add(this);
}


void
PackageEntry::RemoveIdleCore(CoreEntry* core)
{
	fIdleCores.Remove(core);
	fIdleCoreCount--;
	fCoreCount--;

	if (fCoreCount == 0)
		gIdlePackageList.Remove(this);
}

/* static */ void
DebugDumper::DumpCPURunQueue(CPUEntry* cpu)
{
	ThreadRunQueue::ConstIterator iterator = cpu->fRunQueue.GetConstIterator();

	if (iterator.HasNext()
		&& !thread_is_idle_thread(iterator.Next()->GetThread())) {
		kprintf("\nCPU %" B_PRId32 " run queue:\n", cpu->ID());
		cpu->fRunQueue.Dump();
	}
}

/* static */ void
DebugDumper::DumpCoreRunQueue(CoreEntry* core)
{
	core->fRunQueue.Dump();
}

/* static */ void
DebugDumper::DumpCoreLoadHeapEntry(CoreEntry* entry)
{
	CoreThreadsData threadsData;
	threadsData.fCore = entry;
	threadsData.fLoad = 0;
	thread_map(DebugDumper::_AnalyzeCoreThreads, &threadsData);

	kprintf("%4" B_PRId32 " %11" B_PRId32 "%% %11" B_PRId32 "%% %11" B_PRId32
		"%% %7" B_PRId32 " %5" B_PRIu32 "\n", entry->ID(), entry->fLoad / 10,
		entry->fCurrentLoad / 10, threadsData.fLoad, entry->ThreadCount(),
		entry->fLoadMeasurementEpoch);
}

/* static */ void
DebugDumper::DumpIdleCoresInPackage(PackageEntry* package)
{
	kprintf("%-7" B_PRId32 " ", package->fPackageID);

	DoublyLinkedList<CoreEntry>::ReverseIterator iterator
		= package->fIdleCores.GetReverseIterator();
	if (iterator.HasNext()) {
		while (iterator.HasNext()) {
			CoreEntry* coreEntry = iterator.Next();
			kprintf("%" B_PRId32 "%s", coreEntry->ID(),
				iterator.HasNext() ? ", " : "");
		}
	} else
		kprintf("-");
	kprintf("\n");
}

/* static */ void
DebugDumper::_AnalyzeCoreThreads(Thread* thread, void* data)
{
	CoreThreadsData* threadsData = static_cast<CoreThreadsData*>(data);
	if (thread->scheduler_data->Core() == threadsData->fCore)
		threadsData->fLoad += thread->scheduler_data->GetLoad();
}

static int
dump_run_queue(int /* argc */, char** /* argv */)
{
	int32 cpuCount = smp_get_num_cpus();
	int32 coreCount = gCoreCount;

	for (int32 i = 0; i < coreCount; i++) {
		kprintf("%sCore %" B_PRId32 " run queue:\n", i > 0 ? "\n" : "", i);
		DebugDumper::DumpCoreRunQueue(&gCoreEntries[i]);
	}

	for (int32 i = 0; i < cpuCount; i++)
		DebugDumper::DumpCPURunQueue(&gCPUEntries[i]);

	return 0;
}

static int
dump_cpu_heap(int /* argc */, char** /* argv */)
{
	kprintf("core average_load current_load threads_load threads epoch\n");
	gCoreLoadHeap.Dump();
	kprintf("\n");
	gCoreHighLoadHeap.Dump();

	for (int32 i = 0; i < gCoreCount; i++) {
		if (gCoreEntries[i].CPUCount() < 2)
			continue;

		kprintf("\nCore %" B_PRId32 " heap:\n", i);
		gCoreEntries[i].CPUHeap()->Dump();
	}

	return 0;
}

static int
dump_idle_cores(int /* argc */, char** /* argv */)
{
	kprintf("Idle packages:\n");
	IdlePackageList::ReverseIterator idleIterator
		= gIdlePackageList.GetReverseIterator();

	if (idleIterator.HasNext()) {
		kprintf("package cores\n");

		while (idleIterator.HasNext())
			DebugDumper::DumpIdleCoresInPackage(idleIterator.Next());
	} else
		kprintf("No idle packages.\n");

	return 0;
}

void Scheduler::init_debug_commands()
{
	new(&sDebugCPUHeap) CPUPriorityHeap(smp_get_num_cpus());
	new(&sDebugCoreHeap) CoreLoadHeap(smp_get_num_cpus());

	add_debugger_command_etc("run_queue", &dump_run_queue,
		"List threads in run queue", "\nLists threads in run queue", 0);
	if (!gSingleCore) {
		add_debugger_command_etc("cpu_heap", &dump_cpu_heap,
			"List CPUs in CPU priority heap",
			"\nList CPUs in CPU priority heap", 0);
		add_debugger_command_etc("idle_cores", &dump_idle_cores,
			"List idle cores", "\nList idle cores", 0);
	}
}