/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_SCHEDULER_CPU_H
#define KERNEL_SCHEDULER_CPU_H


#include <OS.h>

#include <thread.h>
#include <util/AutoLock.h>
#include <util/Heap.h>
#include <util/MinMaxHeap.h>

#include <cpufreq.h>

#include "RunQueue.h"
#include "scheduler_common.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"


namespace Scheduler {


class DebugDumper;

struct ThreadData;
class ThreadProcessing;

class CPUEntry;
class CoreEntry;
class PackageEntry;
// The run queues. Hold the threads ready to run, ordered by priority.
// There is one queue per schedulable target (core). Additionally, each
// logical processor has its own sPinnedRunQueues entry used for scheduling
// pinned threads.
class ThreadRunQueue : public RunQueue<ThreadData, THREAD_MAX_SET_PRIORITY> {
public:
			void			Dump() const;
};
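// A minimal usage sketch (illustrative only, not part of this header; the
// thread variables are hypothetical): entries are dequeued highest priority
// first.
//
//	ThreadRunQueue queue;
//	queue.PushBack(lowPriorityThread, B_LOW_PRIORITY);
//	queue.PushBack(normalPriorityThread, B_NORMAL_PRIORITY);
//	ThreadData* next = queue.PeekMaximum();
//		// next == normalPriorityThread, the highest priority entry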
class CPUEntry : public HeapLinkImpl<CPUEntry, int32> {
public:
			void			Init(int32 id, CoreEntry* core);

	inline	int32			ID() const	{ return fCPUNumber; }
	inline	CoreEntry*		Core() const	{ return fCore; }

	inline	void			EnterScheduler();
	inline	void			ExitScheduler();

	inline	void			LockScheduler();
	inline	void			UnlockScheduler();

	inline	void			LockRunQueue();
	inline	void			UnlockRunQueue();

			void			PushFront(ThreadData* thread,
								int32 priority);
			void			PushBack(ThreadData* thread,
								int32 priority);
			void			Remove(ThreadData* thread);
	inline	ThreadData*		PeekThread() const;
			ThreadData*		PeekIdleThread() const;

			void			UpdatePriority(int32 priority);

	inline	int32			GetLoad() const	{ return fLoad; }

			ThreadData*		ChooseNextThread(ThreadData* oldThread,
								bool putAtBack);

			void			TrackActivity(ThreadData* oldThreadData,
								ThreadData* nextThreadData);

			void			StartQuantumTimer(ThreadData* thread,
								bool wasPreempted);

	static inline	CPUEntry*	GetCPU(int32 cpu);

private:
			void			_RequestPerformanceLevel(
								ThreadData* threadData);

	static	int32			_RescheduleEvent(timer* /* unused */);
	static	int32			_UpdateLoadEvent(timer* /* unused */);

			int32			fCPUNumber;
			CoreEntry*		fCore;

			rw_spinlock		fSchedulerModeLock;

			ThreadRunQueue	fRunQueue;
			spinlock		fQueueLock;

			int32			fLoad;

			bigtime_t		fMeasureActiveTime;
			bigtime_t		fMeasureTime;

			bool			fUpdateLoadEvent;

			friend class DebugDumper;
};
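// The fSchedulerModeLock pattern above: ordinary rescheduling on a CPU takes
// the lock as a reader (EnterScheduler()/ExitScheduler()), while operations
// that switch the scheduler mode take it as a writer (LockScheduler()/
// UnlockScheduler()), excluding all concurrent rescheduling. A hedged sketch
// of the intended call pattern (hypothetical caller code, not part of this
// header):
//
//	CPUEntry* cpu = CPUEntry::GetCPU(smp_get_current_cpu());
//	cpu->EnterScheduler();
//	cpu->LockRunQueue();
//	ThreadData* next = cpu->PeekThread();
//	cpu->UnlockRunQueue();
//	cpu->ExitScheduler();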
class CPUPriorityHeap : public Heap<CPUEntry, int32> {
public:
							CPUPriorityHeap() { }
							CPUPriorityHeap(int32 cpuCount);
};


class CoreEntry : public MinMaxHeapLinkImpl<CoreEntry, int32>,
	public DoublyLinkedListLinkImpl<CoreEntry> {
public:
			void			Init(int32 id, PackageEntry* package);

	inline	int32			ID() const	{ return fCoreID; }
	inline	PackageEntry*	Package() const	{ return fPackage; }
	inline	int32			CPUCount() const
								{ return fCPUCount; }

	inline	void			LockCPUHeap();
	inline	void			UnlockCPUHeap();

	inline	CPUPriorityHeap*	CPUHeap();

	inline	int32			ThreadCount() const;

	inline	void			LockRunQueue();
	inline	void			UnlockRunQueue();

			void			PushFront(ThreadData* thread,
								int32 priority);
			void			PushBack(ThreadData* thread,
								int32 priority);
			void			Remove(ThreadData* thread);
	inline	ThreadData*		PeekThread() const;

	inline	bigtime_t		GetActiveTime() const;
	inline	void			IncreaseActiveTime(
								bigtime_t activeTime);

	inline	int32			GetLoad() const;
	inline	uint32			LoadMeasurementEpoch() const
								{ return fLoadMeasurementEpoch; }

	inline	void			AddLoad(int32 load, uint32 epoch,
								bool updateLoad);
	inline	uint32			RemoveLoad(int32 load, bool force);
	inline	void			ChangeLoad(int32 delta);

	inline	void			CPUGoesIdle(CPUEntry* cpu);
	inline	void			CPUWakesUp(CPUEntry* cpu);

			void			AddCPU(CPUEntry* cpu);
			void			RemoveCPU(CPUEntry* cpu,
								ThreadProcessing&
									threadPostProcessing);

	static inline	CoreEntry*	GetCore(int32 cpu);

private:
			void			_UpdateLoad(bool forceUpdate = false);

	static	void			_UnassignThread(Thread* thread,
								void* data);

			int32			fCoreID;
			PackageEntry*	fPackage;

			int32			fCPUCount;
			int32			fIdleCPUCount;
			CPUPriorityHeap	fCPUHeap;
			spinlock		fCPULock;

			int32			fThreadCount;
			ThreadRunQueue	fRunQueue;
			spinlock		fQueueLock;

			bigtime_t		fActiveTime;
	mutable	seqlock			fActiveTimeLock;

			int32			fLoad;
			int32			fCurrentLoad;
			uint32			fLoadMeasurementEpoch;
			bigtime_t		fLastLoadUpdate;
			rw_spinlock		fLoadLock;

			friend class DebugDumper;
};


class CoreLoadHeap : public MinMaxHeap<CoreEntry, int32> {
public:
							CoreLoadHeap() { }
							CoreLoadHeap(int32 coreCount);
};
// gPackageEntries are used to decide which core should be woken up from the
// idle state. When aiming for performance we should use as many packages as
// possible with as few cores active in each package as possible (so that a
// package can enter a boost mode if it has one, and the active cores have
// more of the shared cache to themselves). If power saving is the main
// priority we should keep the active cores in as few packages as possible
// (so that the other packages can enter deep sleep states). The heap stores
// only packages with at least one core active and one core idle. Packages
// with all cores idle are stored in gIdlePackageList (in LIFO manner).
class PackageEntry : public DoublyLinkedListLinkImpl<PackageEntry> {
public:
	inline	void			CoreGoesIdle(CoreEntry* core);
	inline	void			CoreWakesUp(CoreEntry* core);

	inline	CoreEntry*		GetIdleCore() const;

			void			AddIdleCore(CoreEntry* core);
			void			RemoveIdleCore(CoreEntry* core);

	static inline	PackageEntry*	GetMostIdlePackage();
	static inline	PackageEntry*	GetLeastIdlePackage();

private:
			DoublyLinkedList<CoreEntry>	fIdleCores;
			int32			fIdleCoreCount;
			int32			fCoreCount;
			rw_spinlock		fCoreLock;

			friend class DebugDumper;
};
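// A hedged sketch of how a scheduler mode might use this interface
// (illustrative caller code only): performance-oriented modes spread work
// across packages, power-saving modes consolidate it.
//
//	// Performance: prefer an idle core in the most idle package.
//	PackageEntry* package = PackageEntry::GetMostIdlePackage();
//	// Power saving: prefer an idle core in an already-active package.
//	// PackageEntry* package = PackageEntry::GetLeastIdlePackage();
//	CoreEntry* target = package != NULL ? package->GetIdleCore() : NULL;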
typedef DoublyLinkedList<PackageEntry> IdlePackageList;


extern CPUEntry* gCPUEntries;

extern CoreEntry* gCoreEntries;
extern CoreLoadHeap gCoreLoadHeap;
extern CoreLoadHeap gCoreHighLoadHeap;
extern rw_spinlock gCoreHeapsLock;
extern int32 gCoreCount;

extern PackageEntry* gPackageEntries;
extern IdlePackageList gIdlePackageList;
extern rw_spinlock gIdlePackageLock;
extern int32 gPackageCount;
inline void
CPUEntry::EnterScheduler()
{
	SCHEDULER_ENTER_FUNCTION();
	acquire_read_spinlock(&fSchedulerModeLock);
}


inline void
CPUEntry::ExitScheduler()
{
	SCHEDULER_ENTER_FUNCTION();
	release_read_spinlock(&fSchedulerModeLock);
}


inline void
CPUEntry::LockScheduler()
{
	SCHEDULER_ENTER_FUNCTION();
	acquire_write_spinlock(&fSchedulerModeLock);
}


inline void
CPUEntry::UnlockScheduler()
{
	SCHEDULER_ENTER_FUNCTION();
	release_write_spinlock(&fSchedulerModeLock);
}


inline void
CPUEntry::LockRunQueue()
{
	SCHEDULER_ENTER_FUNCTION();
	acquire_spinlock(&fQueueLock);
}


inline void
CPUEntry::UnlockRunQueue()
{
	SCHEDULER_ENTER_FUNCTION();
	release_spinlock(&fQueueLock);
}


/* static */ inline CPUEntry*
CPUEntry::GetCPU(int32 cpu)
{
	SCHEDULER_ENTER_FUNCTION();
	return &gCPUEntries[cpu];
}


inline void
CoreEntry::LockCPUHeap()
{
	SCHEDULER_ENTER_FUNCTION();
	acquire_spinlock(&fCPULock);
}


inline void
CoreEntry::UnlockCPUHeap()
{
	SCHEDULER_ENTER_FUNCTION();
	release_spinlock(&fCPULock);
}


inline CPUPriorityHeap*
CoreEntry::CPUHeap()
{
	SCHEDULER_ENTER_FUNCTION();
	return &fCPUHeap;
}


inline int32
CoreEntry::ThreadCount() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fThreadCount + fCPUCount - fIdleCPUCount;
}


inline void
CoreEntry::LockRunQueue()
{
	SCHEDULER_ENTER_FUNCTION();
	acquire_spinlock(&fQueueLock);
}


inline void
CoreEntry::UnlockRunQueue()
{
	SCHEDULER_ENTER_FUNCTION();
	release_spinlock(&fQueueLock);
}
inline void
CoreEntry::IncreaseActiveTime(bigtime_t activeTime)
{
	SCHEDULER_ENTER_FUNCTION();
	WriteSequentialLocker _(fActiveTimeLock);
	fActiveTime += activeTime;
}


inline bigtime_t
CoreEntry::GetActiveTime() const
{
	SCHEDULER_ENTER_FUNCTION();

	bigtime_t activeTime;
	uint32 count;
	do {
		count = acquire_read_seqlock(&fActiveTimeLock);
		activeTime = fActiveTime;
	} while (!release_read_seqlock(&fActiveTimeLock, count));
	return activeTime;
}
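// Note: GetActiveTime() follows the standard seqlock read protocol. The
// reader retries until the sequence count is unchanged across the read,
// which guarantees that no writer (IncreaseActiveTime() holds the write
// side via WriteSequentialLocker) modified fActiveTime in between. Readers
// never block the writer, which keeps the hot write path cheap.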
inline int32
CoreEntry::GetLoad() const
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fCPUCount > 0);
	return fLoad / fCPUCount;
}


inline void
CoreEntry::AddLoad(int32 load, uint32 epoch, bool updateLoad)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(gTrackCoreLoad);
	ASSERT(load >= 0 && load <= kMaxLoad);

	ReadSpinLocker locker(fLoadLock);
	atomic_add(&fCurrentLoad, load);
	if (fLoadMeasurementEpoch != epoch)
		atomic_add(&fLoad, load);
	locker.Unlock();

	if (updateLoad)
		_UpdateLoad(true);
}
inline uint32
CoreEntry::RemoveLoad(int32 load, bool force)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(gTrackCoreLoad);
	ASSERT(load >= 0 && load <= kMaxLoad);

	ReadSpinLocker locker(fLoadLock);
	atomic_add(&fCurrentLoad, -load);
	if (force) {
		atomic_add(&fLoad, -load);
		locker.Unlock();

		_UpdateLoad(true);
	}
	return fLoadMeasurementEpoch;
}
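// The load epoch above avoids double counting when a thread's load leaves
// and re-enters a core: fCurrentLoad always tracks the instantaneous load,
// while fLoad is only increased again when the caller's epoch differs from
// the core's current measurement epoch, i.e. when the load was not already
// accounted for in this measurement interval. A hedged sketch of the
// intended calling pattern (hypothetical caller code):
//
//	uint32 epoch = core->RemoveLoad(threadLoad, false);
//		// ...the thread sleeps or migrates...
//	core->AddLoad(threadLoad, epoch, true);
//		// added to fLoad again only if a new epoch has started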
inline void
CoreEntry::ChangeLoad(int32 delta)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(gTrackCoreLoad);
	ASSERT(delta >= -kMaxLoad && delta <= kMaxLoad);

	if (delta != 0) {
		ReadSpinLocker locker(fLoadLock);
		atomic_add(&fCurrentLoad, delta);
		atomic_add(&fLoad, delta);
	}

	_UpdateLoad();
}
/* PackageEntry::CoreGoesIdle and PackageEntry::CoreWakesUp have to be defined
   before CoreEntry::CPUGoesIdle and CoreEntry::CPUWakesUp. If they weren't,
   GCC2 wouldn't inline them as, apparently, it doesn't do enough optimization
   passes. */
inline void
PackageEntry::CoreGoesIdle(CoreEntry* core)
{
	SCHEDULER_ENTER_FUNCTION();

	WriteSpinLocker _(fCoreLock);

	ASSERT(fIdleCoreCount >= 0);
	ASSERT(fIdleCoreCount < fCoreCount);

	fIdleCoreCount++;
	fIdleCores.Add(core);

	if (fIdleCoreCount == fCoreCount) {
		// the last core went idle, the whole package is idle now
		WriteSpinLocker _(gIdlePackageLock);
		gIdlePackageList.Add(this);
	}
}


inline void
PackageEntry::CoreWakesUp(CoreEntry* core)
{
	SCHEDULER_ENTER_FUNCTION();

	WriteSpinLocker _(fCoreLock);

	ASSERT(fIdleCoreCount > 0);
	ASSERT(fIdleCoreCount <= fCoreCount);

	fIdleCoreCount--;
	fIdleCores.Remove(core);

	if (fIdleCoreCount + 1 == fCoreCount) {
		// the package was fully idle, remove it from the idle list
		WriteSpinLocker _(gIdlePackageLock);
		gIdlePackageList.Remove(this);
	}
}
inline void
CoreEntry::CPUGoesIdle(CPUEntry* /* cpu */)
{
	if (gSingleCore)
		return;

	ASSERT(fIdleCPUCount < fCPUCount);
	if (++fIdleCPUCount == fCPUCount)
		fPackage->CoreGoesIdle(this);
}


inline void
CoreEntry::CPUWakesUp(CPUEntry* /* cpu */)
{
	if (gSingleCore)
		return;

	ASSERT(fIdleCPUCount > 0);
	if (fIdleCPUCount-- == fCPUCount)
		fPackage->CoreWakesUp(this);
}
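// Idle state propagates upward through the topology: a CPU going idle may
// make its core idle (all of its CPUs idle), and a core going idle may make
// its package idle (all of its cores idle), at which point the package is
// published in gIdlePackageList. Waking up walks the same chain in reverse.
// Note the counting trick: CPUGoesIdle() tests the count after the
// increment, while CPUWakesUp() tests it before the decrement; both thus
// fire exactly on the "all CPUs idle" boundary.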
/* static */ inline CoreEntry*
CoreEntry::GetCore(int32 cpu)
{
	SCHEDULER_ENTER_FUNCTION();
	return gCPUEntries[cpu].Core();
}


inline CoreEntry*
PackageEntry::GetIdleCore() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fIdleCores.Last();
}


/* static */ inline PackageEntry*
PackageEntry::GetMostIdlePackage()
{
	SCHEDULER_ENTER_FUNCTION();

	PackageEntry* current = &gPackageEntries[0];
	for (int32 i = 1; i < gPackageCount; i++) {
		if (gPackageEntries[i].fIdleCoreCount > current->fIdleCoreCount)
			current = &gPackageEntries[i];
	}

	if (current->fIdleCoreCount == 0)
		return NULL;

	return current;
}
/* static */ inline PackageEntry*
PackageEntry::GetLeastIdlePackage()
{
	SCHEDULER_ENTER_FUNCTION();

	PackageEntry* package = NULL;

	for (int32 i = 0; i < gPackageCount; i++) {
		PackageEntry* current = &gPackageEntries[i];

		int32 currentIdleCoreCount = current->fIdleCoreCount;
		if (currentIdleCoreCount != 0 && (package == NULL
				|| currentIdleCoreCount < package->fIdleCoreCount)) {
			package = current;
		}
	}

	return package;
}
}	// namespace Scheduler


#endif	// KERNEL_SCHEDULER_CPU_H