/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_SCHEDULER_CPU_H
#define KERNEL_SCHEDULER_CPU_H


#include <OS.h>

#include <thread.h>
#include <util/AutoLock.h>
#include <util/Heap.h>
#include <util/MinMaxHeap.h>

#include <cpufreq.h>

#include "RunQueue.h"
#include "scheduler_common.h"
#include "scheduler_modes.h"
#include "scheduler_profiler.h"


namespace Scheduler {


class DebugDumper;

struct ThreadData;
class ThreadProcessing;

struct CPUEntry;
struct CoreEntry;
struct PackageEntry;

// The run queues. They hold the threads ready to run, ordered by priority.
// There is one queue per schedulable target per core. Additionally, each
// logical processor has its own run queue (CPUEntry::fRunQueue) used for
// scheduling pinned threads.
class ThreadRunQueue : public RunQueue<ThreadData, THREAD_MAX_SET_PRIORITY> {
public:
	void Dump() const;
};
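
// A minimal usage sketch (illustrative only; it assumes the PushBack()/
// PeekMaximum() interface inherited from RunQueue and
// ThreadData::GetEffectivePriority() from scheduler_thread.h):
//
//	ThreadRunQueue queue;
//	queue.PushBack(threadData, threadData->GetEffectivePriority());
//	ThreadData* next = queue.PeekMaximum();	// highest-priority ready thread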


class CPUEntry : public HeapLinkImpl<CPUEntry, int32> {
public:
	CPUEntry();

	void Init(int32 id, CoreEntry* core);

	inline int32 ID() const { return fCPUNumber; }
	inline CoreEntry* Core() const { return fCore; }

	void Start();
	void Stop();

	inline void EnterScheduler();
	inline void ExitScheduler();

	inline void LockScheduler();
	inline void UnlockScheduler();

	inline void LockRunQueue();
	inline void UnlockRunQueue();

	void PushFront(ThreadData* thread, int32 priority);
	void PushBack(ThreadData* thread, int32 priority);
	void Remove(ThreadData* thread);
	inline ThreadData* PeekThread() const;
	ThreadData* PeekIdleThread() const;

	void UpdatePriority(int32 priority);

	inline int32 GetLoad() const { return fLoad; }
	void ComputeLoad();

	ThreadData* ChooseNextThread(ThreadData* oldThread, bool putAtBack);

	void TrackActivity(ThreadData* oldThreadData, ThreadData* nextThreadData);

	void StartQuantumTimer(ThreadData* thread, bool wasPreempted);

	static inline CPUEntry* GetCPU(int32 cpu);

private:
	void _RequestPerformanceLevel(ThreadData* threadData);

	static int32 _RescheduleEvent(timer* /* unused */);
	static int32 _UpdateLoadEvent(timer* /* unused */);

	int32 fCPUNumber;
	CoreEntry* fCore;

	rw_spinlock fSchedulerModeLock;

	ThreadRunQueue fRunQueue;
	spinlock fQueueLock;

	int32 fLoad;

	bigtime_t fMeasureActiveTime;
	bigtime_t fMeasureTime;

	bool fUpdateLoadEvent;

	friend class DebugDumper;
} CACHE_LINE_ALIGN;
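
// How these pieces typically combine during a reschedule (a sketch of one
// plausible call order, not a verbatim copy of scheduler.cpp):
//
//	CPUEntry* cpu = CPUEntry::GetCPU(smp_get_current_cpu());
//	cpu->EnterScheduler();		// read lock: scheduler mode stays stable
//	cpu->LockRunQueue();
//	ThreadData* next = cpu->ChooseNextThread(oldThread, putAtBack);
//	cpu->UnlockRunQueue();
//	cpu->TrackActivity(oldThread, next);
//	cpu->ExitScheduler();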


class CPUPriorityHeap : public Heap<CPUEntry, int32> {
public:
	CPUPriorityHeap() { }
	CPUPriorityHeap(int32 cpuCount);

	void Dump();
};


class CoreEntry : public MinMaxHeapLinkImpl<CoreEntry, int32>,
	public DoublyLinkedListLinkImpl<CoreEntry> {
public:
	CoreEntry();

	void Init(int32 id, PackageEntry* package);

	inline int32 ID() const { return fCoreID; }
	inline PackageEntry* Package() const { return fPackage; }
	inline int32 CPUCount() const { return fCPUCount; }

	inline void LockCPUHeap();
	inline void UnlockCPUHeap();

	inline CPUPriorityHeap* CPUHeap();

	inline int32 ThreadCount() const;

	inline void LockRunQueue();
	inline void UnlockRunQueue();

	void PushFront(ThreadData* thread, int32 priority);
	void PushBack(ThreadData* thread, int32 priority);
	void Remove(ThreadData* thread);
	inline ThreadData* PeekThread() const;

	inline bigtime_t GetActiveTime() const;
	inline void IncreaseActiveTime(bigtime_t activeTime);

	inline int32 GetLoad() const;
	inline uint32 LoadMeasurementEpoch() const { return fLoadMeasurementEpoch; }

	inline void AddLoad(int32 load, uint32 epoch, bool updateLoad);
	inline uint32 RemoveLoad(int32 load, bool force);
	inline void ChangeLoad(int32 delta);

	inline void CPUGoesIdle(CPUEntry* cpu);
	inline void CPUWakesUp(CPUEntry* cpu);

	void AddCPU(CPUEntry* cpu);
	void RemoveCPU(CPUEntry* cpu, ThreadProcessing& threadPostProcessing);

	static inline CoreEntry* GetCore(int32 cpu);

private:
	void _UpdateLoad(bool forceUpdate = false);

	static void _UnassignThread(Thread* thread, void* core);

	int32 fCoreID;
	PackageEntry* fPackage;

	int32 fCPUCount;
	int32 fIdleCPUCount;
	CPUPriorityHeap fCPUHeap;
	spinlock fCPULock;

	int32 fThreadCount;
	ThreadRunQueue fRunQueue;
	spinlock fQueueLock;

	bigtime_t fActiveTime;
	mutable seqlock fActiveTimeLock;

	int32 fLoad;
	int32 fCurrentLoad;
	uint32 fLoadMeasurementEpoch;
	bool fHighLoad;
	bigtime_t fLastLoadUpdate;
	rw_spinlock fLoadLock;

	friend class DebugDumper;
} CACHE_LINE_ALIGN;


class CoreLoadHeap : public MinMaxHeap<CoreEntry, int32> {
public:
	CoreLoadHeap() { }
	CoreLoadHeap(int32 coreCount);

	void Dump();
};


// gPackageEntries are used to decide which core should be woken up from the
// idle state. When aiming for performance we should use as many packages as
// possible, with as few cores active in each package as possible (so that the
// package can enter any boost mode it has and the active cores get more of
// the shared cache for themselves). If power saving is the main priority, we
// should keep the active cores on as few packages as possible (so that the
// other packages can enter deep sleep states). The heap stores only packages
// with at least one core active and one core idle. Packages with all cores
// idle are stored in gIdlePackageList (in LIFO order).
class PackageEntry : public DoublyLinkedListLinkImpl<PackageEntry> {
public:
	PackageEntry();

	void Init(int32 id);

	inline void CoreGoesIdle(CoreEntry* core);
	inline void CoreWakesUp(CoreEntry* core);

	inline CoreEntry* GetIdleCore() const;

	void AddIdleCore(CoreEntry* core);
	void RemoveIdleCore(CoreEntry* core);

	static inline PackageEntry* GetMostIdlePackage();
	static inline PackageEntry* GetLeastIdlePackage();

private:
	int32 fPackageID;

	DoublyLinkedList<CoreEntry> fIdleCores;
	int32 fIdleCoreCount;
	int32 fCoreCount;
	rw_spinlock fCoreLock;

	friend class DebugDumper;
} CACHE_LINE_ALIGN;
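
// A sketch of how the two policies described above map onto this interface
// (illustrative; the actual decisions live in the scheduler mode
// implementations, e.g. low_latency.cpp and power_saving.cpp):
//
//	// performance: spread load, prefer the package with the most idle cores
//	PackageEntry* package = PackageEntry::GetMostIdlePackage();
//	// power saving: consolidate, prefer the package with the fewest idle
//	// cores that still has one
//	// PackageEntry* package = PackageEntry::GetLeastIdlePackage();
//	CoreEntry* core = package != NULL ? package->GetIdleCore() : NULL;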
typedef DoublyLinkedList<PackageEntry> IdlePackageList;


extern CPUEntry* gCPUEntries;

extern CoreEntry* gCoreEntries;
extern CoreLoadHeap gCoreLoadHeap;
extern CoreLoadHeap gCoreHighLoadHeap;
extern rw_spinlock gCoreHeapsLock;
extern int32 gCoreCount;

extern PackageEntry* gPackageEntries;
extern IdlePackageList gIdlePackageList;
extern rw_spinlock gIdlePackageLock;
extern int32 gPackageCount;
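
// Navigating the topology from a CPU number (a minimal sketch; assumes the
// globals above have been initialized by the scheduler):
//
//	CPUEntry* cpu = CPUEntry::GetCPU(cpuID);
//	CoreEntry* core = cpu->Core();		// or CoreEntry::GetCore(cpuID)
//	PackageEntry* package = core->Package();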


inline void
CPUEntry::EnterScheduler()
{
	SCHEDULER_ENTER_FUNCTION();
	acquire_read_spinlock(&fSchedulerModeLock);
}


inline void
CPUEntry::ExitScheduler()
{
	SCHEDULER_ENTER_FUNCTION();
	release_read_spinlock(&fSchedulerModeLock);
}


inline void
CPUEntry::LockScheduler()
{
	SCHEDULER_ENTER_FUNCTION();
	acquire_write_spinlock(&fSchedulerModeLock);
}


inline void
CPUEntry::UnlockScheduler()
{
	SCHEDULER_ENTER_FUNCTION();
	release_write_spinlock(&fSchedulerModeLock);
}


inline void
CPUEntry::LockRunQueue()
{
	SCHEDULER_ENTER_FUNCTION();
	acquire_spinlock(&fQueueLock);
}


inline void
CPUEntry::UnlockRunQueue()
{
	SCHEDULER_ENTER_FUNCTION();
	release_spinlock(&fQueueLock);
}


/* static */ inline CPUEntry*
CPUEntry::GetCPU(int32 cpu)
{
	SCHEDULER_ENTER_FUNCTION();
	return &gCPUEntries[cpu];
}


inline void
CoreEntry::LockCPUHeap()
{
	SCHEDULER_ENTER_FUNCTION();
	acquire_spinlock(&fCPULock);
}


inline void
CoreEntry::UnlockCPUHeap()
{
	SCHEDULER_ENTER_FUNCTION();
	release_spinlock(&fCPULock);
}


inline CPUPriorityHeap*
CoreEntry::CPUHeap()
{
	SCHEDULER_ENTER_FUNCTION();
	return &fCPUHeap;
}


inline int32
CoreEntry::ThreadCount() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fThreadCount + fCPUCount - fIdleCPUCount;
}


inline void
CoreEntry::LockRunQueue()
{
	SCHEDULER_ENTER_FUNCTION();
	acquire_spinlock(&fQueueLock);
}


inline void
CoreEntry::UnlockRunQueue()
{
	SCHEDULER_ENTER_FUNCTION();
	release_spinlock(&fQueueLock);
}


inline void
CoreEntry::IncreaseActiveTime(bigtime_t activeTime)
{
	SCHEDULER_ENTER_FUNCTION();
	WriteSequentialLocker _(fActiveTimeLock);
	fActiveTime += activeTime;
}
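
// GetActiveTime() below pairs with IncreaseActiveTime() through
// fActiveTimeLock, a seqlock: the writer bumps a sequence counter around the
// update, and the lock-free reader retries until release_read_seqlock()
// reports that the counter did not change across its read, i.e. no writer
// ran concurrently. This keeps the hot read path free of spinlock traffic.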


inline bigtime_t
CoreEntry::GetActiveTime() const
{
	SCHEDULER_ENTER_FUNCTION();

	bigtime_t activeTime;
	uint32 count;
	do {
		count = acquire_read_seqlock(&fActiveTimeLock);
		activeTime = fActiveTime;
	} while (!release_read_seqlock(&fActiveTimeLock, count));
	return activeTime;
}


inline int32
CoreEntry::GetLoad() const
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(fCPUCount > 0);
	return fLoad / fCPUCount;
}


inline void
CoreEntry::AddLoad(int32 load, uint32 epoch, bool updateLoad)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(gTrackCoreLoad);
	ASSERT(load >= 0 && load <= kMaxLoad);

	ReadSpinLocker locker(fLoadLock);
	atomic_add(&fCurrentLoad, load);
	if (fLoadMeasurementEpoch != epoch)
		atomic_add(&fLoad, load);
	locker.Unlock();

	if (updateLoad)
		_UpdateLoad(true);
}


inline uint32
CoreEntry::RemoveLoad(int32 load, bool force)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(gTrackCoreLoad);
	ASSERT(load >= 0 && load <= kMaxLoad);

	ReadSpinLocker locker(fLoadLock);
	atomic_add(&fCurrentLoad, -load);
	if (force) {
		atomic_add(&fLoad, -load);
		locker.Unlock();

		_UpdateLoad(true);
	}
	return fLoadMeasurementEpoch;
}
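
// The epoch logic above lets a thread's load migrate between cores cheaply:
// fCurrentLoad always tracks the instantaneous sum, while fLoad is only
// charged once per measurement interval, when the caller's remembered epoch
// differs from LoadMeasurementEpoch(). A sketch of the expected calling
// pattern (illustrative; the real bookkeeping lives in ThreadData):
//
//	uint32 epoch = core->LoadMeasurementEpoch();
//	core->AddLoad(threadLoad, epoch, false);
//	// ... thread runs, then leaves the core ...
//	epoch = core->RemoveLoad(threadLoad, false);	// remember for next time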


inline void
CoreEntry::ChangeLoad(int32 delta)
{
	SCHEDULER_ENTER_FUNCTION();

	ASSERT(gTrackCoreLoad);
	ASSERT(delta >= -kMaxLoad && delta <= kMaxLoad);

	if (delta != 0) {
		ReadSpinLocker locker(fLoadLock);
		atomic_add(&fCurrentLoad, delta);
		atomic_add(&fLoad, delta);
	}

	_UpdateLoad();
}


/* PackageEntry::CoreGoesIdle and PackageEntry::CoreWakesUp have to be defined
   before CoreEntry::CPUGoesIdle and CoreEntry::CPUWakesUp. If they weren't,
   GCC2 wouldn't inline them as, apparently, it doesn't do enough optimization
   passes.
*/
inline void
PackageEntry::CoreGoesIdle(CoreEntry* core)
{
	SCHEDULER_ENTER_FUNCTION();

	WriteSpinLocker _(fCoreLock);

	ASSERT(fIdleCoreCount >= 0);
	ASSERT(fIdleCoreCount < fCoreCount);

	fIdleCoreCount++;
	fIdleCores.Add(core);

	if (fIdleCoreCount == fCoreCount) {
		// package goes idle
		WriteSpinLocker _(gIdlePackageLock);
		gIdlePackageList.Add(this);
	}
}


inline void
PackageEntry::CoreWakesUp(CoreEntry* core)
{
	SCHEDULER_ENTER_FUNCTION();

	WriteSpinLocker _(fCoreLock);

	ASSERT(fIdleCoreCount > 0);
	ASSERT(fIdleCoreCount <= fCoreCount);

	fIdleCoreCount--;
	fIdleCores.Remove(core);

	if (fIdleCoreCount + 1 == fCoreCount) {
		// package wakes up
		WriteSpinLocker _(gIdlePackageLock);
		gIdlePackageList.Remove(this);
	}
}


inline void
CoreEntry::CPUGoesIdle(CPUEntry* /* cpu */)
{
	if (gSingleCore)
		return;

	ASSERT(fIdleCPUCount < fCPUCount);
	if (++fIdleCPUCount == fCPUCount)
		fPackage->CoreGoesIdle(this);
}


inline void
CoreEntry::CPUWakesUp(CPUEntry* /* cpu */)
{
	if (gSingleCore)
		return;

	ASSERT(fIdleCPUCount > 0);

	// the core as a whole wakes up only if all of its CPUs were idle before
	// this decrement
	if (fIdleCPUCount-- == fCPUCount)
		fPackage->CoreWakesUp(this);
}


/* static */ inline CoreEntry*
CoreEntry::GetCore(int32 cpu)
{
	SCHEDULER_ENTER_FUNCTION();
	return gCPUEntries[cpu].Core();
}


inline CoreEntry*
PackageEntry::GetIdleCore() const
{
	SCHEDULER_ENTER_FUNCTION();
	return fIdleCores.Last();
}


/* static */ inline PackageEntry*
PackageEntry::GetMostIdlePackage()
{
	SCHEDULER_ENTER_FUNCTION();

	PackageEntry* current = &gPackageEntries[0];
	for (int32 i = 1; i < gPackageCount; i++) {
		if (gPackageEntries[i].fIdleCoreCount > current->fIdleCoreCount)
			current = &gPackageEntries[i];
	}

	if (current->fIdleCoreCount == 0)
		return NULL;

	return current;
}


/* static */ inline PackageEntry*
PackageEntry::GetLeastIdlePackage()
{
	SCHEDULER_ENTER_FUNCTION();

	PackageEntry* package = NULL;

	for (int32 i = 0; i < gPackageCount; i++) {
		PackageEntry* current = &gPackageEntries[i];

		int32 currentIdleCoreCount = current->fIdleCoreCount;
		if (currentIdleCoreCount != 0 && (package == NULL
				|| currentIdleCoreCount < package->fIdleCoreCount)) {
			package = current;
		}
	}

	return package;
}


}	// namespace Scheduler


#endif	// KERNEL_SCHEDULER_CPU_H