vfs: check userland buffers before reading them.
[haiku.git] / src / system / kernel / scheduler / power_saving.cpp
blob9cbe24b8748ea4f7930913b8c27a6fd5a8cd8ac4
1 /*
2 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
3 * Distributed under the terms of the MIT License.
4 */
7 #include <util/atomic.h>
8 #include <util/AutoLock.h>
10 #include "scheduler_common.h"
11 #include "scheduler_cpu.h"
12 #include "scheduler_modes.h"
13 #include "scheduler_profiler.h"
14 #include "scheduler_thread.h"
17 using namespace Scheduler;
// How long after a thread went to sleep its cache affinity to the old core
// is still considered valid (bigtime_t, compared against system_time() —
// presumably microseconds; confirm against bigtime_t's definition).
const bigtime_t kCacheExpire = 100000;

// The core all small tasks get packed onto; NULL while no core has been
// elected yet.  Some paths access it with atomic_pointer_* operations,
// others read/write it plainly.
static CoreEntry* sSmallTaskCore;
25 static void
26 switch_to_mode()
28 sSmallTaskCore = NULL;
32 static void
33 set_cpu_enabled(int32 cpu, bool enabled)
35 if (!enabled)
36 sSmallTaskCore = NULL;
40 static bool
41 has_cache_expired(const ThreadData* threadData)
43 SCHEDULER_ENTER_FUNCTION();
44 if (threadData->WentSleep() == 0)
45 return false;
46 return system_time() - threadData->WentSleep() > kCacheExpire;
50 static CoreEntry*
51 choose_small_task_core()
53 SCHEDULER_ENTER_FUNCTION();
55 ReadSpinLocker coreLocker(gCoreHeapsLock);
56 CoreEntry* core = gCoreLoadHeap.PeekMaximum();
57 if (core == NULL)
58 return sSmallTaskCore;
60 CoreEntry* smallTaskCore
61 = atomic_pointer_test_and_set(&sSmallTaskCore, core, (CoreEntry*)NULL);
62 if (smallTaskCore == NULL)
63 return core;
64 return smallTaskCore;
68 static CoreEntry*
69 choose_idle_core()
71 SCHEDULER_ENTER_FUNCTION();
73 PackageEntry* package = PackageEntry::GetLeastIdlePackage();
75 if (package == NULL)
76 package = gIdlePackageList.Last();
78 if (package != NULL)
79 return package->GetIdleCore();
81 return NULL;
85 static CoreEntry*
86 choose_core(const ThreadData* threadData)
88 SCHEDULER_ENTER_FUNCTION();
90 CoreEntry* core = NULL;
92 // try to pack all threads on one core
93 core = choose_small_task_core();
95 if (core == NULL || core->GetLoad() + threadData->GetLoad() >= kHighLoad) {
96 ReadSpinLocker coreLocker(gCoreHeapsLock);
98 // run immediately on already woken core
99 core = gCoreLoadHeap.PeekMinimum();
100 if (core == NULL) {
101 coreLocker.Unlock();
103 core = choose_idle_core();
105 if (core == NULL) {
106 coreLocker.Lock();
107 core = gCoreHighLoadHeap.PeekMinimum();
112 ASSERT(core != NULL);
113 return core;
117 static CoreEntry*
118 rebalance(const ThreadData* threadData)
120 SCHEDULER_ENTER_FUNCTION();
122 ASSERT(!gSingleCore);
124 CoreEntry* core = threadData->Core();
126 int32 coreLoad = core->GetLoad();
127 int32 threadLoad = threadData->GetLoad() / core->CPUCount();
128 if (coreLoad > kHighLoad) {
129 if (sSmallTaskCore == core) {
130 sSmallTaskCore = NULL;
131 CoreEntry* smallTaskCore = choose_small_task_core();
133 if (threadLoad > coreLoad / 3)
134 return core;
135 return coreLoad > kVeryHighLoad ? smallTaskCore : core;
138 if (threadLoad >= coreLoad / 2)
139 return core;
141 ReadSpinLocker coreLocker(gCoreHeapsLock);
142 CoreEntry* other = gCoreLoadHeap.PeekMaximum();
143 if (other == NULL)
144 other = gCoreHighLoadHeap.PeekMinimum();
145 coreLocker.Unlock();
146 ASSERT(other != NULL);
148 int32 coreNewLoad = coreLoad - threadLoad;
149 int32 otherNewLoad = other->GetLoad() + threadLoad;
150 return coreNewLoad - otherNewLoad >= kLoadDifference / 2 ? other : core;
153 if (coreLoad >= kMediumLoad)
154 return core;
156 CoreEntry* smallTaskCore = choose_small_task_core();
157 if (smallTaskCore == NULL)
158 return core;
159 return smallTaskCore->GetLoad() + threadLoad < kHighLoad
160 ? smallTaskCore : core;
164 static inline void
165 pack_irqs()
167 SCHEDULER_ENTER_FUNCTION();
169 CoreEntry* smallTaskCore = atomic_pointer_get(&sSmallTaskCore);
170 if (smallTaskCore == NULL)
171 return;
173 cpu_ent* cpu = get_cpu_struct();
174 if (smallTaskCore == CoreEntry::GetCore(cpu->cpu_num))
175 return;
177 SpinLocker locker(cpu->irqs_lock);
178 while (list_get_first_item(&cpu->irqs) != NULL) {
179 irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);
180 locker.Unlock();
182 int32 newCPU = smallTaskCore->CPUHeap()->PeekRoot()->ID();
184 if (newCPU != cpu->cpu_num)
185 assign_io_interrupt_to_cpu(irq->irq, newCPU);
187 locker.Lock();
192 static void
193 rebalance_irqs(bool idle)
195 SCHEDULER_ENTER_FUNCTION();
197 if (idle && sSmallTaskCore != NULL) {
198 pack_irqs();
199 return;
202 if (idle || sSmallTaskCore != NULL)
203 return;
205 cpu_ent* cpu = get_cpu_struct();
206 SpinLocker locker(cpu->irqs_lock);
208 irq_assignment* chosen = NULL;
209 irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);
211 while (irq != NULL) {
212 if (chosen == NULL || chosen->load < irq->load)
213 chosen = irq;
214 irq = (irq_assignment*)list_get_next_item(&cpu->irqs, irq);
217 locker.Unlock();
219 if (chosen == NULL || chosen->load < kLowLoad)
220 return;
222 ReadSpinLocker coreLocker(gCoreHeapsLock);
223 CoreEntry* other = gCoreLoadHeap.PeekMinimum();
224 coreLocker.Unlock();
225 if (other == NULL)
226 return;
227 int32 newCPU = other->CPUHeap()->PeekRoot()->ID();
229 CoreEntry* core = CoreEntry::GetCore(smp_get_current_cpu());
230 if (other == core)
231 return;
232 if (other->GetLoad() + kLoadDifference >= core->GetLoad())
233 return;
235 assign_io_interrupt_to_cpu(chosen->irq, newCPU);
239 scheduler_mode_operations gSchedulerPowerSavingMode = {
240 "power saving",
242 2000,
243 500,
244 { 3, 10 },
246 20000,
248 switch_to_mode,
249 set_cpu_enabled,
250 has_cache_expired,
251 choose_core,
252 rebalance,
253 rebalance_irqs,