vfs: check userland buffers before reading them.
[haiku.git] / src / system / kernel / scheduler / low_latency.cpp
blob e3e110a98332967b988c0b42fee190e1db214e59
1 /*
2 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
3 * Distributed under the terms of the MIT License.
4 */
7 #include <util/AutoLock.h>
9 #include "scheduler_common.h"
10 #include "scheduler_cpu.h"
11 #include "scheduler_modes.h"
12 #include "scheduler_profiler.h"
13 #include "scheduler_thread.h"
16 using namespace Scheduler;
19 const bigtime_t kCacheExpire = 100000;
static void
switch_to_mode()
{
	// Nothing to do here: the low latency mode requires no state
	// transition work when it becomes the active scheduler mode.
}
28 static void
29 set_cpu_enabled(int32 /* cpu */, bool /* enabled */)
34 static bool
35 has_cache_expired(const ThreadData* threadData)
37 SCHEDULER_ENTER_FUNCTION();
38 if (threadData->WentSleepActive() == 0)
39 return false;
40 CoreEntry* core = threadData->Core();
41 bigtime_t activeTime = core->GetActiveTime();
42 return activeTime - threadData->WentSleepActive() > kCacheExpire;
46 static CoreEntry*
47 choose_core(const ThreadData* /* threadData */)
49 SCHEDULER_ENTER_FUNCTION();
51 // wake new package
52 PackageEntry* package = gIdlePackageList.Last();
53 if (package == NULL) {
54 // wake new core
55 package = PackageEntry::GetMostIdlePackage();
58 CoreEntry* core = NULL;
59 if (package != NULL)
60 core = package->GetIdleCore();
62 if (core == NULL) {
63 ReadSpinLocker coreLocker(gCoreHeapsLock);
64 // no idle cores, use least occupied core
65 core = gCoreLoadHeap.PeekMinimum();
66 if (core == NULL)
67 core = gCoreHighLoadHeap.PeekMinimum();
70 ASSERT(core != NULL);
71 return core;
75 static CoreEntry*
76 rebalance(const ThreadData* threadData)
78 SCHEDULER_ENTER_FUNCTION();
80 CoreEntry* core = threadData->Core();
81 ASSERT(core != NULL);
83 // Get the least loaded core.
84 ReadSpinLocker coreLocker(gCoreHeapsLock);
85 CoreEntry* other = gCoreLoadHeap.PeekMinimum();
86 if (other == NULL)
87 other = gCoreHighLoadHeap.PeekMinimum();
88 coreLocker.Unlock();
89 ASSERT(other != NULL);
91 // Check if the least loaded core is significantly less loaded than
92 // the current one.
93 int32 coreLoad = core->GetLoad();
94 int32 otherLoad = other->GetLoad();
95 if (other == core || otherLoad + kLoadDifference >= coreLoad)
96 return core;
98 // Check whether migrating the current thread would result in both core
99 // loads become closer to the average.
100 int32 difference = coreLoad - otherLoad - kLoadDifference;
101 ASSERT(difference > 0);
103 int32 threadLoad = threadData->GetLoad() / core->CPUCount();
104 return difference >= threadLoad ? other : core;
108 static void
109 rebalance_irqs(bool idle)
111 SCHEDULER_ENTER_FUNCTION();
113 if (idle)
114 return;
116 cpu_ent* cpu = get_cpu_struct();
117 SpinLocker locker(cpu->irqs_lock);
119 irq_assignment* chosen = NULL;
120 irq_assignment* irq = (irq_assignment*)list_get_first_item(&cpu->irqs);
122 int32 totalLoad = 0;
123 while (irq != NULL) {
124 if (chosen == NULL || chosen->load < irq->load)
125 chosen = irq;
126 totalLoad += irq->load;
127 irq = (irq_assignment*)list_get_next_item(&cpu->irqs, irq);
130 locker.Unlock();
132 if (chosen == NULL || totalLoad < kLowLoad)
133 return;
135 ReadSpinLocker coreLocker(gCoreHeapsLock);
136 CoreEntry* other = gCoreLoadHeap.PeekMinimum();
137 if (other == NULL)
138 other = gCoreHighLoadHeap.PeekMinimum();
139 coreLocker.Unlock();
141 int32 newCPU = other->CPUHeap()->PeekRoot()->ID();
143 ASSERT(other != NULL);
145 CoreEntry* core = CoreEntry::GetCore(cpu->cpu_num);
146 if (other == core)
147 return;
148 if (other->GetLoad() + kLoadDifference >= core->GetLoad())
149 return;
151 assign_io_interrupt_to_cpu(chosen->irq, newCPU);
155 scheduler_mode_operations gSchedulerLowLatencyMode = {
156 "low latency",
158 1000,
159 100,
160 { 2, 5 },
162 5000,
164 switch_to_mode,
165 set_cpu_enabled,
166 has_cache_expired,
167 choose_core,
168 rebalance,
169 rebalance_irqs,