/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/* This file contains the cpu functions (init, etc). */


#include <cpu.h>
#include <arch/cpu.h>

#include <string.h>

#include <new>
	// for the std::nothrow allocations below

#include <cpufreq.h>
#include <cpuidle.h>

#include <boot/kernel_args.h>
#include <kscheduler.h>
#include <thread_types.h>
#include <util/AutoLock.h>


/* global per-cpu structure */
cpu_ent gCPU[SMP_MAX_CPUS];

uint32 gCPUCacheLevelCount;
static cpu_topology_node sCPUTopology;

static cpufreq_module_info* sCPUPerformanceModule;
static cpuidle_module_info* sCPUIdleModule;

// guards the CPU enable/disable path in _user_set_cpu_enabled()
static spinlock sSetCpuLock;


status_t
cpu_init(kernel_args *args)
{
	return arch_cpu_init(args);
}


status_t
cpu_init_percpu(kernel_args *args, int curr_cpu)
{
	return arch_cpu_init_percpu(args, curr_cpu);
}


status_t
cpu_init_post_vm(kernel_args *args)
{
	return arch_cpu_init_post_vm(args);
}
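

// Scans all modules under CPUFREQ_MODULES_PREFIX and keeps the highest-ranked
// one in sCPUPerformanceModule; every other module opened along the way is
// put back.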
static void
load_cpufreq_module()
{
	void* cookie = open_module_list(CPUFREQ_MODULES_PREFIX);

	while (true) {
		char name[B_FILE_NAME_LENGTH];
		size_t nameLength = sizeof(name);
		cpufreq_module_info* current = NULL;

		if (read_next_module_name(cookie, name, &nameLength) != B_OK)
			break;

		if (get_module(name, (module_info**)&current) == B_OK) {
			dprintf("found cpufreq module: %s\n", name);

			if (sCPUPerformanceModule != NULL) {
				if (sCPUPerformanceModule->rank < current->rank) {
					put_module(sCPUPerformanceModule->info.name);
					sCPUPerformanceModule = current;
				} else
					put_module(name);
			} else
				sCPUPerformanceModule = current;
		}
	}

	close_module_list(cookie);

	if (sCPUPerformanceModule == NULL)
		dprintf("no valid cpufreq module found\n");
}
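

// Selects the highest-ranked cpuidle module, using the same policy as
// load_cpufreq_module() above.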
static void
load_cpuidle_module()
{
	void* cookie = open_module_list(CPUIDLE_MODULES_PREFIX);

	while (true) {
		char name[B_FILE_NAME_LENGTH];
		size_t nameLength = sizeof(name);
		cpuidle_module_info* current = NULL;

		if (read_next_module_name(cookie, name, &nameLength) != B_OK)
			break;

		if (get_module(name, (module_info**)&current) == B_OK) {
			dprintf("found cpuidle module: %s\n", name);

			if (sCPUIdleModule != NULL) {
				if (sCPUIdleModule->rank < current->rank) {
					put_module(sCPUIdleModule->info.name);
					sCPUIdleModule = current;
				} else
					put_module(name);
			} else
				sCPUIdleModule = current;
		}
	}

	close_module_list(cookie);

	if (sCPUIdleModule == NULL)
		dprintf("no valid cpuidle module found\n");
}


status_t
cpu_init_post_modules(kernel_args *args)
{
	status_t result = arch_cpu_init_post_modules(args);
	if (result != B_OK)
		return result;

	load_cpufreq_module();
	load_cpuidle_module();
	return B_OK;
}


status_t
cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
{
	// set the cpu number in the local cpu structure so that
	// we can use it for get_current_cpu
	memset(&gCPU[curr_cpu], 0, sizeof(gCPU[curr_cpu]));
	gCPU[curr_cpu].cpu_num = curr_cpu;

	list_init(&gCPU[curr_cpu].irqs);
	B_INITIALIZE_SPINLOCK(&gCPU[curr_cpu].irqs_lock);

	return arch_cpu_preboot_init_percpu(args, curr_cpu);
}
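

// Returns the total time the given CPU has spent busy. The value is read
// under a seqlock; the loop retries whenever a writer updated active_time
// concurrently, so the 64 bit counter is always read consistently.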
bigtime_t
cpu_get_active_time(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return 0;

	bigtime_t activeTime;
	uint32 count;

	do {
		count = acquire_read_seqlock(&gCPU[cpu].active_time_lock);
		activeTime = gCPU[cpu].active_time;
	} while (!release_read_seqlock(&gCPU[cpu].active_time_lock, count));

	return activeTime;
}


void
clear_caches(void *address, size_t length, uint32 flags)
{
	// ToDo: implement me!
}
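

// Allocates the child with the given id one topology level below "node".
// Levels above SMT get a zero-initialized children array sized by
// maxID[level - 1]; SMT nodes are leaves.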
static status_t
cpu_create_topology_node(cpu_topology_node* node, int32* maxID, int32 id)
{
	cpu_topology_level level = static_cast<cpu_topology_level>(node->level - 1);
	ASSERT(level >= 0);

	cpu_topology_node* newNode = new(std::nothrow) cpu_topology_node;
	if (newNode == NULL)
		return B_NO_MEMORY;
	node->children[id] = newNode;

	newNode->level = level;
	if (level != CPU_TOPOLOGY_SMT) {
		newNode->children_count = maxID[level - 1];
		newNode->children
			= new(std::nothrow) cpu_topology_node*[maxID[level - 1]];
		if (newNode->children == NULL)
			return B_NO_MEMORY;

		memset(newNode->children, 0,
			maxID[level - 1] * sizeof(cpu_topology_node*));
	} else {
		newNode->children_count = 0;
		newNode->children = NULL;
	}

	return B_OK;
}
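

// Compacts the sparse children arrays left over from construction: NULL
// entries are squeezed out and every non-SMT child is renumbered with the
// next sequential id of its level.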
static void
cpu_rebuild_topology_tree(cpu_topology_node* node, int32* lastID)
{
	if (node->children == NULL)
		return;

	int32 count = 0;
	for (int32 i = 0; i < node->children_count; i++) {
		if (node->children[i] == NULL)
			continue;

		if (count != i)
			node->children[count] = node->children[i];

		if (node->children[count]->level != CPU_TOPOLOGY_SMT)
			node->children[count]->id = lastID[node->children[count]->level]++;

		cpu_rebuild_topology_tree(node->children[count], lastID);
		count++;
	}
	node->children_count = count;
}
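

// Builds the topology tree in two passes: the per-level maximum topology ids
// reported by the CPUs determine the worst-case size of each children array,
// then every CPU is inserted by walking from the root down to its SMT leaf.
// Afterwards the tree is compacted and renumbered.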
status_t
cpu_build_topology_tree(void)
{
	sCPUTopology.level = CPU_TOPOLOGY_LEVELS;

	int32 maxID[CPU_TOPOLOGY_LEVELS];
	memset(&maxID, 0, sizeof(maxID));

	const int32 kCPUCount = smp_get_num_cpus();
	for (int32 i = 0; i < kCPUCount; i++) {
		for (int32 j = 0; j < CPU_TOPOLOGY_LEVELS; j++)
			maxID[j] = max_c(maxID[j], gCPU[i].topology_id[j]);
	}

	for (int32 j = 0; j < CPU_TOPOLOGY_LEVELS; j++)
		maxID[j]++;

	sCPUTopology.children_count = maxID[CPU_TOPOLOGY_LEVELS - 1];
	sCPUTopology.children
		= new(std::nothrow) cpu_topology_node*[maxID[CPU_TOPOLOGY_LEVELS - 1]];
	if (sCPUTopology.children == NULL)
		return B_NO_MEMORY;
	memset(sCPUTopology.children, 0,
		maxID[CPU_TOPOLOGY_LEVELS - 1] * sizeof(cpu_topology_node*));

	for (int32 i = 0; i < kCPUCount; i++) {
		cpu_topology_node* node = &sCPUTopology;
		for (int32 j = CPU_TOPOLOGY_LEVELS - 1; j >= 0; j--) {
			int32 id = gCPU[i].topology_id[j];
			if (node->children[id] == NULL) {
				status_t result = cpu_create_topology_node(node, maxID, id);
				if (result != B_OK)
					return result;
			}

			node = node->children[id];
		}

		ASSERT(node->level == CPU_TOPOLOGY_SMT);
		node->id = i;
	}

	int32 lastID[CPU_TOPOLOGY_LEVELS];
	memset(&lastID, 0, sizeof(lastID));
	cpu_rebuild_topology_tree(&sCPUTopology, lastID);

	return B_OK;
}


const cpu_topology_node*
get_cpu_topology(void)
{
	return &sCPUTopology;
}
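

// Propagates the scheduler's mode (e.g. low latency vs. power saving) to the
// loaded cpufreq and cpuidle modules.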
void
cpu_set_scheduler_mode(enum scheduler_mode mode)
{
	if (sCPUPerformanceModule != NULL)
		sCPUPerformanceModule->cpufreq_set_scheduler_mode(mode);
	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_set_scheduler_mode(mode);
}


status_t
increase_cpu_performance(int delta)
{
	if (sCPUPerformanceModule != NULL)
		return sCPUPerformanceModule->cpufreq_increase_performance(delta);
	return B_NOT_SUPPORTED;
}


status_t
decrease_cpu_performance(int delta)
{
	if (sCPUPerformanceModule != NULL)
		return sCPUPerformanceModule->cpufreq_decrease_performance(delta);
	return B_NOT_SUPPORTED;
}
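

// Idles the current CPU, preferring the loaded cpuidle module and falling
// back to the architecture's idle instruction. Interrupts must be enabled,
// or the CPU could go to sleep with no event left to wake it.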
void
cpu_idle(void)
{
#if KDEBUG
	if (!are_interrupts_enabled())
		panic("cpu_idle() called with interrupts disabled.");
#endif

	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_idle();
	else
		arch_cpu_idle();
}
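

// Waits for *variable to become "test". A cpuidle module may use hardware
// monitoring (such as MONITOR/MWAIT) for this; the plain fallback only
// executes a CPU pause, so callers re-check the condition in a loop.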
void
cpu_wait(int32* variable, int32 test)
{
	if (sCPUIdleModule != NULL)
		sCPUIdleModule->cpuidle_wait(variable, test);
	else
		arch_cpu_pause();
}


// #pragma mark -


void
_user_clear_caches(void *address, size_t length, uint32 flags)
{
	clear_caches(address, length, flags);
}


bool
_user_cpu_enabled(int32 cpu)
{
	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return false;

	return !gCPU[cpu].disabled;
}
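

// Syscall backend for enabling or disabling a CPU. The last running CPU
// cannot be disabled; when disabling, the call yields until the target CPU
// has dropped into its idle thread (unless it is reenabled in the meantime).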
status_t
_user_set_cpu_enabled(int32 cpu, bool enabled)
{
	int32 i, count;

	if (cpu < 0 || cpu >= smp_get_num_cpus())
		return B_BAD_VALUE;

	// We need to lock here to make sure that no one can disable
	// the last CPU

	InterruptsSpinLocker locker(sSetCpuLock);

	if (!enabled) {
		// check if this is the last CPU to be disabled
		for (i = 0, count = 0; i < smp_get_num_cpus(); i++) {
			if (!gCPU[i].disabled)
				count++;
		}

		if (count == 1)
			return B_NOT_ALLOWED;
	}

	bool oldState = gCPU[cpu].disabled;

	if (oldState != !enabled)
		scheduler_set_cpu_enabled(cpu, enabled);

	if (!enabled) {
		if (smp_get_current_cpu() == cpu) {
			locker.Unlock();
			thread_yield();
			locker.Lock();
		}

		// someone reenabled the CPU while we were rescheduling
		if (!gCPU[cpu].disabled)
			return B_OK;

		ASSERT(smp_get_current_cpu() != cpu);

		// wait until the CPU is running nothing but its idle thread
		while (!thread_is_idle_thread(gCPU[cpu].running_thread)) {
			locker.Unlock();
			thread_yield();
			locker.Lock();

			if (!gCPU[cpu].disabled)
				return B_OK;
			ASSERT(smp_get_current_cpu() != cpu);
		}
	}

	return B_OK;
}