/*
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
10 /* This file contains the cpu functions (init, etc). */
21 #include <boot/kernel_args.h>
22 #include <kscheduler.h>
23 #include <thread_types.h>
24 #include <util/AutoLock.h>
/* global per-cpu structure */
cpu_ent gCPU[SMP_MAX_CPUS];

// number of CPU cache levels (filled in by the arch code — not set here)
uint32 gCPUCacheLevelCount;

// root of the CPU topology tree built by cpu_build_topology_tree()
static cpu_topology_node sCPUTopology;

// highest-ranked cpufreq/cpuidle modules, chosen at post-modules init time;
// NULL when no usable module was found
static cpufreq_module_info* sCPUPerformanceModule;
static cpuidle_module_info* sCPUIdleModule;

// serializes concurrent CPU enable/disable requests (_user_set_cpu_enabled())
static spinlock sSetCpuLock;
40 cpu_init(kernel_args
*args
)
42 return arch_cpu_init(args
);
47 cpu_init_percpu(kernel_args
*args
, int curr_cpu
)
49 return arch_cpu_init_percpu(args
, curr_cpu
);
54 cpu_init_post_vm(kernel_args
*args
)
56 return arch_cpu_init_post_vm(args
);
63 void* cookie
= open_module_list(CPUFREQ_MODULES_PREFIX
);
66 char name
[B_FILE_NAME_LENGTH
];
67 size_t nameLength
= sizeof(name
);
68 cpufreq_module_info
* current
= NULL
;
70 if (read_next_module_name(cookie
, name
, &nameLength
) != B_OK
)
73 if (get_module(name
, (module_info
**)¤t
) == B_OK
) {
74 dprintf("found cpufreq module: %s\n", name
);
76 if (sCPUPerformanceModule
!= NULL
) {
77 if (sCPUPerformanceModule
->rank
< current
->rank
) {
78 put_module(sCPUPerformanceModule
->info
.name
);
79 sCPUPerformanceModule
= current
;
83 sCPUPerformanceModule
= current
;
87 close_module_list(cookie
);
89 if (sCPUPerformanceModule
== NULL
)
90 dprintf("no valid cpufreq module found\n");
97 void* cookie
= open_module_list(CPUIDLE_MODULES_PREFIX
);
100 char name
[B_FILE_NAME_LENGTH
];
101 size_t nameLength
= sizeof(name
);
102 cpuidle_module_info
* current
= NULL
;
104 if (read_next_module_name(cookie
, name
, &nameLength
) != B_OK
)
107 if (get_module(name
, (module_info
**)¤t
) == B_OK
) {
108 dprintf("found cpuidle module: %s\n", name
);
110 if (sCPUIdleModule
!= NULL
) {
111 if (sCPUIdleModule
->rank
< current
->rank
) {
112 put_module(sCPUIdleModule
->info
.name
);
113 sCPUIdleModule
= current
;
117 sCPUIdleModule
= current
;
121 close_module_list(cookie
);
123 if (sCPUIdleModule
== NULL
)
124 dprintf("no valid cpuidle module found\n");
129 cpu_init_post_modules(kernel_args
*args
)
131 status_t result
= arch_cpu_init_post_modules(args
);
135 load_cpufreq_module();
136 load_cpuidle_module();
142 cpu_preboot_init_percpu(kernel_args
*args
, int curr_cpu
)
144 // set the cpu number in the local cpu structure so that
145 // we can use it for get_current_cpu
146 memset(&gCPU
[curr_cpu
], 0, sizeof(gCPU
[curr_cpu
]));
147 gCPU
[curr_cpu
].cpu_num
= curr_cpu
;
149 list_init(&gCPU
[curr_cpu
].irqs
);
150 B_INITIALIZE_SPINLOCK(&gCPU
[curr_cpu
].irqs_lock
);
152 return arch_cpu_preboot_init_percpu(args
, curr_cpu
);
157 cpu_get_active_time(int32 cpu
)
159 if (cpu
< 0 || cpu
> smp_get_num_cpus())
162 bigtime_t activeTime
;
166 count
= acquire_read_seqlock(&gCPU
[cpu
].active_time_lock
);
167 activeTime
= gCPU
[cpu
].active_time
;
168 } while (!release_read_seqlock(&gCPU
[cpu
].active_time_lock
, count
));
175 clear_caches(void *address
, size_t length
, uint32 flags
)
177 // ToDo: implement me!
182 cpu_create_topology_node(cpu_topology_node
* node
, int32
* maxID
, int32 id
)
184 cpu_topology_level level
= static_cast<cpu_topology_level
>(node
->level
- 1);
187 cpu_topology_node
* newNode
= new(std::nothrow
) cpu_topology_node
;
190 node
->children
[id
] = newNode
;
192 newNode
->level
= level
;
193 if (level
!= CPU_TOPOLOGY_SMT
) {
194 newNode
->children_count
= maxID
[level
- 1];
196 = new(std::nothrow
) cpu_topology_node
*[maxID
[level
- 1]];
197 if (newNode
->children
== NULL
)
200 memset(newNode
->children
, 0,
201 maxID
[level
- 1] * sizeof(cpu_topology_node
*));
203 newNode
->children_count
= 0;
204 newNode
->children
= NULL
;
212 cpu_rebuild_topology_tree(cpu_topology_node
* node
, int32
* lastID
)
214 if (node
->children
== NULL
)
218 for (int32 i
= 0; i
< node
->children_count
; i
++) {
219 if (node
->children
[i
] == NULL
)
223 node
->children
[count
] = node
->children
[i
];
225 if (node
->children
[count
]->level
!= CPU_TOPOLOGY_SMT
)
226 node
->children
[count
]->id
= lastID
[node
->children
[count
]->level
]++;
228 cpu_rebuild_topology_tree(node
->children
[count
], lastID
);
231 node
->children_count
= count
;
236 cpu_build_topology_tree(void)
238 sCPUTopology
.level
= CPU_TOPOLOGY_LEVELS
;
240 int32 maxID
[CPU_TOPOLOGY_LEVELS
];
241 memset(&maxID
, 0, sizeof(maxID
));
243 const int32 kCPUCount
= smp_get_num_cpus();
244 for (int32 i
= 0; i
< kCPUCount
; i
++) {
245 for (int32 j
= 0; j
< CPU_TOPOLOGY_LEVELS
; j
++)
246 maxID
[j
] = max_c(maxID
[j
], gCPU
[i
].topology_id
[j
]);
249 for (int32 j
= 0; j
< CPU_TOPOLOGY_LEVELS
; j
++)
252 sCPUTopology
.children_count
= maxID
[CPU_TOPOLOGY_LEVELS
- 1];
253 sCPUTopology
.children
254 = new(std::nothrow
) cpu_topology_node
*[maxID
[CPU_TOPOLOGY_LEVELS
- 1]];
255 if (sCPUTopology
.children
== NULL
)
257 memset(sCPUTopology
.children
, 0,
258 maxID
[CPU_TOPOLOGY_LEVELS
- 1] * sizeof(cpu_topology_node
*));
260 for (int32 i
= 0; i
< kCPUCount
; i
++) {
261 cpu_topology_node
* node
= &sCPUTopology
;
262 for (int32 j
= CPU_TOPOLOGY_LEVELS
- 1; j
>= 0; j
--) {
263 int32 id
= gCPU
[i
].topology_id
[j
];
264 if (node
->children
[id
] == NULL
) {
265 status_t result
= cpu_create_topology_node(node
, maxID
, id
);
270 node
= node
->children
[id
];
273 ASSERT(node
->level
== CPU_TOPOLOGY_SMT
);
277 int32 lastID
[CPU_TOPOLOGY_LEVELS
];
278 memset(&lastID
, 0, sizeof(lastID
));
279 cpu_rebuild_topology_tree(&sCPUTopology
, lastID
);
285 const cpu_topology_node
*
286 get_cpu_topology(void)
288 return &sCPUTopology
;
293 cpu_set_scheduler_mode(enum scheduler_mode mode
)
295 if (sCPUPerformanceModule
!= NULL
)
296 sCPUPerformanceModule
->cpufreq_set_scheduler_mode(mode
);
297 if (sCPUIdleModule
!= NULL
)
298 sCPUIdleModule
->cpuidle_set_scheduler_mode(mode
);
303 increase_cpu_performance(int delta
)
305 if (sCPUPerformanceModule
!= NULL
)
306 return sCPUPerformanceModule
->cpufreq_increase_performance(delta
);
307 return B_NOT_SUPPORTED
;
312 decrease_cpu_performance(int delta
)
314 if (sCPUPerformanceModule
!= NULL
)
315 return sCPUPerformanceModule
->cpufreq_decrease_performance(delta
);
316 return B_NOT_SUPPORTED
;
324 if (!are_interrupts_enabled())
325 panic("cpu_idle() called with interrupts disabled.");
328 if (sCPUIdleModule
!= NULL
)
329 sCPUIdleModule
->cpuidle_idle();
336 cpu_wait(int32
* variable
, int32 test
)
338 if (sCPUIdleModule
!= NULL
)
339 sCPUIdleModule
->cpuidle_wait(variable
, test
);
349 _user_clear_caches(void *address
, size_t length
, uint32 flags
)
351 clear_caches(address
, length
, flags
);
356 _user_cpu_enabled(int32 cpu
)
358 if (cpu
< 0 || cpu
>= smp_get_num_cpus())
361 return !gCPU
[cpu
].disabled
;
366 _user_set_cpu_enabled(int32 cpu
, bool enabled
)
370 if (cpu
< 0 || cpu
>= smp_get_num_cpus())
373 // We need to lock here to make sure that no one can disable
376 InterruptsSpinLocker
locker(sSetCpuLock
);
379 // check if this is the last CPU to be disabled
380 for (i
= 0, count
= 0; i
< smp_get_num_cpus(); i
++) {
381 if (!gCPU
[i
].disabled
)
386 return B_NOT_ALLOWED
;
389 bool oldState
= gCPU
[cpu
].disabled
;
391 if (oldState
!= !enabled
)
392 scheduler_set_cpu_enabled(cpu
, enabled
);
395 if (smp_get_current_cpu() == cpu
) {
401 // someone reenabled the CPU while we were rescheduling
402 if (!gCPU
[cpu
].disabled
)
405 ASSERT(smp_get_current_cpu() != cpu
);
406 while (!thread_is_idle_thread(gCPU
[cpu
].running_thread
)) {
411 if (!gCPU
[cpu
].disabled
)
413 ASSERT(smp_get_current_cpu() != cpu
);