/*
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <cpu.h>

#include <string.h>
#include <stdlib.h>
#include <stdio.h>

#include <algorithm>

#include <ACPI.h>

#include <boot_device.h>
#include <commpage.h>
#include <debug.h>
#include <elf.h>
#include <smp.h>
#include <util/BitUtils.h>
#include <vm/vm.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>

#include <arch_system_info.h>
#include <arch/x86/apic.h>
#include <boot/kernel_args.h>

#include "paging/X86PagingStructures.h"
#include "paging/X86VMTranslationMap.h"


#define DUMP_FEATURE_STRING 1
#define DUMP_CPU_TOPOLOGY 1


/* cpu vendor info */
struct cpu_vendor_info {
	const char *vendor;
	const char *ident_string[2];
};

static const struct cpu_vendor_info vendor_info[VENDOR_NUM] = {
	{ "Intel", { "GenuineIntel" } },
	{ "AMD", { "AuthenticAMD" } },
	{ "Cyrix", { "CyrixInstead" } },
	{ "UMC", { "UMC UMC UMC" } },
	{ "NexGen", { "NexGenDriven" } },
	{ "Centaur", { "CentaurHauls" } },
	{ "Rise", { "RiseRiseRise" } },
	{ "Transmeta", { "GenuineTMx86", "TransmetaCPU" } },
	{ "NSC", { "Geode by NSC" } },
};

#define K8_SMIONCMPHALT		(1ULL << 27)
#define K8_C1EONCMPHALT		(1ULL << 28)

#define K8_CMPHALT			(K8_SMIONCMPHALT | K8_C1EONCMPHALT)

struct set_mtrr_parameter {
	int32 index;
	uint64 base;
	uint64 length;
	uint8 type;
};

struct set_mtrrs_parameter {
	const x86_mtrr_info* infos;
	uint32 count;
	uint8 defaultType;
};


extern "C" void x86_reboot(void);
	// from arch.S

void (*gCpuIdleFunc)(void);
#ifndef __x86_64__
void (*gX86SwapFPUFunc)(void* oldState, const void* newState) = x86_noop_swap;
bool gHasSSE = false;
#endif

static uint32 sCpuRendezvous;
static uint32 sCpuRendezvous2;
static uint32 sCpuRendezvous3;
static vint32 sTSCSyncRendezvous;

/* Some specials for the double fault handler */
static uint8* sDoubleFaultStacks;
static const size_t kDoubleFaultStackSize = 4096;	// size per CPU

static x86_cpu_module_info* sCpuModule;


/* CPU topology information */
static uint32 (*sGetCPUTopologyID)(int currentCPU);
static uint32 sHierarchyMask[CPU_TOPOLOGY_LEVELS];
static uint32 sHierarchyShift[CPU_TOPOLOGY_LEVELS];

/* Cache topology information */
static uint32 sCacheSharingMask[CPU_MAX_CACHE_LEVEL];


static status_t
acpi_shutdown(bool rebootSystem)
{
	if (debug_debugger_running() || !are_interrupts_enabled())
		return B_ERROR;

	acpi_module_info* acpi;
	if (get_module(B_ACPI_MODULE_NAME, (module_info**)&acpi) != B_OK)
		return B_NOT_SUPPORTED;

	status_t status;
	if (rebootSystem) {
		status = acpi->reboot();
	} else {
		// Make sure we run on the boot CPU (apparently needed for some ACPI
		// implementations)
		_user_set_cpu_enabled(0, true);
		for (int32 cpu = 1; cpu < smp_get_num_cpus(); cpu++) {
			_user_set_cpu_enabled(cpu, false);
		}
		// TODO: must not be called from the idle thread!
		thread_yield();

		status = acpi->prepare_sleep_state(ACPI_POWER_STATE_OFF, NULL, 0);
		if (status == B_OK) {
			//cpu_status state = disable_interrupts();
			status = acpi->enter_sleep_state(ACPI_POWER_STATE_OFF);
			//restore_interrupts(state);
		}
	}

	put_module(B_ACPI_MODULE_NAME);
	return status;
}


/*! Disable CPU caches, and invalidate them. */
static void
disable_caches()
{
	x86_write_cr0((x86_read_cr0() | CR0_CACHE_DISABLE)
		& ~CR0_NOT_WRITE_THROUGH);
	wbinvd();
	arch_cpu_global_TLB_invalidate();
}


/*! Invalidate CPU caches, and enable them. */
static void
enable_caches()
{
	wbinvd();
	arch_cpu_global_TLB_invalidate();
	x86_write_cr0(x86_read_cr0()
		& ~(CR0_CACHE_DISABLE | CR0_NOT_WRITE_THROUGH));
}


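/*!	Runs on every CPU via call_all_cpus(): the CPUs rendezvous, disable and
	flush their caches, program the MTRR described by the passed
	set_mtrr_parameter through the CPU module, and re-enable caches, so the
	update takes effect on all processors together.
*/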
static void
set_mtrr(void* _parameter, int cpu)
{
	struct set_mtrr_parameter* parameter
		= (struct set_mtrr_parameter*)_parameter;

	// wait until all CPUs have arrived here
	smp_cpu_rendezvous(&sCpuRendezvous);

	// One CPU has to reset sCpuRendezvous3 -- it is needed to prevent the CPU
	// that initiated the call_all_cpus() from doing that again and clearing
	// sCpuRendezvous2 before the last CPU has actually left the loop in
	// smp_cpu_rendezvous();
	if (cpu == 0)
		atomic_set((int32*)&sCpuRendezvous3, 0);

	disable_caches();

	sCpuModule->set_mtrr(parameter->index, parameter->base, parameter->length,
		parameter->type);

	enable_caches();

	// wait until all CPUs have arrived here
	smp_cpu_rendezvous(&sCpuRendezvous2);
	smp_cpu_rendezvous(&sCpuRendezvous3);
}


static void
set_mtrrs(void* _parameter, int cpu)
{
	set_mtrrs_parameter* parameter = (set_mtrrs_parameter*)_parameter;

	// wait until all CPUs have arrived here
	smp_cpu_rendezvous(&sCpuRendezvous);

	// One CPU has to reset sCpuRendezvous3 -- it is needed to prevent the CPU
	// that initiated the call_all_cpus() from doing that again and clearing
	// sCpuRendezvous2 before the last CPU has actually left the loop in
	// smp_cpu_rendezvous();
	if (cpu == 0)
		atomic_set((int32*)&sCpuRendezvous3, 0);

	disable_caches();

	sCpuModule->set_mtrrs(parameter->defaultType, parameter->infos,
		parameter->count);

	enable_caches();

	// wait until all CPUs have arrived here
	smp_cpu_rendezvous(&sCpuRendezvous2);
	smp_cpu_rendezvous(&sCpuRendezvous3);
}


static void
init_mtrrs(void* _unused, int cpu)
{
	// wait until all CPUs have arrived here
	smp_cpu_rendezvous(&sCpuRendezvous);

	// One CPU has to reset sCpuRendezvous3 -- it is needed to prevent the CPU
	// that initiated the call_all_cpus() from doing that again and clearing
	// sCpuRendezvous2 before the last CPU has actually left the loop in
	// smp_cpu_rendezvous();
	if (cpu == 0)
		atomic_set((int32*)&sCpuRendezvous3, 0);

	disable_caches();

	sCpuModule->init_mtrrs();

	enable_caches();

	// wait until all CPUs have arrived here
	smp_cpu_rendezvous(&sCpuRendezvous2);
	smp_cpu_rendezvous(&sCpuRendezvous3);
}


uint32
x86_count_mtrrs(void)
{
	if (sCpuModule == NULL)
		return 0;

	return sCpuModule->count_mtrrs();
}


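/*!	Programs a single MTRR (index, base, length, type) on all CPUs by
	broadcasting set_mtrr() through call_all_cpus().
*/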
void
x86_set_mtrr(uint32 index, uint64 base, uint64 length, uint8 type)
{
	struct set_mtrr_parameter parameter;
	parameter.index = index;
	parameter.base = base;
	parameter.length = length;
	parameter.type = type;

	sCpuRendezvous = sCpuRendezvous2 = 0;
	call_all_cpus(&set_mtrr, &parameter);
}


status_t
x86_get_mtrr(uint32 index, uint64* _base, uint64* _length, uint8* _type)
{
	// the MTRRs are identical on all CPUs, so it doesn't matter
	// on which CPU this runs
	return sCpuModule->get_mtrr(index, _base, _length, _type);
}


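/*!	Replaces the complete set of variable MTRRs and the default memory type
	on all CPUs. Does nothing if no CPU module is installed.
*/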
void
x86_set_mtrrs(uint8 defaultType, const x86_mtrr_info* infos, uint32 count)
{
	if (sCpuModule == NULL)
		return;

	struct set_mtrrs_parameter parameter;
	parameter.defaultType = defaultType;
	parameter.infos = infos;
	parameter.count = count;

	sCpuRendezvous = sCpuRendezvous2 = 0;
	call_all_cpus(&set_mtrrs, &parameter);
}


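/*!	Sets up FPU/SSE support for the current CPU. On 32-bit builds this picks
	the state-swap routine (fnsave vs. fxsave) and sets the CR0/CR4 bits
	accordingly; x86_64 CPUs always have SSE, so no feature check is needed
	there.
*/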
void
x86_init_fpu(void)
{
	// All x86_64 CPUs support SSE, don't need to bother checking for it.
#ifndef __x86_64__
	if (!x86_check_feature(IA32_FEATURE_FPU, FEATURE_COMMON)) {
		// No FPU... time to install one in your 386?
		dprintf("%s: Warning: CPU has no reported FPU.\n", __func__);
		gX86SwapFPUFunc = x86_noop_swap;
		return;
	}

	if (!x86_check_feature(IA32_FEATURE_SSE, FEATURE_COMMON)
		|| !x86_check_feature(IA32_FEATURE_FXSR, FEATURE_COMMON)) {
		dprintf("%s: CPU has no SSE... just enabling FPU.\n", __func__);
		// we don't have proper SSE support, just enable FPU
		x86_write_cr0(x86_read_cr0() & ~(CR0_FPU_EMULATION | CR0_MONITOR_FPU));
		gX86SwapFPUFunc = x86_fnsave_swap;
		return;
	}
#endif

	dprintf("%s: CPU has SSE... enabling FXSR and XMM.\n", __func__);
#ifndef __x86_64__
	// enable OS support for SSE
	x86_write_cr4(x86_read_cr4() | CR4_OS_FXSR | CR4_OS_XMM_EXCEPTION);
	x86_write_cr0(x86_read_cr0() & ~(CR0_FPU_EMULATION | CR0_MONITOR_FPU));

	gX86SwapFPUFunc = x86_fxsave_swap;
	gHasSSE = true;
#endif
}


#if DUMP_FEATURE_STRING
static void
dump_feature_string(int currentCPU, cpu_ent* cpu)
{
	char features[384];
	features[0] = 0;

	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_FPU)
		strlcat(features, "fpu ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_VME)
		strlcat(features, "vme ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_DE)
		strlcat(features, "de ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_PSE)
		strlcat(features, "pse ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_TSC)
		strlcat(features, "tsc ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_MSR)
		strlcat(features, "msr ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_PAE)
		strlcat(features, "pae ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_MCE)
		strlcat(features, "mce ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_CX8)
		strlcat(features, "cx8 ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_APIC)
		strlcat(features, "apic ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_SEP)
		strlcat(features, "sep ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_MTRR)
		strlcat(features, "mtrr ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_PGE)
		strlcat(features, "pge ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_MCA)
		strlcat(features, "mca ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_CMOV)
		strlcat(features, "cmov ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_PAT)
		strlcat(features, "pat ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_PSE36)
		strlcat(features, "pse36 ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_PSN)
		strlcat(features, "psn ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_CLFSH)
		strlcat(features, "clfsh ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_DS)
		strlcat(features, "ds ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_ACPI)
		strlcat(features, "acpi ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_MMX)
		strlcat(features, "mmx ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_FXSR)
		strlcat(features, "fxsr ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_SSE)
		strlcat(features, "sse ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_SSE2)
		strlcat(features, "sse2 ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_SS)
		strlcat(features, "ss ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_HTT)
		strlcat(features, "htt ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_TM)
		strlcat(features, "tm ", sizeof(features));
	if (cpu->arch.feature[FEATURE_COMMON] & IA32_FEATURE_PBE)
		strlcat(features, "pbe ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_SSE3)
		strlcat(features, "sse3 ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_PCLMULQDQ)
		strlcat(features, "pclmulqdq ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_DTES64)
		strlcat(features, "dtes64 ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_MONITOR)
		strlcat(features, "monitor ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_DSCPL)
		strlcat(features, "dscpl ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_VMX)
		strlcat(features, "vmx ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_SMX)
		strlcat(features, "smx ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_EST)
		strlcat(features, "est ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_TM2)
		strlcat(features, "tm2 ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_SSSE3)
		strlcat(features, "ssse3 ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_CNXTID)
		strlcat(features, "cnxtid ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_FMA)
		strlcat(features, "fma ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_CX16)
		strlcat(features, "cx16 ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_XTPR)
		strlcat(features, "xtpr ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_PDCM)
		strlcat(features, "pdcm ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_PCID)
		strlcat(features, "pcid ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_DCA)
		strlcat(features, "dca ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_SSE4_1)
		strlcat(features, "sse4_1 ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_SSE4_2)
		strlcat(features, "sse4_2 ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_X2APIC)
		strlcat(features, "x2apic ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_MOVBE)
		strlcat(features, "movbe ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_POPCNT)
		strlcat(features, "popcnt ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_TSCDEADLINE)
		strlcat(features, "tscdeadline ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_AES)
		strlcat(features, "aes ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_XSAVE)
		strlcat(features, "xsave ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_OSXSAVE)
		strlcat(features, "osxsave ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_AVX)
		strlcat(features, "avx ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_F16C)
		strlcat(features, "f16c ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_RDRND)
		strlcat(features, "rdrnd ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT] & IA32_FEATURE_EXT_HYPERVISOR)
		strlcat(features, "hypervisor ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_SYSCALL)
		strlcat(features, "syscall ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_NX)
		strlcat(features, "nx ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_MMXEXT)
		strlcat(features, "mmxext ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_FFXSR)
		strlcat(features, "ffxsr ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_LONG)
		strlcat(features, "long ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_3DNOWEXT)
		strlcat(features, "3dnowext ", sizeof(features));
	if (cpu->arch.feature[FEATURE_EXT_AMD] & IA32_FEATURE_AMD_EXT_3DNOW)
		strlcat(features, "3dnow ", sizeof(features));
	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_DTS)
		strlcat(features, "dts ", sizeof(features));
	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_ITB)
		strlcat(features, "itb ", sizeof(features));
	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_ARAT)
		strlcat(features, "arat ", sizeof(features));
	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_PLN)
		strlcat(features, "pln ", sizeof(features));
	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_ECMD)
		strlcat(features, "ecmd ", sizeof(features));
	if (cpu->arch.feature[FEATURE_6_EAX] & IA32_FEATURE_PTM)
		strlcat(features, "ptm ", sizeof(features));
	if (cpu->arch.feature[FEATURE_6_ECX] & IA32_FEATURE_APERFMPERF)
		strlcat(features, "aperfmperf ", sizeof(features));
	if (cpu->arch.feature[FEATURE_6_ECX] & IA32_FEATURE_EPB)
		strlcat(features, "epb ", sizeof(features));

	dprintf("CPU %d: features: %s\n", currentCPU, features);
}
#endif	// DUMP_FEATURE_STRING


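/*!	Derives the masks and shifts that split a CPU's (initial) APIC ID into
	SMT, core and package fields, assuming the usual layout: the lowest bits
	select the SMT sibling within a core, the next bits the core within a
	package, and all remaining upper bits the package.
*/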
static void
compute_cpu_hierarchy_masks(int maxLogicalID, int maxCoreID)
{
	ASSERT(maxLogicalID >= maxCoreID);
	const int kMaxSMTID = maxLogicalID / maxCoreID;

	sHierarchyMask[CPU_TOPOLOGY_SMT] = kMaxSMTID - 1;
	sHierarchyShift[CPU_TOPOLOGY_SMT] = 0;

	sHierarchyMask[CPU_TOPOLOGY_CORE] = (maxCoreID - 1) * kMaxSMTID;
	sHierarchyShift[CPU_TOPOLOGY_CORE]
		= count_set_bits(sHierarchyMask[CPU_TOPOLOGY_SMT]);

	const uint32 kSinglePackageMask = sHierarchyMask[CPU_TOPOLOGY_SMT]
		| sHierarchyMask[CPU_TOPOLOGY_CORE];
	sHierarchyMask[CPU_TOPOLOGY_PACKAGE] = ~kSinglePackageMask;
	sHierarchyShift[CPU_TOPOLOGY_PACKAGE] = count_set_bits(kSinglePackageMask);
}


static uint32
get_cpu_legacy_initial_apic_id(int /* currentCPU */)
{
	cpuid_info cpuid;
	get_current_cpuid(&cpuid, 1, 0);
	return cpuid.regs.ebx >> 24;
}


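/*!	Detects the topology of an AMD CPU from the legacy CPUID leaves: the
	logical CPU count from leaf 1, the core count from leaf 0x80000008 (or,
	if the CmpLegacy flag is set, the logical count), and builds the
	hierarchy masks from them.
*/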
static inline status_t
detect_amd_cpu_topology(uint32 maxBasicLeaf, uint32 maxExtendedLeaf)
{
	sGetCPUTopologyID = get_cpu_legacy_initial_apic_id;

	cpuid_info cpuid;
	get_current_cpuid(&cpuid, 1, 0);
	int maxLogicalID = next_power_of_2((cpuid.regs.ebx >> 16) & 0xff);

	int maxCoreID = 1;
	if (maxExtendedLeaf >= 0x80000008) {
		get_current_cpuid(&cpuid, 0x80000008, 0);
		maxCoreID = (cpuid.regs.ecx >> 12) & 0xf;
		if (maxCoreID != 0)
			maxCoreID = 1 << maxCoreID;
		else
			maxCoreID = next_power_of_2((cpuid.regs.edx & 0xf) + 1);
	}

	if (maxExtendedLeaf >= 0x80000001) {
		get_current_cpuid(&cpuid, 0x80000001, 0);
		if (x86_check_feature(IA32_FEATURE_AMD_EXT_CMPLEGACY,
				FEATURE_EXT_AMD_ECX))
			maxCoreID = maxLogicalID;
	}

	compute_cpu_hierarchy_masks(maxLogicalID, maxCoreID);

	return B_OK;
}


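/*!	Walks CPUID leaf 0x8000001d (if the AMD topology extensions are present)
	to determine how many logical CPUs share each cache level, and fills in
	sCacheSharingMask accordingly.
*/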
static void
detect_amd_cache_topology(uint32 maxExtendedLeaf)
{
	if (!x86_check_feature(IA32_FEATURE_AMD_EXT_TOPOLOGY, FEATURE_EXT_AMD_ECX))
		return;

	if (maxExtendedLeaf < 0x8000001d)
		return;

	uint8 hierarchyLevels[CPU_MAX_CACHE_LEVEL];
	int maxCacheLevel = 0;

	int currentLevel = 0;
	int cacheType;
	do {
		cpuid_info cpuid;
		get_current_cpuid(&cpuid, 0x8000001d, currentLevel);

		cacheType = cpuid.regs.eax & 0x1f;
		if (cacheType == 0)
			break;

		int cacheLevel = (cpuid.regs.eax >> 5) & 0x7;
		int coresCount = next_power_of_2(((cpuid.regs.eax >> 14) & 0x3f) + 1);
		hierarchyLevels[cacheLevel - 1]
			= coresCount * (sHierarchyMask[CPU_TOPOLOGY_SMT] + 1);
		maxCacheLevel = std::max(maxCacheLevel, cacheLevel);

		currentLevel++;
	} while (true);

	for (int i = 0; i < maxCacheLevel; i++)
		sCacheSharingMask[i] = ~uint32(hierarchyLevels[i] - 1);
	gCPUCacheLevelCount = maxCacheLevel;
}


static uint32
get_intel_cpu_initial_x2apic_id(int /* currentCPU */)
{
	cpuid_info cpuid;
	get_current_cpuid(&cpuid, 11, 0);
	return cpuid.regs.edx;
}


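/*!	Enumerates CPUID leaf 11 (x2APIC topology) to find out how many APIC ID
	bits are used for the SMT and core levels; the remaining upper bits
	identify the package. Returns B_UNSUPPORTED if the leaf is absent or
	empty so the caller can fall back to the legacy method.
*/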
static inline status_t
detect_intel_cpu_topology_x2apic(uint32 maxBasicLeaf)
{
	if (maxBasicLeaf < 11)
		return B_UNSUPPORTED;

	uint8 hierarchyLevels[CPU_TOPOLOGY_LEVELS] = { 0 };

	int currentLevel = 0;
	int levelType;
	unsigned int levelsSet = 0;

	do {
		cpuid_info cpuid;
		get_current_cpuid(&cpuid, 11, currentLevel);
		if (currentLevel == 0 && cpuid.regs.ebx == 0)
			return B_UNSUPPORTED;

		levelType = (cpuid.regs.ecx >> 8) & 0xff;
		int levelValue = cpuid.regs.eax & 0x1f;

		switch (levelType) {
			case 1:	// SMT
				hierarchyLevels[CPU_TOPOLOGY_SMT] = levelValue;
				levelsSet |= 1;
				break;
			case 2:	// core
				hierarchyLevels[CPU_TOPOLOGY_CORE] = levelValue;
				levelsSet |= 2;
				break;
		}

		currentLevel++;
	} while (levelType != 0 && levelsSet != 3);

	sGetCPUTopologyID = get_intel_cpu_initial_x2apic_id;

	for (int i = 1; i < CPU_TOPOLOGY_LEVELS; i++) {
		if ((levelsSet & (1u << i)) != 0)
			continue;
		hierarchyLevels[i] = hierarchyLevels[i - 1];
	}

	for (int i = 0; i < CPU_TOPOLOGY_LEVELS; i++) {
		uint32 mask = ~uint32(0);
		if (i < CPU_TOPOLOGY_LEVELS - 1)
			mask = (1u << hierarchyLevels[i]) - 1;
		if (i > 0)
			mask &= ~sHierarchyMask[i - 1];
		sHierarchyMask[i] = mask;
		sHierarchyShift[i] = i > 0 ? hierarchyLevels[i - 1] : 0;
	}

	return B_OK;
}


static inline status_t
detect_intel_cpu_topology_legacy(uint32 maxBasicLeaf)
{
	sGetCPUTopologyID = get_cpu_legacy_initial_apic_id;

	cpuid_info cpuid;

	get_current_cpuid(&cpuid, 1, 0);
	int maxLogicalID = next_power_of_2((cpuid.regs.ebx >> 16) & 0xff);

	int maxCoreID = 1;
	if (maxBasicLeaf >= 4) {
		get_current_cpuid(&cpuid, 4, 0);
		maxCoreID = next_power_of_2((cpuid.regs.eax >> 26) + 1);
	}

	compute_cpu_hierarchy_masks(maxLogicalID, maxCoreID);

	return B_OK;
}


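/*!	Uses CPUID leaf 4 to determine, for each cache level, how many logical
	CPUs share that cache, and stores the resulting sharing masks.
*/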
static void
detect_intel_cache_topology(uint32 maxBasicLeaf)
{
	if (maxBasicLeaf < 4)
		return;

	uint8 hierarchyLevels[CPU_MAX_CACHE_LEVEL];
	int maxCacheLevel = 0;

	int currentLevel = 0;
	int cacheType;
	do {
		cpuid_info cpuid;
		get_current_cpuid(&cpuid, 4, currentLevel);

		cacheType = cpuid.regs.eax & 0x1f;
		if (cacheType == 0)
			break;

		int cacheLevel = (cpuid.regs.eax >> 5) & 0x7;
		hierarchyLevels[cacheLevel - 1]
			= next_power_of_2(((cpuid.regs.eax >> 14) & 0x3f) + 1);
		maxCacheLevel = std::max(maxCacheLevel, cacheLevel);

		currentLevel++;
	} while (true);

	for (int i = 0; i < maxCacheLevel; i++)
		sCacheSharingMask[i] = ~uint32(hierarchyLevels[i] - 1);

	gCPUCacheLevelCount = maxCacheLevel;
}


static uint32
get_simple_cpu_topology_id(int currentCPU)
{
	return currentCPU;
}


static inline int
get_topology_level_id(uint32 id, cpu_topology_level level)
{
	ASSERT(level < CPU_TOPOLOGY_LEVELS);
	return (id & sHierarchyMask[level]) >> sHierarchyShift[level];
}


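/*!	Computes this CPU's SMT, core and package IDs as well as its cache
	sharing IDs. The boot CPU additionally selects the vendor specific
	detection method; if none is available, a trivial scheme is used that
	treats every CPU as its own package.
*/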
static void
detect_cpu_topology(int currentCPU, cpu_ent* cpu, uint32 maxBasicLeaf,
	uint32 maxExtendedLeaf)
{
	if (currentCPU == 0) {
		memset(sCacheSharingMask, 0xff, sizeof(sCacheSharingMask));

		status_t result = B_UNSUPPORTED;
		if (x86_check_feature(IA32_FEATURE_HTT, FEATURE_COMMON)) {
			if (cpu->arch.vendor == VENDOR_AMD) {
				result = detect_amd_cpu_topology(maxBasicLeaf, maxExtendedLeaf);

				if (result == B_OK)
					detect_amd_cache_topology(maxExtendedLeaf);
			}

			if (cpu->arch.vendor == VENDOR_INTEL) {
				result = detect_intel_cpu_topology_x2apic(maxBasicLeaf);
				if (result != B_OK)
					result = detect_intel_cpu_topology_legacy(maxBasicLeaf);

				if (result == B_OK)
					detect_intel_cache_topology(maxBasicLeaf);
			}
		}

		if (result != B_OK) {
			dprintf("No CPU topology information available.\n");

			sGetCPUTopologyID = get_simple_cpu_topology_id;

			sHierarchyMask[CPU_TOPOLOGY_PACKAGE] = ~uint32(0);
		}
	}

	ASSERT(sGetCPUTopologyID != NULL);
	int topologyID = sGetCPUTopologyID(currentCPU);
	cpu->topology_id[CPU_TOPOLOGY_SMT]
		= get_topology_level_id(topologyID, CPU_TOPOLOGY_SMT);
	cpu->topology_id[CPU_TOPOLOGY_CORE]
		= get_topology_level_id(topologyID, CPU_TOPOLOGY_CORE);
	cpu->topology_id[CPU_TOPOLOGY_PACKAGE]
		= get_topology_level_id(topologyID, CPU_TOPOLOGY_PACKAGE);

	unsigned int i;
	for (i = 0; i < gCPUCacheLevelCount; i++)
		cpu->cache_id[i] = topologyID & sCacheSharingMask[i];
	for (; i < CPU_MAX_CACHE_LEVEL; i++)
		cpu->cache_id[i] = -1;

#if DUMP_CPU_TOPOLOGY
	dprintf("CPU %d: apic id %d, package %d, core %d, smt %d\n", currentCPU,
		topologyID, cpu->topology_id[CPU_TOPOLOGY_PACKAGE],
		cpu->topology_id[CPU_TOPOLOGY_CORE],
		cpu->topology_id[CPU_TOPOLOGY_SMT]);

	if (gCPUCacheLevelCount > 0) {
		char cacheLevels[256];
		unsigned int offset = 0;
		for (i = 0; i < gCPUCacheLevelCount; i++) {
			offset += snprintf(cacheLevels + offset,
					sizeof(cacheLevels) - offset,
					" L%d id %d%s", i + 1, cpu->cache_id[i],
					i < gCPUCacheLevelCount - 1 ? "," : "");

			if (offset >= sizeof(cacheLevels))
				break;
		}

		dprintf("CPU %d: cache sharing:%s\n", currentCPU, cacheLevels);
	}
#endif
}


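/*!	Queries CPUID on the current CPU and fills in the arch specific part of
	its cpu_ent: vendor, family/model/stepping, model name string and the
	feature flag words, and finally triggers topology detection.
*/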
static void
detect_cpu(int currentCPU)
{
	cpu_ent* cpu = get_cpu_struct();
	char vendorString[17];
	cpuid_info cpuid;

	// clear out the cpu info data
	cpu->arch.vendor = VENDOR_UNKNOWN;
	cpu->arch.vendor_name = "UNKNOWN VENDOR";
	cpu->arch.feature[FEATURE_COMMON] = 0;
	cpu->arch.feature[FEATURE_EXT] = 0;
	cpu->arch.feature[FEATURE_EXT_AMD] = 0;
	cpu->arch.model_name[0] = 0;

	// print some fun data
	get_current_cpuid(&cpuid, 0, 0);
	uint32 maxBasicLeaf = cpuid.eax_0.max_eax;

	// build the vendor string
	memset(vendorString, 0, sizeof(vendorString));
	memcpy(vendorString, cpuid.eax_0.vendor_id, sizeof(cpuid.eax_0.vendor_id));

	// get the family, model, stepping
	get_current_cpuid(&cpuid, 1, 0);
	cpu->arch.type = cpuid.eax_1.type;
	cpu->arch.family = cpuid.eax_1.family;
	cpu->arch.extended_family = cpuid.eax_1.extended_family;
	cpu->arch.model = cpuid.eax_1.model;
	cpu->arch.extended_model = cpuid.eax_1.extended_model;
	cpu->arch.stepping = cpuid.eax_1.stepping;
	dprintf("CPU %d: type %d family %d extended_family %d model %d "
		"extended_model %d stepping %d, string '%s'\n",
		currentCPU, cpu->arch.type, cpu->arch.family,
		cpu->arch.extended_family, cpu->arch.model,
		cpu->arch.extended_model, cpu->arch.stepping, vendorString);

	// figure out what vendor we have here

	for (int32 i = 0; i < VENDOR_NUM; i++) {
		if (vendor_info[i].ident_string[0]
			&& !strcmp(vendorString, vendor_info[i].ident_string[0])) {
			cpu->arch.vendor = (x86_vendors)i;
			cpu->arch.vendor_name = vendor_info[i].vendor;
			break;
		}
		if (vendor_info[i].ident_string[1]
			&& !strcmp(vendorString, vendor_info[i].ident_string[1])) {
			cpu->arch.vendor = (x86_vendors)i;
			cpu->arch.vendor_name = vendor_info[i].vendor;
			break;
		}
	}

	// see if we can get the model name
	get_current_cpuid(&cpuid, 0x80000000, 0);
	uint32 maxExtendedLeaf = cpuid.eax_0.max_eax;
	if (maxExtendedLeaf >= 0x80000004) {
		// build the model string (need to swap ecx/edx data before copying)
		unsigned int temp;
		memset(cpu->arch.model_name, 0, sizeof(cpu->arch.model_name));

		get_current_cpuid(&cpuid, 0x80000002, 0);
		temp = cpuid.regs.edx;
		cpuid.regs.edx = cpuid.regs.ecx;
		cpuid.regs.ecx = temp;
		memcpy(cpu->arch.model_name, cpuid.as_chars, sizeof(cpuid.as_chars));

		get_current_cpuid(&cpuid, 0x80000003, 0);
		temp = cpuid.regs.edx;
		cpuid.regs.edx = cpuid.regs.ecx;
		cpuid.regs.ecx = temp;
		memcpy(cpu->arch.model_name + 16, cpuid.as_chars,
			sizeof(cpuid.as_chars));

		get_current_cpuid(&cpuid, 0x80000004, 0);
		temp = cpuid.regs.edx;
		cpuid.regs.edx = cpuid.regs.ecx;
		cpuid.regs.ecx = temp;
		memcpy(cpu->arch.model_name + 32, cpuid.as_chars,
			sizeof(cpuid.as_chars));

		// some cpus return a right-justified string
		int32 i = 0;
		while (cpu->arch.model_name[i] == ' ')
			i++;
		if (i > 0) {
			memmove(cpu->arch.model_name, &cpu->arch.model_name[i],
				strlen(&cpu->arch.model_name[i]) + 1);
		}

		dprintf("CPU %d: vendor '%s' model name '%s'\n",
			currentCPU, cpu->arch.vendor_name, cpu->arch.model_name);
	} else {
		strlcpy(cpu->arch.model_name, "unknown", sizeof(cpu->arch.model_name));
	}

	// load feature bits
	get_current_cpuid(&cpuid, 1, 0);
	cpu->arch.feature[FEATURE_COMMON] = cpuid.eax_1.features; // edx
	cpu->arch.feature[FEATURE_EXT] = cpuid.eax_1.extended_features; // ecx

	if (maxExtendedLeaf >= 0x80000001) {
		get_current_cpuid(&cpuid, 0x80000001, 0);
		if (cpu->arch.vendor == VENDOR_AMD)
			cpu->arch.feature[FEATURE_EXT_AMD_ECX] = cpuid.regs.ecx; // ecx
		cpu->arch.feature[FEATURE_EXT_AMD] = cpuid.regs.edx; // edx
		if (cpu->arch.vendor != VENDOR_AMD)
			cpu->arch.feature[FEATURE_EXT_AMD] &= IA32_FEATURES_INTEL_EXT;
	}

	if (maxBasicLeaf >= 5) {
		get_current_cpuid(&cpuid, 5, 0);
		cpu->arch.feature[FEATURE_5_ECX] = cpuid.regs.ecx;
	}

	if (maxBasicLeaf >= 6) {
		get_current_cpuid(&cpuid, 6, 0);
		cpu->arch.feature[FEATURE_6_EAX] = cpuid.regs.eax;
		cpu->arch.feature[FEATURE_6_ECX] = cpuid.regs.ecx;
	}

	if (maxExtendedLeaf >= 0x80000007) {
		get_current_cpuid(&cpuid, 0x80000007, 0);
		cpu->arch.feature[FEATURE_EXT_7_EDX] = cpuid.regs.edx;
	}

	detect_cpu_topology(currentCPU, cpu, maxBasicLeaf, maxExtendedLeaf);

#if DUMP_FEATURE_STRING
	dump_feature_string(currentCPU, cpu);
#endif
}


bool
x86_check_feature(uint32 feature, enum x86_feature_type type)
{
	cpu_ent* cpu = get_cpu_struct();

#if 0
	int i;
	dprintf("x86_check_feature: feature 0x%x, type %d\n", feature, type);
	for (i = 0; i < FEATURE_NUM; i++) {
		dprintf("features %d: 0x%x\n", i, cpu->arch.feature[i]);
	}
#endif

	return (cpu->arch.feature[type] & feature) != 0;
}


void*
x86_get_double_fault_stack(int32 cpu, size_t* _size)
{
	*_size = kDoubleFaultStackSize;
	return sDoubleFaultStacks + kDoubleFaultStackSize * cpu;
}


/*!	Returns the index of the current CPU. Can only be called from the double
	fault handler.
*/
int32
x86_double_fault_get_cpu(void)
{
	addr_t stack = x86_get_stack_frame();
	return (stack - (addr_t)sDoubleFaultStacks) / kDoubleFaultStackSize;
}


// #pragma mark -


status_t
arch_cpu_preboot_init_percpu(kernel_args* args, int cpu)
{
	// On SMP systems we want to synchronize the CPUs' TSCs, so system_time()
	// will return consistent values.
	if (smp_get_num_cpus() > 1) {
		// let the first CPU prepare the rendezvous point
		if (cpu == 0)
			sTSCSyncRendezvous = smp_get_num_cpus() - 1;

		// One CPU after the other will drop out of this loop and be caught by
		// the loop below, until the last CPU (0) gets there. Save for +/- a few
		// cycles the CPUs should pass the second loop at the same time.
		while (sTSCSyncRendezvous != cpu) {
		}

		sTSCSyncRendezvous = cpu - 1;

		while (sTSCSyncRendezvous != -1) {
		}

		// reset TSC to 0
		x86_write_msr(IA32_MSR_TSC, 0);
	}

	x86_descriptors_preboot_init_percpu(args, cpu);

	return B_OK;
}


static void
halt_idle(void)
{
	asm("hlt");
}


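/*!	Idle routine for AMD CPUs that support C1E but not ARAT: before halting,
	the "halt on compare" bits in the interrupt pending MSR are cleared,
	which presumably keeps the CPU out of C1E so that the local APIC timer
	continues to run while idle.
*/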
static void
amdc1e_noarat_idle(void)
{
	uint64 msr = x86_read_msr(K8_MSR_IPM);
	if (msr & K8_CMPHALT)
		x86_write_msr(K8_MSR_IPM, msr & ~K8_CMPHALT);
	halt_idle();
}


static bool
detect_amdc1e_noarat()
{
	cpu_ent* cpu = get_cpu_struct();

	if (cpu->arch.vendor != VENDOR_AMD)
		return false;

	// Family 0x12 and higher processors support ARAT.
	// Families lower than 0xf don't support C1E.
	// Family 0xf with model <= 0x40 doesn't support C1E either.
	uint32 family = cpu->arch.family + cpu->arch.extended_family;
	uint32 model = (cpu->arch.extended_model << 4) | cpu->arch.model;
	return (family < 0x12 && family > 0xf) || (family == 0xf && model > 0x40);
}


status_t
arch_cpu_init_percpu(kernel_args* args, int cpu)
{
	detect_cpu(cpu);

	if (!gCpuIdleFunc) {
		if (detect_amdc1e_noarat())
			gCpuIdleFunc = amdc1e_noarat_idle;
		else
			gCpuIdleFunc = halt_idle;
	}

	return B_OK;
}


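/*!	Initializes the TSC -> system_time() conversion with the factor the boot
	loader measured (args->arch_args.system_time_cv_factor; apparently a
	fixed-point microseconds-per-TSC-tick value), and sets up the descriptor
	tables.
*/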
status_t
arch_cpu_init(kernel_args* args)
{
	// init the TSC -> system_time() conversion factors

	uint32 conversionFactor = args->arch_args.system_time_cv_factor;
	uint64 conversionFactorNsecs = (uint64)conversionFactor * 1000;

#ifdef __x86_64__
	// The x86_64 system_time() implementation uses 64-bit multiplication and
	// therefore shifting is not necessary for low frequencies (it's also not
	// too likely that there'll be any x86_64 CPUs clocked under 1GHz).
	__x86_setup_system_time((uint64)conversionFactor << 32,
		conversionFactorNsecs);
#else
	if (conversionFactorNsecs >> 32 != 0) {
		// the TSC frequency is < 1 GHz, which forces us to shift the factor
		__x86_setup_system_time(conversionFactor, conversionFactorNsecs >> 16,
			true);
	} else {
		// the TSC frequency is >= 1 GHz
		__x86_setup_system_time(conversionFactor, conversionFactorNsecs, false);
	}
#endif

	// Initialize descriptor tables.
	x86_descriptors_init(args);

	return B_OK;
}


status_t
arch_cpu_init_post_vm(kernel_args* args)
{
	uint32 i;

	// allocate an area for the double fault stacks
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	create_area_etc(B_SYSTEM_TEAM, "double fault stacks",
		kDoubleFaultStackSize * smp_get_num_cpus(), B_FULL_LOCK,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT, 0,
		&virtualRestrictions, &physicalRestrictions,
		(void**)&sDoubleFaultStacks);

	X86PagingStructures* kernelPagingStructures
		= static_cast<X86VMTranslationMap*>(
			VMAddressSpace::Kernel()->TranslationMap())->PagingStructures();

	// Set active translation map on each CPU.
	for (i = 0; i < args->num_cpus; i++) {
		gCPU[i].arch.active_paging_structures = kernelPagingStructures;
		kernelPagingStructures->AddReference();
	}

	if (!apic_available())
		x86_init_fpu();
	// else fpu gets set up in smp code

	return B_OK;
}


status_t
arch_cpu_init_post_modules(kernel_args* args)
{
	// initialize CPU module

	void* cookie = open_module_list("cpu");

	while (true) {
		char name[B_FILE_NAME_LENGTH];
		size_t nameLength = sizeof(name);

		if (read_next_module_name(cookie, name, &nameLength) != B_OK
			|| get_module(name, (module_info**)&sCpuModule) == B_OK)
			break;
	}

	close_module_list(cookie);

	// initialize MTRRs if available
	if (x86_count_mtrrs() > 0) {
		sCpuRendezvous = sCpuRendezvous2 = 0;
		call_all_cpus(&init_mtrrs, NULL);
	}

	size_t threadExitLen = (addr_t)x86_end_userspace_thread_exit
		- (addr_t)x86_userspace_thread_exit;
	addr_t threadExitPosition = fill_commpage_entry(
		COMMPAGE_ENTRY_X86_THREAD_EXIT, (const void*)x86_userspace_thread_exit,
		threadExitLen);

	// add the functions to the commpage image
	image_id image = get_commpage_image();

	elf_add_memory_image_symbol(image, "commpage_thread_exit",
		threadExitPosition, threadExitLen, B_SYMBOL_TYPE_TEXT);

	return B_OK;
}


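/*!	Flushes all non-global TLB entries of the current CPU by reloading CR3. */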
void
arch_cpu_user_TLB_invalidate(void)
{
	x86_write_cr3(x86_read_cr3());
}


void
arch_cpu_global_TLB_invalidate(void)
{
	uint32 flags = x86_read_cr4();

	if (flags & IA32_CR4_GLOBAL_PAGES) {
		// disable and reenable the global pages to flush all TLBs regardless
		// of the global page bit
		x86_write_cr4(flags & ~IA32_CR4_GLOBAL_PAGES);
		x86_write_cr4(flags | IA32_CR4_GLOBAL_PAGES);
	} else {
		cpu_status state = disable_interrupts();
		arch_cpu_user_TLB_invalidate();
		restore_interrupts(state);
	}
}


void
arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
{
	int32 num_pages = end / B_PAGE_SIZE - start / B_PAGE_SIZE;
	while (num_pages-- >= 0) {
		invalidate_TLB(start);
		start += B_PAGE_SIZE;
	}
}


void
arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
{
	int i;
	for (i = 0; i < num_pages; i++) {
		invalidate_TLB(pages[i]);
	}
}


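/*!	Shuts down or reboots the system: ACPI is tried first; on failure the
	32-bit build falls back to APM for shutdown, while reboot falls back to
	a keyboard controller reset and finally x86_reboot().
*/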
status_t
arch_cpu_shutdown(bool rebootSystem)
{
	if (acpi_shutdown(rebootSystem) == B_OK)
		return B_OK;

	if (!rebootSystem) {
#ifndef __x86_64__
		return apm_shutdown();
#else
		return B_NOT_SUPPORTED;
#endif
	}

	cpu_status state = disable_interrupts();

	// try to reset the system using the keyboard controller
	out8(0xfe, 0x64);

	// Give some time to the controller to do its job (0.5s)
	snooze(500000);

	// if that didn't help, try it this way
	x86_reboot();

	restore_interrupts(state);
	return B_ERROR;
}


void
arch_cpu_sync_icache(void* address, size_t length)
{
	// instruction cache is always consistent on x86
}