/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIP report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Andi Kleen		:	Changed for SMP boot into long mode.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *	Andi Kleen		:	Converted to new state machine.
 *					Probably mostly hotplug CPU ready now.
 *	Ashok Raj		:	CPU hotplug support
 */
#include <linux/init.h>

#include <linux/kernel_stat.h>
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/smp.h>
#include <linux/kdebug.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/hw_irq.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);
/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map __read_mostly;

EXPORT_SYMBOL(cpu_online_map);
/*
 * Private maps to synchronize booting between AP and BP.
 * Probably not needed anymore, but it makes for easier debugging. -AK
 */
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);

/* Set when the idlers are all forked */
int smp_threads_ready;
/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
/*
 * Trampoline 80x86 program as an array.
 */

extern const unsigned char trampoline_data[];
extern const unsigned char trampoline_end[];
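/*
 * (These two symbols come from trampoline.S and bracket the 16-bit
 * startup code that setup_trampoline() below copies into low memory.)
 */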
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
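/*
 * (Values used in this file: CPU_UP_PREPARE from __cpu_up(), CPU_ONLINE
 * from start_secondary()/smp_prepare_boot_cpu(), and CPU_DEAD, which the
 * dying cpu's idle task sets in play_dead; see __cpu_die() below.)
 */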
/*
 * Store all idle threads; these can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;

#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

static unsigned long __cpuinit setup_trampoline(void)
{
        void *tramp = __va(SMP_TRAMPOLINE_BASE);
        memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
        return virt_to_phys(tramp);
}
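/*
 * (SMP_TRAMPOLINE_BASE must be page-aligned and below 1 MiB: the AP
 * starts in real mode, and the STARTUP IPI can only encode the entry
 * point as a 4 KiB page number; see wakeup_secondary_via_INIT().)
 */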
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */

static void __cpuinit smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = cpu_data + id;

        *c = boot_cpu_data;
        identify_cpu(c);
        print_cpu_info(c);
}
static atomic_t init_deasserted __cpuinitdata;
/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
        int cpuid, phys_id;
        unsigned long timeout;

        /*
         * If woken up by an INIT in an 82489DX configuration
         * we may get here before an INIT-deassert IPI reaches
         * our local APIC. We have to wait for the IPI or we'll
         * lock up on an APIC access.
         */
        while (!atomic_read(&init_deasserted))
                cpu_relax();

        /*
         * (This works even if the APIC is not enabled.)
         */
        phys_id = GET_APIC_ID(apic_read(APIC_ID));
        cpuid = smp_processor_id();
        if (cpu_isset(cpuid, cpu_callin_map)) {
                panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
                                        phys_id, cpuid);
        }
        Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

        /*
         * STARTUP IPIs are fragile beasts as they might sometimes
         * trigger some glue motherboard logic. Complete APIC bus
         * silence for 1 second, this overestimates the time the
         * boot CPU is spending to send the up to 2 STARTUP IPIs
         * by a factor of two. This should be enough.
         */

        /*
         * Waiting 2s total for startup (udelay is not yet working)
         */
        timeout = jiffies + 2*HZ;
        while (time_before(jiffies, timeout)) {
                /*
                 * Has the boot CPU finished its STARTUP sequence?
                 */
                if (cpu_isset(cpuid, cpu_callout_map))
                        break;
                cpu_relax();
        }

        if (!time_before(jiffies, timeout)) {
                panic("smp_callin: CPU%d started up but did not get a callout!\n",
                        cpuid);
        }

        /*
         * The boot CPU has finished the init stage and is spinning
         * on callin_map until we finish. We are free to set up this
         * CPU, first the APIC. (this is probably redundant on most
         * boards)
         */

        Dprintk("CALLIN, before setup_local_APIC().\n");
        setup_local_APIC();

        /*
         * Get our bogomips.
         *
         * Need to enable IRQs because it can take longer and then
         * the NMI watchdog might kill us.
         */
        local_irq_enable();
        calibrate_delay();
        local_irq_disable();
        Dprintk("Stack at about %p\n", &cpuid);

        /*
         * Save our processor parameters
         */
        smp_store_cpu_info(cpuid);

        /*
         * Allow the master to continue.
         */
        cpu_set(cpuid, cpu_callin_map);
}
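/*
 * Rough sketch of the resulting BP/AP boot handshake (BP steps are in
 * do_boot_cpu()/__cpu_up() below, AP steps in smp_callin() above and
 * start_secondary()):
 *
 *	BP: INIT, INIT-deassert, STARTUPs	AP: spins on init_deasserted
 *	BP: set cpu_callout_map bit		AP: spins on cpu_callout_map
 *	AP: set cpu_callin_map bit		BP: spins on cpu_callin_map
 *	AP: set cpu_online_map bit		BP: spins on cpu_online_map
 */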
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
        struct cpuinfo_x86 *c = cpu_data + cpu;
        /*
         * For perf, we return last level cache shared map.
         * And for power savings, we return cpu_core_map.
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
                return per_cpu(cpu_core_map, cpu);
        else
                return c->llc_shared_map;
}
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

static inline void set_cpu_sibling_map(int cpu)
{
        int i;
        struct cpuinfo_x86 *c = cpu_data;

        cpu_set(cpu, cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu_mask(i, cpu_sibling_setup_map) {
                        if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
                            c[cpu].cpu_core_id == c[i].cpu_core_id) {
                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                                cpu_set(i, per_cpu(cpu_core_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c[cpu].llc_shared_map);
                                cpu_set(cpu, c[i].llc_shared_map);
                        }
                }
        } else {
                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
        }

        cpu_set(cpu, c[cpu].llc_shared_map);

        if (current_cpu_data.x86_max_cores == 1) {
                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                c[cpu].booted_cores = 1;
                return;
        }

        for_each_cpu_mask(i, cpu_sibling_setup_map) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                        cpu_set(i, c[cpu].llc_shared_map);
                        cpu_set(cpu, c[i].llc_shared_map);
                }
                if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
                        cpu_set(i, per_cpu(cpu_core_map, cpu));
                        cpu_set(cpu, per_cpu(cpu_core_map, i));
                        /*
                         * Does this new cpu bring up a new core?
                         */
                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                        c[cpu].booted_cores++;
                                /*
                                 * increment the core count for all
                                 * the other cpus in this package
                                 */
                                if (i != cpu)
                                        c[i].booted_cores++;
                        } else if (i != cpu && !c[cpu].booted_cores)
                                c[cpu].booted_cores = c[i].booted_cores;
                }
        }
}
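/*
 * Example (hypothetical topology): on a single package with two cores,
 * each with two HT siblings, bringing up logical cpus 0-3 ends with
 * cpu_sibling_map[0] = { 0, 1 } (one core), cpu_core_map[0] = { 0-3 }
 * (the whole package), and booted_cores counting each core exactly once.
 */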
/*
 * Setup code on secondary processor (after coming out of the trampoline)
 */
void __cpuinit start_secondary(void)
{
        /*
         * Don't put anything before smp_callin(); SMP booting is so
         * fragile that we want to limit the things done here to the
         * most necessary things.
         */
        cpu_init();
        preempt_disable();
        smp_callin();

        /* otherwise gcc will move up the smp_processor_id before the cpu_init */
        barrier();

        /*
         * Check TSC sync first:
         */
        check_tsc_sync_target();

        if (nmi_watchdog == NMI_IO_APIC) {
                disable_8259A_irq(0);
                enable_NMI_through_LVT0(NULL);
                enable_8259A_irq(0);
        }

        /*
         * The sibling maps must be set before turning the online map on
         * for this cpu.
         */
        set_cpu_sibling_map(smp_processor_id());

        /*
         * We need to hold call_lock, so there is no inconsistency
         * between the time smp_call_function() determines the number of
         * IPI recipients, and the time when the determination is made
         * for which cpus receive the IPI in genapic_flat.c. Holding this
         * lock helps us to not include this cpu in a currently in progress
         * smp_call_function().
         */
        lock_ipi_call_lock();
        spin_lock(&vector_lock);

        /* Setup the per cpu irq handling data structures */
        __setup_vector_irq(smp_processor_id());
        /*
         * Allow the master to continue.
         */
        cpu_set(smp_processor_id(), cpu_online_map);
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
        spin_unlock(&vector_lock);

        unlock_ipi_call_lock();

        setup_secondary_APIC_clock();

        cpu_idle();
}
extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);
static void inquire_remote_apic(int apicid)
{
        unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
        char *names[] = { "ID", "VERSION", "SPIV" };
        int timeout;
        unsigned int status;

        printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

        for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
                printk("... APIC #%d %s: ", apicid, names[i]);

                /*
                 * Wait for idle.
                 */
                status = safe_apic_wait_icr_idle();
                if (status)
                        printk("a previous APIC delivery may have failed\n");

                apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
                apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);

                timeout = 0;
                do {
                        udelay(100);
                        status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
                } while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

                switch (status) {
                case APIC_ICR_RR_VALID:
                        status = apic_read(APIC_RRR);
                        printk("%08x\n", status);
                        break;
                default:
                        printk("failed\n");
                }
        }
}
/*
 * Kick the secondary to wake up.
 */
static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid,
                                               unsigned int start_rip)
{
        unsigned long send_status, accept_status = 0;
        int maxlvt, num_starts, j;

        Dprintk("Asserting INIT.\n");

        /*
         * Turn INIT on target chip
         */
        apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

        /*
         * Send IPI
         */
        apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
                                | APIC_DM_INIT);

        Dprintk("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        mdelay(10);

        Dprintk("Deasserting INIT.\n");

        /* Target chip */
        apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

        /* Send IPI */
        apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

        Dprintk("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();

        mb();
        atomic_set(&init_deasserted, 1);
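        /*
         * smp_callin() on the AP spins on init_deasserted before touching
         * its local APIC; the atomic_set() above is what releases it.
         */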

        num_starts = 2;

        /*
         * Run STARTUP IPI loop.
         */
        Dprintk("#startup loops: %d.\n", num_starts);

        maxlvt = get_maxlvt();

        for (j = 1; j <= num_starts; j++) {
                Dprintk("Sending STARTUP #%d.\n", j);
                apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
                Dprintk("After apic_write.\n");

                /*
                 * STARTUP IPI
                 */

                /* Target chip */
                apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

                /* Boot on the stack */
                /* Kick the second */
                apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12));
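                /*
                 * The STARTUP vector field carries start_rip >> 12, i.e. the
                 * 4 KiB page number the AP starts executing at in real mode;
                 * hence the alignment constraints on the trampoline.
                 */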
                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(300);

                Dprintk("Startup point 1.\n");

                Dprintk("Waiting for send to finish...\n");
                send_status = safe_apic_wait_icr_idle();

                /*
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(200);
                /*
                 * Due to the Pentium erratum 3AP.
                 */
                if (maxlvt > 3) {
                        apic_write(APIC_ESR, 0);
                }
                accept_status = (apic_read(APIC_ESR) & 0xEF);
                if (send_status || accept_status)
                        break;
        }
        Dprintk("After Startup.\n");

        if (send_status)
                printk(KERN_ERR "APIC never delivered???\n");
        if (accept_status)
                printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

        return (send_status | accept_status);
}
struct create_idle {
        struct work_struct work;
        struct task_struct *idle;
        struct completion done;
        int cpu;
};

void do_fork_idle(struct work_struct *work)
{
        struct create_idle *c_idle =
                container_of(work, struct create_idle, work);

        c_idle->idle = fork_idle(c_idle->cpu);
        complete(&c_idle->done);
}
static int __cpuinit do_boot_cpu(int cpu, int apicid)
{
        unsigned long boot_error;
        int timeout;
        unsigned long start_rip;
        struct create_idle c_idle = {
                .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
                .cpu = cpu,
                .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
        };
        /* allocate memory for gdts of secondary cpus. Hotplug is considered */
        if (!cpu_gdt_descr[cpu].address &&
            !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
                printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
                return -1;
        }
        /* Allocate node local memory for AP pdas */
        if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
                struct x8664_pda *newpda, *pda;
                int node = cpu_to_node(cpu);
                pda = cpu_pda(cpu);
                newpda = kmalloc_node(sizeof (struct x8664_pda), GFP_ATOMIC,
                                      node);
                if (newpda) {
                        memcpy(newpda, pda, sizeof (struct x8664_pda));
                        cpu_pda(cpu) = newpda;
                } else
                        printk(KERN_ERR
                "Could not allocate node local PDA for CPU %d on node %d\n",
                                cpu, node);
        }

        alternatives_smp_switch(1);
        c_idle.idle = get_idle_for_cpu(cpu);

        if (c_idle.idle) {
                c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *)
                        (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
                init_idle(c_idle.idle, cpu);
                goto do_rest;
        }
        /*
         * During cold boot, the keventd thread is not spun up yet.
         * When we do cpu hot-add, we create idle threads on the fly; they
         * should not acquire any attributes from the calling context, so
         * the clean way to create them is from keventd().
         * We check current_is_keventd() because the ACPI notifier also
         * queues work to keventd(), and if the caller is already running
         * in keventd() context we would deadlock the keventd thread.
         */
        if (!keventd_up() || current_is_keventd())
                c_idle.work.func(&c_idle.work);
        else {
                schedule_work(&c_idle.work);
                wait_for_completion(&c_idle.done);
        }
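        /*
         * Either path above ends up in do_fork_idle(), so c_idle.idle now
         * holds the new idle task (or an ERR_PTR on failure).
         */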
        if (IS_ERR(c_idle.idle)) {
                printk("failed fork for CPU %d\n", cpu);
                return PTR_ERR(c_idle.idle);
        }

        set_idle_for_cpu(cpu, c_idle.idle);

do_rest:

        cpu_pda(cpu)->pcurrent = c_idle.idle;
        start_rip = setup_trampoline();

        init_rsp = c_idle.idle->thread.rsp;
        per_cpu(init_tss, cpu).rsp0 = init_rsp;
        initial_code = start_secondary;
        clear_tsk_thread_flag(c_idle.idle, TIF_FORK);

        printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
                cpus_weight(cpu_present_map), apicid);
        /*
         * This grunge runs the startup process for
         * the targeted processor.
         */

        atomic_set(&init_deasserted, 0);

        Dprintk("Setting warm reset code and vector.\n");

        CMOS_WRITE(0xa, 0xf);
        local_flush_tlb();
        *((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
        *((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
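        /*
         * The two writes above install the real-mode warm-reset vector at
         * 40:67 (offset at 0x467, segment at 0x469), and CMOS register 0x0f
         * set to 0x0a means "warm start via 40:67", so an AP that resets
         * through the BIOS jumps straight into the trampoline.
         */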
        /*
         * Be paranoid about clearing APIC errors.
         */
        apic_write(APIC_ESR, 0);
        apic_read(APIC_ESR);

        /*
         * Status is now clean
         */
        boot_error = 0;

        /*
         * Starting actual IPI sequence...
         */
        boot_error = wakeup_secondary_via_INIT(apicid, start_rip);
        if (!boot_error) {
                /*
                 * allow APs to start initializing.
                 */
                Dprintk("Before Callout %d.\n", cpu);
                cpu_set(cpu, cpu_callout_map);
                Dprintk("After Callout %d.\n", cpu);

                /*
                 * Wait 5s total for a response
                 */
                for (timeout = 0; timeout < 50000; timeout++) {
                        if (cpu_isset(cpu, cpu_callin_map))
                                break;  /* It has booted */
                        udelay(100);
                }

                if (cpu_isset(cpu, cpu_callin_map)) {
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        Dprintk("CPU has booted.\n");
                } else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
                                        == 0xA5)
                                /* trampoline started but...? */
                                printk("Stuck ??\n");
                        else
                                /* trampoline code not run */
                                printk("Not responding.\n");
                        inquire_remote_apic(apicid);
                }
        }
        if (boot_error) {
                cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
                clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
                clear_node_cpumask(cpu); /* was set by numa_add_cpu */
                cpu_clear(cpu, cpu_present_map);
                cpu_clear(cpu, cpu_possible_map);
                per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
                return -EIO;
        }

        return 0;
}
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;
/*
 * Cleanup possible dangling ends...
 */
static __cpuinit void smp_cleanup_boot(void)
{
        /*
         * Paranoid: Set warm reset code and vector here back
         * to default values.
         */
        CMOS_WRITE(0, 0xf);

        /*
         * Reset trampoline flag
         */
        *((volatile int *) phys_to_virt(0x467)) = 0;
}
/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
        cpu_present_map = cpumask_of_cpu(0);
        cpu_possible_map = cpumask_of_cpu(0);
        if (smp_found_config)
                phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
        else
                phys_cpu_present_map = physid_mask_of_physid(0);
        cpu_set(0, per_cpu(cpu_sibling_map, 0));
        cpu_set(0, per_cpu(cpu_core_map, 0));
}
#ifdef CONFIG_HOTPLUG_CPU

int additional_cpus __initdata = -1;

/*
 * cpu_possible_map should be static: it cannot change as cpus are
 * onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to do this dynamically on cpu arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * If cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 */
__init void prefill_possible_map(void)
{
        int i;
        int possible;

        if (additional_cpus == -1) {
                if (disabled_cpus > 0)
                        additional_cpus = disabled_cpus;
                else
                        additional_cpus = 0;
        }
        possible = num_processors + additional_cpus;
        if (possible > NR_CPUS)
                possible = NR_CPUS;

        printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
                possible, max_t(int, possible - num_processors, 0));

        for (i = 0; i < possible; i++)
                cpu_set(i, cpu_possible_map);
}
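/*
 * Example with made-up numbers: 2 CPUs booted (num_processors) and 2
 * marked disabled by the BIOS gives possible = 4, so two spare slots
 * can be hot-added later without reallocating per-cpu structures.
 */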
/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
        if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
                printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
                        hard_smp_processor_id());
                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }

        /*
         * If we couldn't find an SMP configuration at boot time,
         * get out of here now!
         */
        if (!smp_found_config) {
                printk(KERN_NOTICE "SMP motherboard not detected.\n");
                disable_smp();
                if (APIC_init_uniprocessor())
                        printk(KERN_NOTICE "Local APIC not detected."
                                           " Using dummy APIC emulation.\n");
                return -1;
        }

        /*
         * Should not be necessary because the MP table should list the boot
         * CPU too, but we do it for the sake of robustness anyway.
         */
        if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
                printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
                        boot_cpu_id);
                physid_set(hard_smp_processor_id(), phys_cpu_present_map);
        }

        /*
         * If we couldn't find a local APIC, then get out of here now!
         */
        if (!cpu_has_apic) {
                printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
                        boot_cpu_id);
                printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
                nr_ioapics = 0;
                return -1;
        }

        /*
         * If SMP should be disabled, then really disable it!
         */
        if (!max_cpus) {
                printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
                nr_ioapics = 0;
                return -1;
        }

        return 0;
}
/*
 * Copy apicids found by MP_processor_info from the initial array to the
 * per cpu data area. The x86_cpu_to_apicid_init array is then expendable
 * and x86_cpu_to_apicid_ptr is zeroed, indicating that the static array
 * is no longer used.
 */
void __init smp_set_apicids(void)
{
        int cpu;

        for_each_cpu_mask(cpu, cpu_possible_map) {
                if (per_cpu_offset(cpu))
                        per_cpu(x86_cpu_to_apicid, cpu) =
                                                x86_cpu_to_apicid_init[cpu];
        }

        /* indicate the static array will be going away soon */
        x86_cpu_to_apicid_ptr = NULL;
}
/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        nmi_watchdog_default();
        current_cpu_data = boot_cpu_data;
        current_thread_info()->cpu = 0;  /* needed? */
        smp_set_apicids();
        set_cpu_sibling_map(0);

        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
                disable_smp();
                return;
        }

        /*
         * Switch from PIC to APIC mode.
         */
        setup_local_APIC();

        if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
                panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
                      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
                /* Or can we switch back to PIC here? */
        }

        /*
         * Now start the IO-APICs
         */
        if (!skip_ioapic_setup && nr_ioapics)
                setup_IO_APIC();
        else
                nr_ioapics = 0;

        /*
         * Set up local APIC timer on boot CPU.
         */

        setup_boot_APIC_clock();
}
/*
 * Early setup to make printk work.
 */
void __init smp_prepare_boot_cpu(void)
{
        int me = smp_processor_id();
        cpu_set(me, cpu_online_map);
        cpu_set(me, cpu_callout_map);
        per_cpu(cpu_state, me) = CPU_ONLINE;
}
/*
 * Entry point to boot a CPU.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
        int apicid = cpu_present_to_apicid(cpu);
        unsigned long flags;
        int err;

        WARN_ON(irqs_disabled());

        Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

        if (apicid == BAD_APICID || apicid == boot_cpu_id ||
            !physid_isset(apicid, phys_cpu_present_map)) {
                printk("__cpu_up: bad cpu %d\n", cpu);
                return -EINVAL;
        }

        /*
         * Already booted CPU?
         */
        if (cpu_isset(cpu, cpu_callin_map)) {
                Dprintk("do_boot_cpu %d Already started\n", cpu);
                return -ENOSYS;
        }

        /*
         * Save current MTRR state in case it was changed since early boot
         * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
         */
        mtrr_save_state();

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        /* Boot it! */
        err = do_boot_cpu(cpu, apicid);
        if (err < 0) {
                Dprintk("do_boot_cpu failed %d\n", err);
                return err;
        }

        /* Unleash the CPU! */
        Dprintk("waiting for cpu %d\n", cpu);

        /*
         * Make sure and check TSC sync:
         */
        local_irq_save(flags);
        check_tsc_sync_source(cpu);
        local_irq_restore(flags);

        while (!cpu_isset(cpu, cpu_online_map))
                cpu_relax();
        err = 0;

        return err;
}
/*
 * Finish the SMP boot.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
        smp_cleanup_boot();
        setup_ioapic_dest();
        check_nmi_watchdog();
}
#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)
{
        int sibling;
        struct cpuinfo_x86 *c = cpu_data;

        for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                /*
                 * last thread sibling in this cpu core going down
                 */
                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                        c[sibling].booted_cores--;
        }

        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
        cpus_clear(per_cpu(cpu_sibling_map, cpu));
        cpus_clear(per_cpu(cpu_core_map, cpu));
        c[cpu].phys_proc_id = 0;
        c[cpu].cpu_core_id = 0;
        cpu_clear(cpu, cpu_sibling_setup_map);
}
void remove_cpu_from_maps(void)
{
        int cpu = smp_processor_id();

        cpu_clear(cpu, cpu_callout_map);
        cpu_clear(cpu, cpu_callin_map);
        clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
        clear_node_cpumask(cpu);
}
int __cpu_disable(void)
{
        int cpu = smp_processor_id();

        /*
         * Perhaps use cpufreq to drop frequency, but that could go
         * into generic code.
         *
         * We won't take down the boot processor on i386 due to some
         * interrupts only being able to be serviced by the BSP.
         * Especially so if we're not using an IOAPIC	-zwane
         */
        if (cpu == 0)
                return -EBUSY;

        if (nmi_watchdog == NMI_LOCAL_APIC)
                stop_apic_nmi_watchdog(NULL);
        clear_local_APIC();

        /*
         * Allow any queued timer interrupts to get serviced.
         * This is only a temporary solution until we clean up
         * fixup_irqs as we do for IA64.
         */
        local_irq_enable();
        mdelay(1);

        local_irq_disable();
        remove_siblinginfo(cpu);

        spin_lock(&vector_lock);
        /* It's now safe to remove this processor from the online map */
        cpu_clear(cpu, cpu_online_map);
        spin_unlock(&vector_lock);
        remove_cpu_from_maps();
        fixup_irqs(cpu_online_map);
        return 0;
}
void __cpu_die(unsigned int cpu)
{
        /* We don't do anything here: idle task is faking death itself. */
        unsigned int i;

        for (i = 0; i < 10; i++) {
                /* They ack this in play_dead by setting CPU_DEAD */
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        printk("CPU %d is now offline\n", cpu);
                        if (1 == num_online_cpus())
                                alternatives_smp_switch(0);
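                                /* (undoes the SMP lock-prefix patching that
                                 * alternatives_smp_switch(1) applied in
                                 * do_boot_cpu() when the first AP came up) */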
                        return;
                }
                msleep(100);
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
static __init int setup_additional_cpus(char *s)
{
        return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);
#else /* ... !CONFIG_HOTPLUG_CPU */

int __cpu_disable(void)
{
        return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */