/*
 * SMP boot-related support
 *
 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2001, 2004-2005 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 *	Ashok Raj <ashok.raj@intel.com>
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	Moved SMP booting functions from smp.c to here.
 * 01/04/27 David Mosberger <davidm@hpl.hp.com>	Added ITC synching code.
 * 02/07/31 David Mosberger <davidm@hpl.hp.com>	Switch over to hotplug-CPU boot-sequence.
 *						smp_boot_cpus()/smp_commence() is replaced by
 *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
 * 04/06/21 Ashok Raj <ashok.raj@intel.com>	Added CPU Hotplug Support
 * 04/12/26 Jin Gordon <gordon.jin@intel.com>
 * 04/12/26 Rohit Seth <rohit.seth@intel.com>
 *						Add multi-threading and multi-core detection
 * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
 *						Setup cpu_sibling_map and cpu_core_map
 */
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/efi.h>
#include <linux/percpu.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/sn/arch.h>

#define SMP_DEBUG 0

#if SMP_DEBUG
#define Dprintk(x...)  printk(x)
#else
#define Dprintk(x...)
#endif
#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_PERMIT_BSP_REMOVE
#define bsp_remove_ok	1
#else
#define bsp_remove_ok	0
#endif

/*
 * Store all idle threads; they can be reused instead of creating a new
 * thread.  This also avoids the complicated thread-destroy functionality
 * for idle threads.
 */
struct task_struct *idle_thread_array[NR_CPUS];

/*
 * Global array allocated for NR_CPUS at boot time
 */
struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];

/*
 * start_ap in head.S uses this to store current booting cpu
 * info.
 */
struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];

#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);

#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))

#else

#define get_idle_for_cpu(x)	(NULL)
#define set_idle_for_cpu(x,p)
#define set_brendez_area(x)
#endif
/*
 * ITC synchronization related stuff:
 */
#define MASTER	(0)
#define SLAVE	(SMP_CACHE_BYTES/8)

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static volatile unsigned long go[SLAVE + 1];

#define DEBUG_ITC_SYNC	0

extern void start_ap (void);
extern unsigned long ia64_iobase;
struct task_struct *task_for_booting_cpu;

/*
 * State for each CPU
 */
DEFINE_PER_CPU(int, cpu_state);

/* Bitmasks of currently online and possible CPUs */
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

int smp_num_siblings = 1;

/* which logical CPU number maps to which CPU (physical APIC ID) */
volatile int ia64_cpu_to_sapicid[NR_CPUS];
EXPORT_SYMBOL(ia64_cpu_to_sapicid);

static volatile cpumask_t cpu_callin_map;

struct smp_boot_data smp_boot_data __initdata;

unsigned long ap_wakeup_vector = -1;	/* external interrupt used to wake up APs */

char __initdata no_int_routing;

unsigned char smp_int_redirect;	/* are INT and IPI redirectable by the chipset? */

#ifdef CONFIG_FORCE_CPEI_RETARGET
#define CPEI_OVERRIDE_DEFAULT	(1)
#else
#define CPEI_OVERRIDE_DEFAULT	(0)
#endif

unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;
static int __init
cmdl_force_cpei(char *str)
{
	int value = 0;

	get_option (&str, &value);
	force_cpei_retarget = value;

	return 1;
}

__setup("force_cpei=", cmdl_force_cpei);

static int __init
nointroute (char *str)
{
	no_int_routing = 1;
	printk ("no_int_routing on\n");
	return 1;
}

__setup("nointroute", nointroute);

static void fix_b0_for_bsp(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpuid;
	static int fix_bsp_b0 = 1;

	cpuid = smp_processor_id();

	/*
	 * Cache the b0 value on the first AP that comes up
	 */
	if (!(fix_bsp_b0 && cpuid))
		return;

	sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
	printk ("Fixed BSP b0 value from CPU %d\n", cpuid);

	fix_bsp_b0 = 0;
#endif
}
void
sync_master (void *arg)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
			while (!go[MASTER])
				cpu_relax();
			go[MASTER] = 0;
			go[SLAVE] = ia64_get_itc();
		}
	}
	local_irq_restore(flags);
}
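
/*
 * Note on the go[] handshake above: MASTER and SLAVE index the same
 * unsigned-long array, but SLAVE is SMP_CACHE_BYTES/8 entries further in,
 * so with 8-byte longs the two flags sit a full cache line apart and the
 * busy-wait loops on the two CPUs don't ping-pong a single line between
 * them.  The master spins until the slave raises go[MASTER], then answers
 * by publishing its current ITC value in go[SLAVE].
 */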
/*
 * Return the number of cycles by which our itc differs from the itc on the master
 * (time-keeper) CPU.  A positive number indicates our itc is ahead of the master,
 * negative that it is behind.
 */
static inline long
get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	long i;

	for (i = 0; i < NUM_ITERS; ++i) {
		t0 = ia64_get_itc();
		go[MASTER] = 1;
		while (!(tm = go[SLAVE]))
			cpu_relax();
		go[SLAVE] = 0;
		t1 = ia64_get_itc();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
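
/*
 * A worked example of the overflow-safe midpoint above (hypothetical
 * values): for best_t0 = 7 and best_t1 = 9, best_t0/2 + best_t1/2 is
 * 3 + 4 = 7, and since both divisions truncated, the remainder test bumps
 * tcenter to 8, the exact midpoint.  Computing (best_t0 + best_t1)/2
 * directly could wrap around once the ITC values grow large.
 */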
/*
 * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
 * (normally the time-keeper CPU).  We use a closed loop to eliminate the possibility of
 * unaccounted-for errors (such as getting a machine check in the middle of a calibration
 * step).  The basic idea is for the slave to ask the master what itc value it has and to
 * read its own itc before and after the master responds.  Each iteration gives us three
 * timestamps:
 *
 *	slave		master
 *
 *	t0 ---\
 *	       ---\
 *	           --->
 *	               tm
 *	           /---
 *	       /---
 *	t1 <---
 *
 * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
 * and t1.  If we achieve this, the clocks are synchronized provided the interconnect
 * between the slave and the master is symmetric.  Even if the interconnect were
 * asymmetric, we would still know that the synchronization error is smaller than the
 * roundtrip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
 * within one or two cycles.  However, we can only *guarantee* that the synchronization is
 * accurate to within a round-trip time, which is typically in the range of several
 * hundred cycles (e.g., ~500 cycles).  In practice, this means that the itc's are usually
 * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
 * than half a microsecond or so.
 */
void
ia64_sync_itc (unsigned int master)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_ITC_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	/*
	 * Make sure local timer ticks are disabled while we sync.  If
	 * they were enabled, we'd have to worry about nasty issues
	 * like setting the ITC ahead of (or a long time before) the
	 * next scheduled tick.
	 */
	BUG_ON((ia64_get_itv() & (1 << 16)) == 0);

	go[MASTER] = 1;

	if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
		return;
	}

	while (go[MASTER])
		cpu_relax();	/* wait for master to be ready */

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS; ++i) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				ia64_set_itc(ia64_get_itc() + adj);
			}
#if DEBUG_ITC_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);

#if DEBUG_ITC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
}
/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __devinit
smp_setup_percpu_timer (void)
{
}

static void __cpuinit
smp_callin (void)
{
	int cpuid, phys_id, itc_master;
	struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
	extern void ia64_init_itm(void);
	extern volatile int time_keeper_id;

#ifdef CONFIG_PERFMON
	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();
	itc_master = time_keeper_id;

	if (cpu_online(cpuid)) {
		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	fix_b0_for_bsp();

	lock_ipi_calllock();
	spin_lock(&vector_lock);
	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(cpuid);
	cpu_set(cpuid, cpu_online_map);
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
	spin_unlock(&vector_lock);
	unlock_ipi_calllock();

	smp_setup_percpu_timer();

	ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif

	local_irq_enable();

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		/*
		 * Synchronize the ITC with the BP.  Need to do this after irqs are
		 * enabled because ia64_sync_itc() calls smp_call_function_single(),
		 * which calls spin_unlock_bh(), which calls local_bh_enable(), which
		 * bugs out if irqs are not enabled...
		 */
		Dprintk("Going to syncup ITC with ITC Master.\n");
		ia64_sync_itc(itc_master);
	}

	/*
	 * Get our bogomips.
	 */
	ia64_init_itm();

	/*
	 * Delay calibration can be skipped if the new processor is identical to the
	 * previous processor.
	 */
	last_cpuinfo = cpu_data(cpuid - 1);
	this_cpuinfo = local_cpu_data;
	if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
	    last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
	    last_cpuinfo->features != this_cpuinfo->features ||
	    last_cpuinfo->revision != this_cpuinfo->revision ||
	    last_cpuinfo->family != this_cpuinfo->family ||
	    last_cpuinfo->archrev != this_cpuinfo->archrev ||
	    last_cpuinfo->model != this_cpuinfo->model)
		calibrate_delay();
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;

#ifdef CONFIG_IA32_SUPPORT
	ia32_gdt_init();
#endif

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n", cpuid, &cpuid);
}

/*
 * Activate a secondary processor.  head.S calls this.
 */
int __cpuinit
start_secondary (void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
	efi_map_pal_code();
	cpu_init();
	preempt_disable();
	smp_callin();

	cpu_idle();
	return 0;
}
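
/*
 * AP boot flow as wired up in this file: init_smp_config() points the SAL
 * boot rendezvous at start_ap (head.S), do_boot_cpu() sends the wakeup
 * IPI, and the AP enters start_secondary() above; smp_callin() then marks
 * the CPU online and sets cpu_callin_map so the waiting BSP can proceed
 * before this CPU settles into cpu_idle().
 */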
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	return NULL;
}

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

void __cpuinit
do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}
static int __cpuinit
do_boot_cpu (int sapicid, int cpu)
{
	int timeout;
	struct create_idle c_idle = {
		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER(c_idle.done),
	};

	c_idle.idle = get_idle_for_cpu(cpu);
	if (c_idle.idle) {
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	/*
	 * We can't use kernel_thread since we must avoid rescheduling the child.
	 */
	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle))
		panic("failed fork for CPU %d", cpu);

	set_idle_for_cpu(cpu, c_idle.idle);

do_rest:
	task_for_booting_cpu = c_idle.idle;

	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

	set_brendez_area(cpu);
	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);

	/*
	 * Wait 10s total for the AP to start
	 */
	Dprintk("Waiting on callin_map ...");
	for (timeout = 0; timeout < 100000; timeout++) {
		if (cpu_isset(cpu, cpu_callin_map))
			break;	/* It has booted */
		udelay(100);
	}
	Dprintk("\n");

	if (!cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
		ia64_cpu_to_sapicid[cpu] = -1;
		cpu_clear(cpu, cpu_online_map);	/* was set in smp_callin() */
		return -EINVAL;
	}
	return 0;
}
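
/*
 * The wait loop above polls 100000 times with udelay(100), i.e. the
 * advertised 10 seconds total.  Note also that with CONFIG_HOTPLUG_CPU,
 * get_idle_for_cpu()/set_idle_for_cpu() let a re-added CPU reuse the idle
 * task forked on its first boot instead of forking a new one.
 */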
static int __init
decay (char *str)
{
	int ticks;
	get_option (&str, &ticks);
	return 1;
}

__setup("decay=", decay);
/*
 * Initialize the logical CPU number to SAPICID mapping
 */
void __init
smp_build_cpu_map (void)
{
	int sapicid, cpu, i;
	int boot_cpu_id = hard_smp_processor_id();

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ia64_cpu_to_sapicid[cpu] = -1;
	}

	ia64_cpu_to_sapicid[0] = boot_cpu_id;
	cpus_clear(cpu_present_map);
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_possible_map);
	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
		sapicid = smp_boot_data.cpu_phys_id[i];
		if (sapicid == boot_cpu_id)
			continue;
		cpu_set(cpu, cpu_present_map);
		cpu_set(cpu, cpu_possible_map);
		ia64_cpu_to_sapicid[cpu] = sapicid;
		cpu++;
	}
}
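
/*
 * Example with made-up SAPIC ids: if the BSP's hard id is 0x20 and
 * smp_boot_data lists { 0x20, 0x21, 0x30, 0x31 }, the result is
 * cpu0->0x20, cpu1->0x21, cpu2->0x30, cpu3->0x31; the BSP's own entry in
 * the list is skipped by the sapicid == boot_cpu_id test.
 */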
/*
 * Cycle through the APs sending Wakeup IPIs to boot each.
 */
void __init
smp_prepare_cpus (unsigned int max_cpus)
{
	int boot_cpu_id = hard_smp_processor_id();

	/*
	 * Initialize the per-CPU profiling counter/multiplier
	 */

	smp_setup_percpu_timer();

	/*
	 * We have the boot CPU online for sure.
	 */
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);

	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
	ia64_cpu_to_sapicid[0] = boot_cpu_id;

	printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);

	current_thread_info()->cpu = 0;

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		cpus_clear(cpu_online_map);
		cpus_clear(cpu_present_map);
		cpus_clear(cpu_possible_map);
		cpu_set(0, cpu_online_map);
		cpu_set(0, cpu_present_map);
		cpu_set(0, cpu_possible_map);
		return;
	}
}
void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callin_map);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	paravirt_post_smp_prepare_boot_cpu();
}
#ifdef CONFIG_HOTPLUG_CPU
static inline void
clear_cpu_sibling_map(int cpu)
{
	int i;

	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
	for_each_cpu_mask(i, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[i]);

	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
}

static void
remove_siblinginfo(int cpu)
{
	int last = 0;

	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_clear(cpu, cpu_core_map[cpu]);
		cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
		return;
	}

	last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);

	/* remove it from all sibling maps */
	clear_cpu_sibling_map(cpu);
}
extern void fixup_irqs(void);

int migrate_platform_irqs(unsigned int cpu)
{
	int new_cpei_cpu;
	irq_desc_t *desc = NULL;
	cpumask_t mask;
	int retval = 0;

	/*
	 * Don't permit the CPEI target to be removed.
	 */
	if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
		printk ("CPU (%d) is CPEI Target\n", cpu);
		if (can_cpei_retarget()) {
			/*
			 * Now re-target the CPEI to a different processor
			 */
			new_cpei_cpu = any_online_cpu(cpu_online_map);
			mask = cpumask_of_cpu(new_cpei_cpu);
			set_cpei_target_cpu(new_cpei_cpu);
			desc = irq_desc + ia64_cpe_irq;
			/*
			 * Switch it immediately for now; we need to fake the
			 * interrupt the way other interrupts are handled, but
			 * need to study CPEI behaviour with polling before
			 * making changes.
			 */
			if (desc) {
				desc->chip->disable(ia64_cpe_irq);
				desc->chip->set_affinity(ia64_cpe_irq, mask);
				desc->chip->enable(ia64_cpe_irq);
				printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu);
			}
		}
		if (!desc) {
			printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
			retval = -EBUSY;
		}
	}
	return retval;
}
/* must be called with cpucontrol mutex held */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Don't permit removal of the boot processor for now.
	 */
	if (cpu == 0 && !bsp_remove_ok) {
		printk ("Your platform does not support removal of BSP\n");
		return (-EBUSY);
	}

	if (ia64_platform_is("sn2")) {
		if (!sn_cpu_disable_allowed(cpu))
			return -EBUSY;
	}

	cpu_clear(cpu, cpu_online_map);

	if (migrate_platform_irqs(cpu)) {
		cpu_set(cpu, cpu_online_map);
		return (-EBUSY);
	}

	remove_siblinginfo(cpu);
	cpu_clear(cpu, cpu_online_map);
	fixup_irqs();
	local_flush_tlb_all();
	cpu_clear(cpu, cpu_callin_map);
	return 0;
}
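
/*
 * Ordering note for __cpu_disable() above: the CPU is taken out of
 * cpu_online_map before migrate_platform_irqs() so that the new CPEI
 * target is chosen from the remaining CPUs, and it is put back if
 * migration fails, leaving the aborted offline attempt fully reversible.
 */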
void __cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 100; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
		{
			printk ("CPU %d is now offline\n", cpu);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#endif /* CONFIG_HOTPLUG_CPU */
void
smp_cpus_done (unsigned int dummy)
{
	int cpu;
	unsigned long bogosum = 0;

	/*
	 * Allow the user to impress friends.
	 */

	for_each_online_cpu(cpu) {
		bogosum += cpu_data(cpu)->loops_per_jiffy;
	}

	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
}
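
/*
 * The BogoMIPS arithmetic above: loops_per_jiffy * HZ is loops per second
 * and one BogoMIPS is 500000 loops per second, so bogosum/(500000/HZ)
 * yields the integer part and (bogosum/(5000/HZ)) % 100 the two decimal
 * places.
 */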
static inline void __devinit
set_cpu_sibling_map(int cpu)
{
	int i;

	for_each_online_cpu(i) {
		if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
			}
		}
	}
}
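
/*
 * Example with a made-up topology: for two threads of one core, both
 * socket_id and core_id match, so each CPU lands in the other's
 * cpu_core_map and cpu_sibling_map; two cores on the same socket match
 * only on socket_id and therefore share only the core map.
 */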
int __cpuinit
__cpu_up (unsigned int cpu)
{
	int ret;
	int sapicid;

	sapicid = ia64_cpu_to_sapicid[cpu];
	if (sapicid == -1)
		return -EINVAL;

	/*
	 * An already-booted CPU?  Not valid anymore, since we no longer
	 * tight-spin in the idle loop.
	 */
	if (cpu_isset(cpu, cpu_callin_map))
		return -EINVAL;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Processor goes to start_secondary(), sets online flag */
	ret = do_boot_cpu(sapicid, cpu);
	if (ret < 0)
		return ret;

	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
		cpu_set(cpu, cpu_core_map[cpu]);
		return 0;
	}

	set_cpu_sibling_map(cpu);

	return 0;
}
/*
 * Assume that CPUs have been discovered by some platform-dependent interface.  For
 * SoftSDV/Lion, that would be ACPI.
 *
 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
 */
void __init
init_smp_config(void)
{
	struct fptr {
		unsigned long fp;
		unsigned long gp;
	} *ap_startup;
	long sal_ret;

	/* Tell SAL where to drop the APs.  */
	ap_startup = (struct fptr *) start_ap;
	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
				       ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
	if (sal_ret < 0)
		printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
		       ia64_sal_strerror(sal_ret));
}
/*
 * identify_siblings(cpu) gets called from identify_cpu.  This populates the
 * information related to logical execution units in the per_cpu_data structure.
 */
void __devinit
identify_siblings(struct cpuinfo_ia64 *c)
{
	s64 status;
	u16 pltid;
	pal_logical_to_physical_t info;

	status = ia64_pal_logical_to_phys(-1, &info);
	if (status != PAL_STATUS_SUCCESS) {
		if (status != PAL_STATUS_UNIMPLEMENTED) {
			printk(KERN_ERR
				"ia64_pal_logical_to_phys failed with %ld\n",
				status);
			return;
		}

		info.overview_ppid = 0;
		info.overview_cpp = 1;
		info.overview_tpc = 1;
	}

	status = ia64_sal_physical_id_info(&pltid);
	if (status != PAL_STATUS_SUCCESS) {
		if (status != PAL_STATUS_UNIMPLEMENTED)
			printk(KERN_ERR
				"ia64_sal_pltid failed with %ld\n",
				status);
		return;
	}

	c->socket_id = (pltid << 8) | info.overview_ppid;

	if (info.overview_cpp == 1 && info.overview_tpc == 1)
		return;

	c->cores_per_socket = info.overview_cpp;
	c->threads_per_core = info.overview_tpc;
	c->num_log = info.overview_num_log;

	c->core_id = info.log1_cid;
	c->thread_id = info.log1_tid;
}
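
/*
 * socket_id as computed above is a synthetic identifier: the SAL platform
 * id in the upper bits (pltid << 8) combined with the PAL physical
 * package id.  Within this file it is used only for equality tests, in
 * set_cpu_sibling_map() and is_multithreading_enabled().
 */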
/*
 * Returns non-zero if multi-threading is enabled on at least one physical
 * package.  Due to CPU hotplug and (maxcpus=), not all threads may be
 * enabled even though the processor supports multi-threading.
 */
int is_multithreading_enabled(void)
{
	int i, j;

	for_each_present_cpu(i) {
		for_each_present_cpu(j) {
			if (j == i)
				continue;
			if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) {
				if (cpu_data(j)->core_id == cpu_data(i)->core_id)
					return 1;
			}
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(is_multithreading_enabled);