/*
 * SMP boot-related support
 *
 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2001, 2004-2005 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 *	Ashok Raj <ashok.raj@intel.com>
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	Moved SMP booting functions from smp.c to here.
 * 01/04/27 David Mosberger <davidm@hpl.hp.com>	Added ITC synching code.
 * 02/07/31 David Mosberger <davidm@hpl.hp.com>	Switch over to hotplug-CPU boot-sequence.
 *						smp_boot_cpus()/smp_commence() is replaced by
 *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
 * 04/06/21 Ashok Raj <ashok.raj@intel.com>	Added CPU Hotplug Support
 * 04/12/26 Jin Gordon <gordon.jin@intel.com>
 * 04/12/26 Rohit Seth <rohit.seth@intel.com>
 *						Add multi-threading and multi-core detection
 * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
 *						Setup cpu_sibling_map and cpu_core_map
 */
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/efi.h>
#include <linux/percpu.h>
#include <linux/bitops.h>

#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/sn/arch.h>

#define SMP_DEBUG 0

#if SMP_DEBUG
#define Dprintk(x...)  printk(x)
#else
#define Dprintk(x...)
#endif
#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_PERMIT_BSP_REMOVE
#define bsp_remove_ok	1
#else
#define bsp_remove_ok	0
#endif

/*
 * Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
struct task_struct *idle_thread_array[NR_CPUS];

/*
 * Global array allocated for NR_CPUS at boot time
 */
struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];

/*
 * start_ap in head.S uses this to store current booting cpu info.
 */
struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];

#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);

#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))

#else

#define get_idle_for_cpu(x)	(NULL)
#define set_idle_for_cpu(x,p)
#define set_brendez_area(x)
#endif
/*
 * ITC synchronization related stuff:
 */
#define MASTER	(0)
#define SLAVE	(SMP_CACHE_BYTES/8)

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static volatile unsigned long go[SLAVE + 1];

#define DEBUG_ITC_SYNC	0

extern void start_ap (void);
extern unsigned long ia64_iobase;

struct task_struct *task_for_booting_cpu;

/*
 * State for each CPU
 */
DEFINE_PER_CPU(int, cpu_state);

cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);
DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
int smp_num_siblings = 1;

/* which logical CPU number maps to which CPU (physical APIC ID) */
volatile int ia64_cpu_to_sapicid[NR_CPUS];
EXPORT_SYMBOL(ia64_cpu_to_sapicid);

static volatile cpumask_t cpu_callin_map;

struct smp_boot_data smp_boot_data __initdata;

unsigned long ap_wakeup_vector = -1;	/* External Int use to wakeup APs */

char __initdata no_int_routing;

unsigned char smp_int_redirect;	/* are INT and IPI redirectable by the chipset? */

#ifdef CONFIG_FORCE_CPEI_RETARGET
#define CPEI_OVERRIDE_DEFAULT	(1)
#else
#define CPEI_OVERRIDE_DEFAULT	(0)
#endif

unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;
static int __init
cmdl_force_cpei(char *str)
{
	int value = 0;

	get_option (&str, &value);
	force_cpei_retarget = value;

	return 1;
}

__setup("force_cpei=", cmdl_force_cpei);

static int __init
nointroute (char *str)
{
	no_int_routing = 1;
	printk ("no_int_routing on\n");
	return 1;
}

__setup("nointroute", nointroute);
static void fix_b0_for_bsp(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpuid;
	static int fix_bsp_b0 = 1;

	cpuid = smp_processor_id();

	/*
	 * Cache the b0 value on the first AP that comes up
	 */
	if (!(fix_bsp_b0 && cpuid))
		return;

	sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
	printk ("Fixed BSP b0 value from CPU %d\n", cpuid);

	fix_bsp_b0 = 0;
#endif
}
void
sync_master (void *arg)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
			while (!go[MASTER])
				cpu_relax();
			go[MASTER] = 0;
			go[SLAVE] = ia64_get_itc();
		}
	}
	local_irq_restore(flags);
}
/*
 * Return the number of cycles by which our itc differs from the itc on the master
 * (time-keeper) CPU.  A positive number indicates our itc is ahead of the master,
 * negative that it is behind.
 */
static inline long
get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	long i;

	for (i = 0; i < NUM_ITERS; ++i) {
		t0 = ia64_get_itc();
		go[MASTER] = 1;
		while (!(tm = go[SLAVE]))
			cpu_relax();
		go[SLAVE] = 0;
		t1 = ia64_get_itc();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
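
/*
 * Illustrative note on the overflow-safe average above (hypothetical
 * values, for exposition only): with best_t0 = 7 and best_t1 = 9,
 * best_t0/2 + best_t1/2 gives 3 + 4 = 7, one short of the true midpoint
 * 8 because both divisions rounded down; best_t0 % 2 + best_t1 % 2 == 2
 * detects exactly that case and ++tcenter restores the lost cycle.
 * Computing (best_t0 + best_t1)/2 directly could wrap an unsigned long
 * when the two itc values are large.
 */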
/*
 * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
 * (normally the time-keeper CPU).  We use a closed loop to eliminate the possibility of
 * unaccounted-for errors (such as getting a machine check in the middle of a calibration
 * step).  The basic idea is for the slave to ask the master what itc value it has and to
 * read its own itc before and after the master responds.  Each iteration gives us three
 * timestamps:
 *
 *	slave		master
 *
 *	t0 ---\
 *	       ---\
 *		   --->
 *			tm
 *		   /---
 *	       /---
 *	t1 <---
 *
 * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
 * and t1.  If we achieve this, the clocks are synchronized provided the interconnect
 * between the slave and the master is symmetric.  Even if the interconnect were
 * asymmetric, we would still know that the synchronization error is smaller than the
 * roundtrip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
 * within one or two cycles.  However, we can only *guarantee* that the synchronization is
 * accurate to within a round-trip time, which is typically in the range of several
 * hundred cycles (e.g., ~500 cycles).  In practice, this means that the itc's are usually
 * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
 * than half a microsecond or so.
 */
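
/*
 * A worked round with hypothetical cycle counts: the slave reads
 * t0 = 1000, the master replies tm = 1600, and the slave reads
 * t1 = 1200.  The midpoint (t0 + t1)/2 = 1100 trails tm by 500 cycles,
 * so get_delta() above returns -500 and the loop in ia64_sync_itc()
 * below advances ar.itc by roughly that amount.  The roundtrip
 * t1 - t0 = 200 cycles bounds the residual error.
 */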
void
ia64_sync_itc (unsigned int master)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_ITC_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	/*
	 * Make sure local timer ticks are disabled while we sync.  If
	 * they were enabled, we'd have to worry about nasty issues
	 * like setting the ITC ahead of (or a long time before) the
	 * next scheduled tick.
	 */
	BUG_ON((ia64_get_itv() & (1 << 16)) == 0);

	go[MASTER] = 1;

	if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
		return;
	}

	while (go[MASTER])
		cpu_relax();	/* wait for master to be ready */
	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS; ++i) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				ia64_set_itc(ia64_get_itc() + adj);
			}
#if DEBUG_ITC_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);

#if DEBUG_ITC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
}
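
/*
 * Note on the closed loop above: each round applies -delta to cancel the
 * measured offset, plus adjust_latency/4, an accumulated correction that
 * compensates for the latency of applying the adjustment itself.  Once a
 * round measures delta == 0, "done" is set and the remaining rounds only
 * record measurements (under DEBUG_ITC_SYNC) without touching ar.itc.
 */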
/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __devinit
smp_setup_percpu_timer (void)
{
}

static void __cpuinit
smp_callin (void)
{
	int cpuid, phys_id, itc_master;
	struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
	extern void ia64_init_itm(void);
	extern volatile int time_keeper_id;

#ifdef CONFIG_PERFMON
	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();
	itc_master = time_keeper_id;
	if (cpu_online(cpuid)) {
		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	fix_b0_for_bsp();

	/*
	 * numa_node_id() works after this.
	 */
	set_numa_node(cpu_to_node_map[cpuid]);
	set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
	ipi_call_lock_irq();
	spin_lock(&vector_lock);
	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(cpuid);
	notify_cpu_starting(cpuid);
	cpu_set(cpuid, cpu_online_map);
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
	spin_unlock(&vector_lock);
	ipi_call_unlock_irq();

	smp_setup_percpu_timer();
	ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif

	local_irq_enable();

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		/*
		 * Synchronize the ITC with the BP.  Need to do this after irqs are
		 * enabled because ia64_sync_itc() calls smp_call_function_single(),
		 * which calls spin_unlock_bh(), which calls local_bh_enable(), which
		 * bugs out if irqs are not enabled...
		 */
		Dprintk("Going to syncup ITC with ITC Master.\n");
		ia64_sync_itc(itc_master);
	}

	/*
	 * Get our bogomips.
	 */
	ia64_init_itm();
	/*
	 * Delay calibration can be skipped if new processor is identical to the
	 * previous processor.
	 */
	last_cpuinfo = cpu_data(cpuid - 1);
	this_cpuinfo = local_cpu_data;
	if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
	    last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
	    last_cpuinfo->features != this_cpuinfo->features ||
	    last_cpuinfo->revision != this_cpuinfo->revision ||
	    last_cpuinfo->family != this_cpuinfo->family ||
	    last_cpuinfo->archrev != this_cpuinfo->archrev ||
	    last_cpuinfo->model != this_cpuinfo->model)
		calibrate_delay();
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n", cpuid, &cpuid);
}
/*
 * Activate a secondary processor.  head.S calls this.
 */
int __cpuinit
start_secondary (void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
#ifndef CONFIG_PRINTK_TIME
	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
#endif
	efi_map_pal_code();
	cpu_init();
	preempt_disable();
	smp_callin();

	cpu_idle();
	return 0;
}
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	return NULL;
}
struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

void __cpuinit
do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}
static int __cpuinit
do_boot_cpu (int sapicid, int cpu)
{
	int timeout;
	struct create_idle c_idle = {
		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER(c_idle.done),
	};

	/*
	 * We can't use kernel_thread since we must avoid rescheduling
	 * the child.
	 */
	c_idle.idle = get_idle_for_cpu(cpu);
	if (c_idle.idle) {
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	schedule_work(&c_idle.work);
	wait_for_completion(&c_idle.done);

	if (IS_ERR(c_idle.idle))
		panic("failed fork for CPU %d", cpu);

	set_idle_for_cpu(cpu, c_idle.idle);

do_rest:
	task_for_booting_cpu = c_idle.idle;

	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

	set_brendez_area(cpu);
	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);

	/*
	 * Wait 10s total for the AP to start
	 */
	Dprintk("Waiting on callin_map ...");
	for (timeout = 0; timeout < 100000; timeout++) {
		if (cpu_isset(cpu, cpu_callin_map))
			break;	/* It has booted */
		udelay(100);
	}
	Dprintk("\n");

	if (!cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
		ia64_cpu_to_sapicid[cpu] = -1;
		cpu_clear(cpu, cpu_online_map);	/* was set in smp_callin() */
		return -EINVAL;
	}
	return 0;
}
static int __init
decay (char *str)
{
	int ticks;

	get_option (&str, &ticks);
	return 1;
}

__setup("decay=", decay);
/*
 * Initialize the logical CPU number to SAPICID mapping
 */
void __init
smp_build_cpu_map (void)
{
	int sapicid, cpu, i;
	int boot_cpu_id = hard_smp_processor_id();

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ia64_cpu_to_sapicid[cpu] = -1;
	}

	ia64_cpu_to_sapicid[0] = boot_cpu_id;
	cpus_clear(cpu_present_map);
	set_cpu_present(0, true);
	set_cpu_possible(0, true);
	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
		sapicid = smp_boot_data.cpu_phys_id[i];
		if (sapicid == boot_cpu_id)
			continue;
		set_cpu_present(cpu, true);
		set_cpu_possible(cpu, true);
		ia64_cpu_to_sapicid[cpu] = sapicid;
		cpu++;
	}
}
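
/*
 * Illustration with hypothetical firmware data: if the boot CPU's SAPICID
 * is 0x20 and smp_boot_data.cpu_phys_id[] = { 0x10, 0x20, 0x30 }, the loop
 * above yields cpu 0 -> 0x20 (boot CPU, fixed first), cpu 1 -> 0x10 and
 * cpu 2 -> 0x30; the boot CPU's own entry is skipped so it is not mapped
 * twice.
 */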
/*
 * Cycle through the APs sending Wakeup IPIs to boot each.
 */
void __init
smp_prepare_cpus (unsigned int max_cpus)
{
	int boot_cpu_id = hard_smp_processor_id();

	/*
	 * Initialize the per-CPU profiling counter/multiplier
	 */

	smp_setup_percpu_timer();

	/*
	 * We have the boot CPU online for sure.
	 */
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);

	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
	ia64_cpu_to_sapicid[0] = boot_cpu_id;

	printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);

	current_thread_info()->cpu = 0;

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		init_cpu_online(cpumask_of(0));
		init_cpu_present(cpumask_of(0));
		init_cpu_possible(cpumask_of(0));
		return;
	}
}
void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callin_map);
	set_numa_node(cpu_to_node_map[smp_processor_id()]);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	paravirt_post_smp_prepare_boot_cpu();
}
#ifdef CONFIG_HOTPLUG_CPU
static inline void
clear_cpu_sibling_map(int cpu)
{
	int i;

	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
	for_each_cpu_mask(i, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[i]);

	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
}
static void
remove_siblinginfo(int cpu)
{
	int last = 0;

	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_clear(cpu, cpu_core_map[cpu]);
		cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
		return;
	}

	last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);

	/* remove it from all sibling maps */
	clear_cpu_sibling_map(cpu);
}
extern void fixup_irqs(void);

int migrate_platform_irqs(unsigned int cpu)
{
	int new_cpei_cpu;
	struct irq_data *data = NULL;
	const struct cpumask *mask;
	int retval = 0;

	/*
	 * Don't permit the CPEI target to be removed.
	 */
	if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
		printk ("CPU (%d) is CPEI Target\n", cpu);
		if (can_cpei_retarget()) {
			/*
			 * Now re-target the CPEI to a different processor
			 */
			new_cpei_cpu = any_online_cpu(cpu_online_map);
			mask = cpumask_of(new_cpei_cpu);
			set_cpei_target_cpu(new_cpei_cpu);
			data = irq_get_irq_data(ia64_cpe_irq);
			/*
			 * Switch over immediately for now; we may want to inject a
			 * fake interrupt as for other interrupts, but CPEI behaviour
			 * with polling needs study before making that change.
			 */
			if (data && data->chip) {
				data->chip->irq_disable(data);
				data->chip->irq_set_affinity(data, mask, false);
				data->chip->irq_enable(data);
				printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
			}
		}
		if (!data) {
			printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
			retval = -EBUSY;
		}
	}
	return retval;
}
/* must be called with cpucontrol mutex held */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Don't permit removal of the boot processor for now.
	 */
	if (cpu == 0 && !bsp_remove_ok) {
		printk ("Your platform does not support removal of BSP\n");
		return (-EBUSY);
	}

	if (ia64_platform_is("sn2")) {
		if (!sn_cpu_disable_allowed(cpu))
			return -EBUSY;
	}

	cpu_clear(cpu, cpu_online_map);

	if (migrate_platform_irqs(cpu)) {
		cpu_set(cpu, cpu_online_map);
		return -EBUSY;
	}

	remove_siblinginfo(cpu);
	fixup_irqs();
	local_flush_tlb_all();
	cpu_clear(cpu, cpu_callin_map);
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 100; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk ("CPU %d is now offline\n", cpu);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#endif /* CONFIG_HOTPLUG_CPU */
void
smp_cpus_done (unsigned int dummy)
{
	int cpu;
	unsigned long bogosum = 0;

	/*
	 * Allow the user to impress friends.
	 */
	for_each_online_cpu(cpu) {
		bogosum += cpu_data(cpu)->loops_per_jiffy;
	}

	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
}
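
/*
 * Worked example of the BogoMIPS arithmetic above (hypothetical numbers):
 * one BogoMIPS is 500000 loops/sec, i.e. 500000/HZ loops per jiffy.  With
 * HZ = 250 and a bogosum of 2000000 loops per jiffy, the first term prints
 * 2000000/2000 = 1000 BogoMIPS; the bogosum/(5000/HZ) % 100 term supplies
 * the two decimal places.
 */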
static inline void __devinit
set_cpu_sibling_map(int cpu)
{
	int i;

	for_each_online_cpu(i) {
		if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
			}
		}
	}
}
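
/*
 * Illustration with a hypothetical topology: on one socket with two cores
 * of two threads each, CPUs 0-3 share a socket_id, so each lands in all
 * four cpu_core_map entries; only the thread pairs that also share a
 * core_id (say {0,1} and {2,3}) become cpu_sibling_map siblings.
 */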
int __cpuinit
__cpu_up (unsigned int cpu)
{
	int ret;
	int sapicid;

	sapicid = ia64_cpu_to_sapicid[cpu];
	if (sapicid == -1)
		return -EINVAL;

	/*
	 * Already booted cpu? Not valid anymore since we don't
	 * spin tightly in the idle loop anymore.
	 */
	if (cpu_isset(cpu, cpu_callin_map))
		return -EINVAL;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Processor goes to start_secondary(), sets online flag */
	ret = do_boot_cpu(sapicid, cpu);
	if (ret < 0)
		return ret;

	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
		cpu_set(cpu, cpu_core_map[cpu]);
		return 0;
	}

	set_cpu_sibling_map(cpu);

	return 0;
}
/*
 * Assume that CPUs have been discovered by some platform-dependent interface.  For
 * SoftSDV/Lion, that would be ACPI.
 *
 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
 */
void __init
init_smp_config(void)
{
	struct fptr {
		unsigned long fp;
		unsigned long gp;
	} *ap_startup;
	long sal_ret;

	/* Tell SAL where to drop the APs.  */
	ap_startup = (struct fptr *) start_ap;
	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
				       ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp),
				       0, 0, 0, 0);
	if (sal_ret < 0)
		printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
		       ia64_sal_strerror(sal_ret));
}
/*
 * identify_siblings(cpu) gets called from identify_cpu.  This populates the
 * information related to logical execution units in the per_cpu_data structure.
 */
void __devinit
identify_siblings(struct cpuinfo_ia64 *c)
{
	long status;
	u16 pltid;
	pal_logical_to_physical_t info;

	status = ia64_pal_logical_to_phys(-1, &info);
	if (status != PAL_STATUS_SUCCESS) {
		if (status != PAL_STATUS_UNIMPLEMENTED) {
			printk(KERN_ERR
				"ia64_pal_logical_to_phys failed with %ld\n",
				status);
			return;
		}

		info.overview_ppid = 0;
		info.overview_cpp  = 1;
		info.overview_tpc  = 1;
	}

	status = ia64_sal_physical_id_info(&pltid);
	if (status != PAL_STATUS_SUCCESS) {
		if (status != PAL_STATUS_UNIMPLEMENTED)
			printk(KERN_ERR
				"ia64_sal_pltid failed with %ld\n",
				status);
		return;
	}

	c->socket_id = (pltid << 8) | info.overview_ppid;

	if (info.overview_cpp == 1 && info.overview_tpc == 1)
		return;

	c->cores_per_socket = info.overview_cpp;
	c->threads_per_core = info.overview_tpc;
	c->num_log = info.overview_num_log;

	c->core_id = info.log1_cid;
	c->thread_id = info.log1_tid;
}
/*
 * Returns non-zero if multi-threading is enabled on at least one
 * physical package.  Due to CPU hotplug and (maxcpus=), all threads
 * may not necessarily be enabled even though the processor supports
 * multi-threading.
 */
int is_multithreading_enabled(void)
{
	int i, j;

	for_each_present_cpu(i) {
		for_each_present_cpu(j) {
			if (j == i)
				continue;
			if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) {
				if (cpu_data(j)->core_id == cpu_data(i)->core_id)
					return 1;
			}
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(is_multithreading_enabled);