/*
 * SMP boot-related support
 *
 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2001, 2004-2005 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 *	Ashok Raj <ashok.raj@intel.com>
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	Moved SMP booting functions from smp.c to here.
 * 01/04/27 David Mosberger <davidm@hpl.hp.com>	Added ITC synching code.
 * 02/07/31 David Mosberger <davidm@hpl.hp.com>	Switch over to hotplug-CPU boot-sequence.
 *						smp_boot_cpus()/smp_commence() is replaced by
 *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
 * 04/06/21 Ashok Raj <ashok.raj@intel.com>	Added CPU Hotplug Support
 * 04/12/26 Jin Gordon <gordon.jin@intel.com>
 * 04/12/26 Rohit Seth <rohit.seth@intel.com>
 *						Add multi-threading and multi-core detection
 * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
 *						Setup cpu_sibling_map and cpu_core_map
 */
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/efi.h>
#include <linux/percpu.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/paravirt.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/sn/arch.h>
#define Dprintk(x...)	printk(x)
#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_PERMIT_BSP_REMOVE
#define bsp_remove_ok	1
#else
#define bsp_remove_ok	0
#endif
/*
 * Store all idle threads; they can be reused instead of creating a new
 * thread. This also avoids complicated thread-destroy functionality for
 * idle threads.
 */
struct task_struct *idle_thread_array[NR_CPUS];

/*
 * Global array allocated for NR_CPUS at boot time
 */
struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];

/*
 * start_ap in head.S uses this to store the current booting cpu
 */
struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);

#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))

#else

#define get_idle_for_cpu(x)	(NULL)
#define set_idle_for_cpu(x,p)
#define set_brendez_area(x)
#endif
/*
 * ITC synchronization related stuff:
 */
#define MASTER	(0)
#define SLAVE	(SMP_CACHE_BYTES/8)

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static volatile unsigned long go[SLAVE + 1];

#define DEBUG_ITC_SYNC	0
extern void start_ap (void);
extern unsigned long ia64_iobase;

struct task_struct *task_for_booting_cpu;

DEFINE_PER_CPU(int, cpu_state);

cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);
DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
int smp_num_siblings = 1;

/* which logical CPU number maps to which CPU (physical APIC ID) */
volatile int ia64_cpu_to_sapicid[NR_CPUS];
EXPORT_SYMBOL(ia64_cpu_to_sapicid);

static volatile cpumask_t cpu_callin_map;

struct smp_boot_data smp_boot_data __initdata;

unsigned long ap_wakeup_vector = -1;	/* external interrupt vector used to wake up the APs */

char __initdata no_int_routing;

unsigned char smp_int_redirect;		/* are INT and IPI redirectable by the chipset? */
#ifdef CONFIG_FORCE_CPEI_RETARGET
#define CPEI_OVERRIDE_DEFAULT	(1)
#else
#define CPEI_OVERRIDE_DEFAULT	(0)
#endif

unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;
static int __init
cmdl_force_cpei(char *str)
{
	int value = 0;

	get_option (&str, &value);
	force_cpei_retarget = value;

	return 1;
}

__setup("force_cpei=", cmdl_force_cpei);
static int __init
nointroute (char *str)
{
	no_int_routing = 1;
	printk ("no_int_routing on\n");
	return 1;
}

__setup("nointroute", nointroute);
static void fix_b0_for_bsp(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpuid;
	static int fix_bsp_b0 = 1;

	cpuid = smp_processor_id();

	/*
	 * Cache the b0 value on the first AP that comes up
	 */
	if (!(fix_bsp_b0 && cpuid))
		return;

	sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
	printk ("Fixed BSP b0 value from CPU %d\n", cpuid);
	fix_bsp_b0 = 0;
#endif
}
static void
sync_master (void *arg)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
			while (!go[MASTER])
				cpu_relax();
			go[MASTER] = 0;
			go[SLAVE] = ia64_get_itc();
		}
	}
	local_irq_restore(flags);
}
/*
 * Return the number of cycles by which our itc differs from the itc on the master
 * (time-keeper) CPU.  A positive number indicates our itc is ahead of the master,
 * negative that it is behind.
 */
static inline long
get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	long i;

	for (i = 0; i < NUM_ITERS; ++i) {
		t0 = ia64_get_itc();
		go[MASTER] = 1;
		while (!(tm = go[SLAVE]))
			cpu_relax();
		go[SLAVE] = 0;
		t1 = ia64_get_itc();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
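/*
 * A quick sanity check of the overflow-free averaging above, with
 * made-up numbers (not from any real trace): for best_t0 = 5 and
 * best_t1 = 7, the naive (5 + 7)/2 could overflow for large itc
 * values, while 5/2 + 7/2 = 2 + 3 = 5 loses the two discarded halves;
 * since both operands are odd, the correction adds 1, recovering the
 * exact midpoint 6.
 */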
/*
 * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
 * (normally the time-keeper CPU).  We use a closed loop to eliminate the possibility of
 * unaccounted-for errors (such as getting a machine check in the middle of a calibration
 * step).  The basic idea is for the slave to ask the master what itc value it has and to
 * read its own itc before and after the master responds.  Each iteration gives us three
 * timestamps: t0 (the slave's itc before the request), tm (the master's reply), and t1
 * (the slave's itc after the reply).
 *
 * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
 * and t1.  If we achieve this, the clocks are synchronized provided the interconnect
 * between the slave and the master is symmetric.  Even if the interconnect were
 * asymmetric, we would still know that the synchronization error is smaller than the
 * roundtrip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
 * within one or two cycles.  However, we can only *guarantee* that the synchronization is
 * accurate to within a round-trip time, which is typically in the range of several
 * hundred cycles (e.g., ~500 cycles).  In practice, this means that the itc's are usually
 * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
 * than half a microsecond or so.
 */
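/*
 * Worked example with made-up cycle counts (not from a real trace):
 * suppose get_delta() measures t0 = 1000, tm = 1600, t1 = 1200.  The
 * midpoint of the slave's window is tcenter = (1000 + 1200)/2 = 1100,
 * so delta = tcenter - tm = -500: the slave appears 500 cycles behind,
 * and the loop in ia64_sync_itc() below advances its itc by
 * adj = -delta = 500.  The roundtrip t1 - t0 = 200 cycles bounds the
 * residual error of that round.
 */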
void
ia64_sync_itc (unsigned int master)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_ITC_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	/*
	 * Make sure local timer ticks are disabled while we sync.  If
	 * they were enabled, we'd have to worry about nasty issues
	 * like setting the ITC ahead of (or a long time before) the
	 * next scheduled tick.
	 */
	BUG_ON((ia64_get_itv() & (1 << 16)) == 0);

	go[MASTER] = 1;

	if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
		return;
	}

	while (go[MASTER])
		cpu_relax();	/* wait for master to be ready */

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS; ++i) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				ia64_set_itc(ia64_get_itc() + adj);
			}
#if DEBUG_ITC_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);

#if DEBUG_ITC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
}
/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __devinit
smp_setup_percpu_timer (void)
{
}
static void __cpuinit
smp_callin (void)
{
	int cpuid, phys_id, itc_master;
	struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
	extern void ia64_init_itm(void);
	extern volatile int time_keeper_id;

#ifdef CONFIG_PERFMON
	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();
	itc_master = time_keeper_id;

	if (cpu_online(cpuid)) {
		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	fix_b0_for_bsp();

	ipi_call_lock_irq();
	spin_lock(&vector_lock);
	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(cpuid);
	notify_cpu_starting(cpuid);
	cpu_set(cpuid, cpu_online_map);
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
	spin_unlock(&vector_lock);
	ipi_call_unlock_irq();

	smp_setup_percpu_timer();

	ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif

	local_irq_enable();

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		/*
		 * Synchronize the ITC with the BP.  Need to do this after irqs are
		 * enabled because ia64_sync_itc() calls smp_call_function_single(),
		 * which calls spin_unlock_bh(), which calls local_bh_enable(),
		 * which bugs out if irqs are not enabled...
		 */
		Dprintk("Going to syncup ITC with ITC Master.\n");
		ia64_sync_itc(itc_master);
	}
	/*
	 * Get our bogomips.
	 */
	ia64_init_itm();

	/*
	 * Delay calibration can be skipped if the new processor is identical to
	 * the previous processor.
	 */
	last_cpuinfo = cpu_data(cpuid - 1);
	this_cpuinfo = local_cpu_data;
	if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
	    last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
	    last_cpuinfo->features != this_cpuinfo->features ||
	    last_cpuinfo->revision != this_cpuinfo->revision ||
	    last_cpuinfo->family != this_cpuinfo->family ||
	    last_cpuinfo->archrev != this_cpuinfo->archrev ||
	    last_cpuinfo->model != this_cpuinfo->model)
		calibrate_delay();
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;

#ifdef CONFIG_IA32_SUPPORT
	ia32_gdt_init();
#endif

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n", cpuid, &cpuid);
}
/*
 * Activate a secondary processor.  head.S calls this.
 */
int __cpuinit
start_secondary (void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
#ifndef CONFIG_PRINTK_TIME
	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
#endif
	efi_map_pal_code();
	cpu_init();
	preempt_disable();
	smp_callin();

	cpu_idle();
	return 0;
}
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	return NULL;
}
struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};
void __cpuinit
do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}
static int __cpuinit
do_boot_cpu (int sapicid, int cpu)
{
	int timeout;
	struct create_idle c_idle = {
		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER(c_idle.done),
	};

	c_idle.idle = get_idle_for_cpu(cpu);
	if (c_idle.idle) {
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	/*
	 * We can't use kernel_thread() since we must avoid rescheduling the child.
	 */
	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle))
		panic("failed fork for CPU %d", cpu);

	set_idle_for_cpu(cpu, c_idle.idle);

do_rest:
	task_for_booting_cpu = c_idle.idle;

	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

	set_brendez_area(cpu);
	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);

	/*
	 * Wait 10s total for the AP to start
	 */
	Dprintk("Waiting on callin_map ...");
	for (timeout = 0; timeout < 100000; timeout++) {
		if (cpu_isset(cpu, cpu_callin_map))
			break;	/* It has booted */
		udelay(100);
	}
	Dprintk("\n");

	if (!cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
		ia64_cpu_to_sapicid[cpu] = -1;
		cpu_clear(cpu, cpu_online_map);	/* was set in smp_callin() */
		return -EINVAL;
	}
	return 0;
}
static int __init
decay (char *str)
{
	int ticks;

	get_option (&str, &ticks);
	return 1;
}

__setup("decay=", decay);
/*
 * Initialize the logical CPU number to SAPICID mapping
 */
void __init
smp_build_cpu_map (void)
{
	int sapicid, cpu, i;
	int boot_cpu_id = hard_smp_processor_id();

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ia64_cpu_to_sapicid[cpu] = -1;
	}

	ia64_cpu_to_sapicid[0] = boot_cpu_id;
	cpus_clear(cpu_present_map);
	set_cpu_present(0, true);
	set_cpu_possible(0, true);
	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
		sapicid = smp_boot_data.cpu_phys_id[i];
		if (sapicid == boot_cpu_id)
			continue;
		set_cpu_present(cpu, true);
		set_cpu_possible(cpu, true);
		ia64_cpu_to_sapicid[cpu] = sapicid;
		cpu++;
	}
}
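/*
 * Illustration with made-up IDs: if the boot CPU's sapicid is 0x10 and
 * smp_boot_data.cpu_phys_id[] holds {0x10, 0x11, 0x12}, the loop above
 * skips the boot CPU's entry and yields ia64_cpu_to_sapicid[] =
 * {0x10, 0x11, 0x12} for logical CPUs 0..2, each marked present and
 * possible.
 */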
/*
 * Cycle through the APs sending Wakeup IPIs to boot each.
 */
void __init
smp_prepare_cpus (unsigned int max_cpus)
{
	int boot_cpu_id = hard_smp_processor_id();

	/*
	 * Initialize the per-CPU profiling counter/multiplier
	 */

	smp_setup_percpu_timer();

	/*
	 * We have the boot CPU online for sure.
	 */
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);

	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
	ia64_cpu_to_sapicid[0] = boot_cpu_id;

	printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);

	current_thread_info()->cpu = 0;

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		init_cpu_online(cpumask_of(0));
		init_cpu_present(cpumask_of(0));
		init_cpu_possible(cpumask_of(0));
		return;
	}
}
void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callin_map);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	paravirt_post_smp_prepare_boot_cpu();
}
#ifdef CONFIG_HOTPLUG_CPU
static void
clear_cpu_sibling_map(int cpu)
{
	int i;

	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
	for_each_cpu_mask(i, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[i]);

	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
}
static void
remove_siblinginfo(int cpu)
{
	int last = 0;

	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_clear(cpu, cpu_core_map[cpu]);
		cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
		return;
	}

	last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);

	/* remove it from all sibling maps */
	clear_cpu_sibling_map(cpu);
}
extern void fixup_irqs(void);

int migrate_platform_irqs(unsigned int cpu)
{
	int new_cpei_cpu;
	struct irq_desc *desc = NULL;
	const struct cpumask *mask;
	int retval = 0;

	/*
	 * Don't permit the CPEI target to be removed.
	 */
	if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
		printk ("CPU (%d) is CPEI Target\n", cpu);
		if (can_cpei_retarget()) {
			/*
			 * Now re-target the CPEI to a different processor
			 */
			new_cpei_cpu = any_online_cpu(cpu_online_map);
			mask = cpumask_of(new_cpei_cpu);
			set_cpei_target_cpu(new_cpei_cpu);
			desc = irq_desc + ia64_cpe_irq;
			/*
			 * Switch it over immediately for now; we want to handle
			 * the CPEI like an ordinary interrupt, but need to study
			 * CPEI behaviour with polling before making changes.
			 */
			if (desc) {
				desc->chip->disable(ia64_cpe_irq);
				desc->chip->set_affinity(ia64_cpe_irq, mask);
				desc->chip->enable(ia64_cpe_irq);
				printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
			}
		}
		if (!desc) {
			printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
			retval = -EBUSY;
		}
	}
	return retval;
}
/* must be called with cpucontrol mutex held */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Don't permit removal of the boot processor for now.
	 */
	if (cpu == 0 && !bsp_remove_ok) {
		printk ("Your platform does not support removal of BSP\n");
		return (-EBUSY);
	}

	if (ia64_platform_is("sn2")) {
		if (!sn_cpu_disable_allowed(cpu))
			return -EBUSY;
	}

	cpu_clear(cpu, cpu_online_map);

	if (migrate_platform_irqs(cpu)) {
		cpu_set(cpu, cpu_online_map);
		return -EBUSY;
	}

	remove_siblinginfo(cpu);
	fixup_irqs();
	local_flush_tlb_all();
	cpu_clear(cpu, cpu_callin_map);
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 100; i++) {
		/* They ack this in play_dead() by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk ("CPU %d is now offline\n", cpu);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#endif /* CONFIG_HOTPLUG_CPU */
void
smp_cpus_done (unsigned int dummy)
{
	int cpu;
	unsigned long bogosum = 0;

	/*
	 * Allow the user to impress friends.
	 */

	for_each_online_cpu(cpu) {
		bogosum += cpu_data(cpu)->loops_per_jiffy;
	}

	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
}
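/*
 * Sanity check on the BogoMIPS arithmetic above, with made-up numbers:
 * one BogoMIPS is loops_per_jiffy * HZ / 500000, so with HZ = 250 and
 * four CPUs at loops_per_jiffy = 2000000 each, bogosum = 8000000 and
 * bogosum/(500000/HZ) = 8000000/2000 = 4000 BogoMIPS; the second term,
 * (bogosum/(5000/HZ))%100, recovers the two fractional digits (here 00).
 */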
static inline void __devinit
set_cpu_sibling_map(int cpu)
{
	int i;

	for_each_online_cpu(i) {
		if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
			}
		}
	}
}
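/*
 * Example topology (hypothetical): CPUs 0-3 all on socket 0, with CPUs
 * 0/1 sharing core 0 and CPUs 2/3 sharing core 1.  Once all four are
 * online, cpu_core_map[0] = {0,1,2,3} (same socket_id), while CPU 0's
 * cpu_sibling_map = {0,1} (same socket_id and core_id).
 */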
int __cpuinit
__cpu_up (unsigned int cpu)
{
	int ret;
	int sapicid;

	sapicid = ia64_cpu_to_sapicid[cpu];
	if (sapicid == -1)
		return -EINVAL;

	/*
	 * An already-booted CPU is no longer valid here, since we don't
	 * tight-spin in the idle loop anymore.
	 */
	if (cpu_isset(cpu, cpu_callin_map))
		return -EINVAL;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Processor goes to start_secondary(), sets online flag */
	ret = do_boot_cpu(sapicid, cpu);
	if (ret < 0)
		return ret;

	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
		cpu_set(cpu, cpu_core_map[cpu]);
		return 0;
	}

	set_cpu_sibling_map(cpu);

	return 0;
}
/*
 * Assume that CPUs have been discovered by some platform-dependent interface.  For
 * SoftSDV/Lion, that would be ACPI.
 *
 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
 */
void __init
init_smp_config(void)
{
	struct fptr {
		unsigned long fp;
		unsigned long gp;
	} *ap_startup;
	long sal_ret;

	/* Tell SAL where to drop the APs.  */
	ap_startup = (struct fptr *) start_ap;
	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
				       ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
	if (sal_ret < 0)
		printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
		       ia64_sal_strerror(sal_ret));
}
/*
 * identify_siblings(cpu) gets called from identify_cpu.  This populates the
 * information related to logical execution units in the per_cpu_data structure.
 */
void __devinit
identify_siblings(struct cpuinfo_ia64 *c)
{
	long status;
	u16 pltid;
	pal_logical_to_physical_t info;

	status = ia64_pal_logical_to_phys(-1, &info);
	if (status != PAL_STATUS_SUCCESS) {
		if (status != PAL_STATUS_UNIMPLEMENTED) {
			printk(KERN_ERR
				"ia64_pal_logical_to_phys failed with %ld\n",
				status);
			return;
		}

		info.overview_ppid = 0;
		info.overview_cpp  = 1;
		info.overview_tpc  = 1;
	}

	status = ia64_sal_physical_id_info(&pltid);
	if (status != PAL_STATUS_SUCCESS) {
		if (status != PAL_STATUS_UNIMPLEMENTED)
			printk(KERN_ERR
				"ia64_sal_pltid failed with %ld\n",
				status);
		return;
	}

	c->socket_id = (pltid << 8) | info.overview_ppid;

	if (info.overview_cpp == 1 && info.overview_tpc == 1)
		return;

	c->cores_per_socket = info.overview_cpp;
	c->threads_per_core = info.overview_tpc;
	c->num_log = info.overview_num_log;

	c->core_id = info.log1_cid;
	c->thread_id = info.log1_tid;
}
/*
 * Returns nonzero if multi-threading is enabled on at least one physical
 * package.  Due to CPU hotplug and (maxcpus=), all threads may not
 * necessarily be enabled even though the processor supports multi-threading.
 */
int is_multithreading_enabled(void)
{
	int i, j;

	for_each_present_cpu(i) {
		for_each_present_cpu(j) {
			if (j == i)
				continue;
			if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) {
				if (cpu_data(j)->core_id == cpu_data(i)->core_id)
					return 1;
			}
		}
	}
	return 0;
}
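/*
 * For instance (hypothetical topology): if present CPUs 0 and 1 both
 * report socket_id = 0 and core_id = 0, they are hardware threads of
 * the same core and the loop above returns 1; on packages with one
 * thread per core, no distinct pair ever matches and it returns 0.
 */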
EXPORT_SYMBOL_GPL(is_multithreading_enabled);