/*
 * SMP boot-related support
 *
 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2001, 2004-2005 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 *	Ashok Raj <ashok.raj@intel.com>
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	Moved SMP booting functions from smp.c to here.
 * 01/04/27 David Mosberger <davidm@hpl.hp.com>	Added ITC synching code.
 * 02/07/31 David Mosberger <davidm@hpl.hp.com>	Switch over to hotplug-CPU boot-sequence.
 *						smp_boot_cpus()/smp_commence() is replaced by
 *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
 * 04/06/21 Ashok Raj <ashok.raj@intel.com>	Added CPU Hotplug Support
 * 04/12/26 Jin Gordon <gordon.jin@intel.com>
 * 04/12/26 Rohit Seth <rohit.seth@intel.com>
 *						Add multi-threading and multi-core detection
 * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
 *						Setup cpu_sibling_map and cpu_core_map
 */
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/efi.h>
#include <linux/percpu.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/sn/arch.h>
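
/* Debug printk used to trace SMP bringup below. */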
#define Dprintk(x...)	printk(x)
#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_PERMIT_BSP_REMOVE
#define bsp_remove_ok	1
#else
#define bsp_remove_ok	0
#endif
/*
 * Store all idle threads: an idle thread cached here can be reused instead of
 * creating a new thread when a CPU comes back up. This also avoids complicated
 * thread-destroy functionality for the idle threads.
 */
struct task_struct *idle_thread_array[NR_CPUS];
/*
 * Global array allocated for NR_CPUS at boot time
 */
struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];
/*
 * start_ap in head.S uses this to store the currently booting CPU's state.
 */
struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
#define set_brendez_area(x)	(sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)])

#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))
#else

#define get_idle_for_cpu(x)	(NULL)
#define set_idle_for_cpu(x,p)
#define set_brendez_area(x)
#endif
/*
 * ITC synchronization related stuff:
 */
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/8)

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */
static DEFINE_SPINLOCK(itc_sync_lock);
static volatile unsigned long go[SLAVE + 1];
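
/* Set DEBUG_ITC_SYNC to 1 to record per-round sync stats and dump them in ia64_sync_itc(). */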
#define DEBUG_ITC_SYNC	0
extern void start_ap (void);
extern unsigned long ia64_iobase;
struct task_struct *task_for_booting_cpu;
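
/* State of each CPU */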
DEFINE_PER_CPU(int, cpu_state);
/* Bitmasks of currently online, and possible CPUs */
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
int smp_num_siblings = 1;

/* which logical CPU number maps to which CPU (physical APIC ID) */
volatile int ia64_cpu_to_sapicid[NR_CPUS];
EXPORT_SYMBOL(ia64_cpu_to_sapicid);
static volatile cpumask_t cpu_callin_map;

struct smp_boot_data smp_boot_data __initdata;

unsigned long ap_wakeup_vector = -1;	/* external interrupt vector used to wake up APs */

char __initdata no_int_routing;

unsigned char smp_int_redirect;		/* are INT and IPI redirectable by the chipset? */
#ifdef CONFIG_FORCE_CPEI_RETARGET
#define CPEI_OVERRIDE_DEFAULT	(1)
#else
#define CPEI_OVERRIDE_DEFAULT	(0)
#endif

unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;
static int __init
cmdl_force_cpei(char *str)
{
	int value = 0;

	get_option(&str, &value);
	force_cpei_retarget = value;

	return 1;
}
__setup("force_cpei=", cmdl_force_cpei);
static int __init
nointroute (char *str)
{
	no_int_routing = 1;
	printk ("no_int_routing on\n");
	return 1;
}
__setup("nointroute", nointroute);
static void fix_b0_for_bsp(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpuid;
	static int fix_bsp_b0 = 1;

	cpuid = smp_processor_id();

	/*
	 * Cache the b0 value on the first AP that comes up
	 */
	if (!(fix_bsp_b0 && cpuid))
		return;

	sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
	printk ("Fixed BSP b0 value from CPU %d\n", cpuid);

	fix_bsp_b0 = 0;
#endif
}
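
/*
 * ITC-sync handshake used by sync_master()/get_delta() below (a sketch of
 * the protocol as reconstructed here): the slave writes go[MASTER] to ask
 * the master for a timestamp, the master clears go[MASTER] and publishes
 * its ITC in go[SLAVE], and the slave clears go[SLAVE] to acknowledge
 * before the next round.
 */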
static void
sync_master (void *arg)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
			while (!go[MASTER])
				cpu_relax();
			go[MASTER] = 0;
			go[SLAVE] = ia64_get_itc();
		}
	}
	local_irq_restore(flags);
}
/*
 * Return the number of cycles by which our itc differs from the itc on the master
 * (time-keeper) CPU. A positive number indicates our itc is ahead of the master,
 * negative that it is behind.
 */
static inline long
get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	long i;

	for (i = 0; i < NUM_ITERS; ++i) {
		t0 = ia64_get_itc();	/* slave time before the request */
		go[MASTER] = 1;		/* ask the master for its timestamp */
		while (!(tm = go[SLAVE]))
			cpu_relax();
		go[SLAVE] = 0;		/* ack, so the master can send the next one */
		t1 = ia64_get_itc();	/* slave time after the reply */

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;	/* best roundtrip time */
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
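
/*
 * A worked example with made-up cycle counts: if the best round gives
 * t0 = 1000 and t1 = 1040 (slave clock) and tm = 1010 (master clock),
 * then rt = 40 and tcenter = 1020. Assuming a symmetric interconnect,
 * the master's reply was sampled at the slave-time midpoint 1020, so the
 * slave runs ahead by tcenter - tm = +10 cycles, which is exactly what
 * get_delta() returns.
 */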
/*
 * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
 * (normally the time-keeper CPU). We use a closed loop to eliminate the possibility of
 * unaccounted-for errors (such as getting a machine check in the middle of a calibration
 * step). The basic idea is for the slave to ask the master what itc value it has and to
 * read its own itc before and after the master responds. Each iteration gives us three
 * timestamps: two slave-side readings (t0 and t1) bracketing the master's reply (tm).
 *
 * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
 * and t1. If we achieve this, the clocks are synchronized provided the interconnect
 * between the slave and the master is symmetric. Even if the interconnect were
 * asymmetric, we would still know that the synchronization error is smaller than the
 * roundtrip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
 * within one or two cycles. However, we can only *guarantee* that the synchronization is
 * accurate to within a round-trip time, which is typically in the range of several
 * hundred cycles (e.g., ~500 cycles). In practice, this means that the itc's are usually
 * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
 * than half a microsecond or so.
 */
static void
ia64_sync_itc (unsigned int master)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_ITC_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif
	/*
	 * Make sure local timer ticks are disabled while we sync. If
	 * they were enabled, we'd have to worry about nasty issues
	 * like setting the ITC ahead of (or a long time before) the
	 * next scheduled tick.
	 */
	BUG_ON((ia64_get_itv() & (1 << 16)) == 0);	/* ITV bit 16 is the mask bit */
	if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
		return;
	}

	while (go[MASTER])
		cpu_relax();	/* wait for master to be ready */
	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS; ++i) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				ia64_set_itc(ia64_get_itc() + adj);
			}
#if DEBUG_ITC_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
#if DEBUG_ITC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif
	printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
}
/*
 * Ideally sets up per-CPU profiling hooks. Doesn't do much now...
 */
static inline void __devinit
smp_setup_percpu_timer (void)
{
}
static void __cpuinit
smp_callin (void)
{
	int cpuid, phys_id, itc_master;
	struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
	extern void ia64_init_itm(void);
	extern volatile int time_keeper_id;

#ifdef CONFIG_PERFMON
	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();
	itc_master = time_keeper_id;
	if (cpu_online(cpuid)) {
		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	fix_b0_for_bsp();

	lock_ipi_calllock();
	spin_lock(&vector_lock);
	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(cpuid);
	cpu_set(cpuid, cpu_online_map);
	unlock_ipi_calllock();
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
	spin_unlock(&vector_lock);
	smp_setup_percpu_timer();

	ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif

	local_irq_enable();
	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		/*
		 * Synchronize the ITC with the BP. Need to do this after irqs are
		 * enabled because ia64_sync_itc() calls smp_call_function_single(),
		 * which ends up calling local_bh_enable(), and that bugs out if
		 * irqs are not enabled...
		 */
		Dprintk("Going to syncup ITC with ITC Master.\n");
		ia64_sync_itc(itc_master);
	}

	/*
	 * Get our bogomips.
	 */
	ia64_init_itm();
	/*
	 * Delay calibration can be skipped if the new processor is identical to
	 * the previous processor.
	 */
	last_cpuinfo = cpu_data(cpuid - 1);
	this_cpuinfo = local_cpu_data;
	if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
	    last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
	    last_cpuinfo->features != this_cpuinfo->features ||
	    last_cpuinfo->revision != this_cpuinfo->revision ||
	    last_cpuinfo->family != this_cpuinfo->family ||
	    last_cpuinfo->archrev != this_cpuinfo->archrev ||
	    last_cpuinfo->model != this_cpuinfo->model)
		calibrate_delay();
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
#ifdef CONFIG_IA32_SUPPORT
	ia32_gdt_init();
#endif

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n", cpuid, &cpuid);
}
/*
 * Activate a secondary processor. head.S calls this.
 */
int __cpuinit
start_secondary (void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
	efi_map_pal_code();
	cpu_init();
	preempt_disable();
	smp_callin();

	cpu_idle();
	return 0;
}
struct pt_regs * __cpuinit
idle_regs(struct pt_regs *regs)
{
	return NULL;
}
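
/*
 * Everything needed to fork an idle task from keventd context:
 * do_fork_idle() runs as the work item, stores the new idle task in
 * ->idle, and signals ->done when it is finished.
 */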
struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};
void __cpuinit
do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}
static int __cpuinit
do_boot_cpu (int sapicid, int cpu)
{
	int timeout;
	struct create_idle c_idle = {
		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER(c_idle.done),
	};

	c_idle.idle = get_idle_for_cpu(cpu);
	if (c_idle.idle) {
		/* Reuse the idle thread cached by an earlier hot-unplug. */
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	/*
	 * We can't use kernel_thread since we must avoid rescheduling the child.
	 */
	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle))
		panic("failed fork for CPU %d", cpu);

	set_idle_for_cpu(cpu, c_idle.idle);

do_rest:
	task_for_booting_cpu = c_idle.idle;

	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

	set_brendez_area(cpu);
	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);

	/*
	 * Wait 10s total for the AP to start (100000 polls of 100us each).
	 */
	Dprintk("Waiting on callin_map ...");
	for (timeout = 0; timeout < 100000; timeout++) {
		if (cpu_isset(cpu, cpu_callin_map))
			break;	/* It has booted */
		udelay(100);
	}
	Dprintk("\n");

	if (!cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
		ia64_cpu_to_sapicid[cpu] = -1;
		cpu_clear(cpu, cpu_online_map);	/* was set in smp_callin() */
		return -EINVAL;
	}
	return 0;
}
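
/*
 * Accept the "decay=" boot argument for compatibility; the parsed value
 * is ignored.
 */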
static int __init
decay (char *str)
{
	int ticks;

	get_option (&str, &ticks);
	return 1;
}
__setup("decay=", decay);
/*
 * Initialize the logical CPU number to SAPICID mapping
 */
void __init
smp_build_cpu_map (void)
{
	int sapicid, cpu, i;
	int boot_cpu_id = hard_smp_processor_id();

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ia64_cpu_to_sapicid[cpu] = -1;
	}

	ia64_cpu_to_sapicid[0] = boot_cpu_id;
	cpus_clear(cpu_present_map);
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_possible_map);
	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
		sapicid = smp_boot_data.cpu_phys_id[i];
		if (sapicid == boot_cpu_id)
			continue;
		cpu_set(cpu, cpu_present_map);
		cpu_set(cpu, cpu_possible_map);
		ia64_cpu_to_sapicid[cpu] = sapicid;
		cpu++;
	}
}
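
/*
 * Note that logical CPU 0 is always the boot CPU: smp_build_cpu_map()
 * skips the boot CPU's SAPICID in its loop and assigns the remaining
 * logical numbers in firmware discovery order.
 */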
/*
 * Cycle through the APs sending Wakeup IPIs to boot each.
 */
void __init
smp_prepare_cpus (unsigned int max_cpus)
{
	int boot_cpu_id = hard_smp_processor_id();

	/*
	 * Initialize the per-CPU profiling counter/multiplier
	 */
	smp_setup_percpu_timer();

	/*
	 * We have the boot CPU online for sure.
	 */
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);

	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
	ia64_cpu_to_sapicid[0] = boot_cpu_id;

	printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);

	current_thread_info()->cpu = 0;

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		cpus_clear(cpu_online_map);
		cpus_clear(cpu_present_map);
		cpus_clear(cpu_possible_map);
		cpu_set(0, cpu_online_map);
		cpu_set(0, cpu_present_map);
		cpu_set(0, cpu_possible_map);
		return;
	}
}
void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callin_map);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}
#ifdef CONFIG_HOTPLUG_CPU
static void
clear_cpu_sibling_map(int cpu)
{
	int i;

	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
	for_each_cpu_mask(i, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[i]);

	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
}
static void
remove_siblinginfo(int cpu)
{
	int last = 0;

	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_clear(cpu, cpu_core_map[cpu]);
		cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
		return;
	}

	last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);

	/* remove it from all sibling maps */
	clear_cpu_sibling_map(cpu);
}
extern void fixup_irqs(void);
int migrate_platform_irqs(unsigned int cpu)
{
	int new_cpei_cpu;
	irq_desc_t *desc = NULL;
	cpumask_t mask;
	int retval = 0;

	/*
	 * Don't permit removal of a CPU that is the current CPEI target.
	 */
	if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
		printk ("CPU (%d) is CPEI Target\n", cpu);
		if (can_cpei_retarget()) {
			/*
			 * Now re-target the CPEI to a different processor
			 */
			new_cpei_cpu = any_online_cpu(cpu_online_map);
			mask = cpumask_of_cpu(new_cpei_cpu);
			set_cpei_target_cpu(new_cpei_cpu);
			desc = irq_desc + ia64_cpe_irq;
			/*
			 * Switch for now, immediately; we need to do a fake intr
			 * like other interrupts, but need to study CPEI behaviour
			 * with polling before making changes.
			 */
			desc->chip->disable(ia64_cpe_irq);
			desc->chip->set_affinity(ia64_cpe_irq, mask);
			desc->chip->enable(ia64_cpe_irq);
			printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
		} else {
			printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
			retval = -EBUSY;
		}
	}
	return retval;
}
/* must be called with cpucontrol mutex held */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Don't permit removal of the boot processor for now.
	 */
	if (cpu == 0 && !bsp_remove_ok) {
		printk ("Your platform does not support removal of BSP\n");
		return -EBUSY;
	}

	if (ia64_platform_is("sn2")) {
		if (!sn_cpu_disable_allowed(cpu))
			return -EBUSY;
	}

	cpu_clear(cpu, cpu_online_map);

	if (migrate_platform_irqs(cpu)) {
		cpu_set(cpu, cpu_online_map);
		return -EBUSY;
	}

	remove_siblinginfo(cpu);
	cpu_clear(cpu, cpu_online_map);
	fixup_irqs();
	local_flush_tlb_all();
	cpu_clear(cpu, cpu_callin_map);
	return 0;
}
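
/*
 * Wait for the dying CPU to reach CPU_DEAD: up to 100 polls of 100ms each
 * (about ten seconds) before giving up.
 */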
void __cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 100; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk ("CPU %d is now offline\n", cpu);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#endif /* CONFIG_HOTPLUG_CPU */
void __init
smp_cpus_done (unsigned int dummy)
{
	int cpu;
	unsigned long bogosum = 0;

	/*
	 * Allow the user to impress friends.
	 */
	for_each_online_cpu(cpu) {
		bogosum += cpu_data(cpu)->loops_per_jiffy;
	}

	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
}
static inline void __devinit
set_cpu_sibling_map(int cpu)
{
	int i;

	for_each_online_cpu(i) {
		if (cpu_data(cpu)->socket_id == cpu_data(i)->socket_id) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
			}
		}
	}
}
int __cpuinit
__cpu_up (unsigned int cpu)
{
	int ret;
	int sapicid;

	sapicid = ia64_cpu_to_sapicid[cpu];
	if (sapicid == -1)
		return -EINVAL;

	/*
	 * An already-booted CPU? That's not valid anymore, since we no
	 * longer tight-spin already-booted CPUs in the idle loop.
	 */
	if (cpu_isset(cpu, cpu_callin_map))
		return -EINVAL;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Processor goes to start_secondary(), sets online flag */
	ret = do_boot_cpu(sapicid, cpu);
	if (ret < 0)
		return ret;

	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
		cpu_set(cpu, cpu_core_map[cpu]);
		return 0;
	}

	set_cpu_sibling_map(cpu);

	return 0;
}
/*
 * Assume that CPUs have been discovered by some platform-dependent interface. For
 * SoftSDV/Lion, that would be ACPI.
 *
 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
 */
void __init
init_smp_config(void)
{
	struct fptr {
		unsigned long fp;
		unsigned long gp;
	} *ap_startup;
	long sal_ret;

	/* Tell SAL where to drop the APs. */
	ap_startup = (struct fptr *) start_ap;
	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
				       ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp),
				       0, 0, 0, 0);
	if (sal_ret < 0)
		printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
		       ia64_sal_strerror(sal_ret));
}
/*
 * identify_siblings(cpu) gets called from identify_cpu. This populates the
 * information related to logical execution units in the per_cpu_data structure.
 */
void __devinit
identify_siblings(struct cpuinfo_ia64 *c)
{
	s64 status;
	u16 pltid;
	pal_logical_to_physical_t info;

	if ((status = ia64_pal_logical_to_phys(-1, &info)) != PAL_STATUS_SUCCESS) {
		if (status != PAL_STATUS_UNIMPLEMENTED) {
			printk(KERN_ERR
				"ia64_pal_logical_to_phys failed with %ld\n",
				status);
			return;
		}

		/* PAL doesn't implement the call: assume one core, one thread. */
		info.overview_ppid = 0;
		info.overview_cpp = 1;
		info.overview_tpc = 1;
	}

	if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) {
		printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
		return;
	}

	c->socket_id = (pltid << 8) | info.overview_ppid;

	if (info.overview_cpp == 1 && info.overview_tpc == 1)
		return;

	c->cores_per_socket = info.overview_cpp;
	c->threads_per_core = info.overview_tpc;
	c->num_log = info.overview_num_log;

	c->core_id = info.log1_cid;
	c->thread_id = info.log1_tid;
}
/*
 * Returns non-zero if multi-threading is enabled on at least one physical
 * package. Due to CPU hotplug and (maxcpus=), not all threads may be enabled
 * even though the processor supports multi-threading.
 */
int is_multithreading_enabled(void)
{
	int i, j;

	for_each_present_cpu(i) {
		for_each_present_cpu(j) {
			if (j == i)
				continue;
			if (cpu_data(j)->socket_id == cpu_data(i)->socket_id) {
				if (cpu_data(j)->core_id == cpu_data(i)->core_id)
					return 1;
			}
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(is_multithreading_enabled);