/*
 * SMP support for pSeries machines.
 *
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * Plus various changes from other IBM teams...
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/system.h>
#include <asm/rtas.h>
#include <asm/pSeries_reconfig.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>

#include "plpar_wrappers.h"
#include "pseries.h"
#include "xics.h"
/*
 * The Primary thread of each non-boot processor was started from the OF client
 * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
 */
static cpumask_t of_spin_map;

extern void generic_secondary_smp_init(unsigned long);
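
/*
 * generic_secondary_smp_init is the common powerpc secondary-CPU entry
 * point in assembler (head_64.S); smp_startup_cpu() below hands its
 * physical address to the RTAS "start-cpu" call as the start location.
 */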
/**
 * smp_startup_cpu() - start the given cpu
 *
 * At boot time, there is nothing to do for primary threads which were
 * started from Open Firmware.  For anything else, call RTAS with the
 * appropriate start location.
 *
 * Returns:
 *	0	- failure
 *	1	- success
 */
static inline int __devinit smp_startup_cpu(unsigned int lcpu)
{
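	/*
	 * Note: on ppc64 the generic_secondary_smp_init symbol resolves to a
	 * function descriptor, so it is dereferenced below to obtain the real
	 * entry address before converting it to a physical address for RTAS.
	 */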
	int status;
	unsigned long start_here = __pa((u32)*((unsigned long *)
					       generic_secondary_smp_init));
	unsigned int pcpu;
	int start_cpu;

	if (cpu_isset(lcpu, of_spin_map))
		/* Already started by OF and sitting in spin loop */
		return 1;

	pcpu = get_hard_smp_processor_id(lcpu);

	/* Fixup atomic count: it exited inside IRQ handler. */
	task_thread_info(paca[lcpu].__current)->preempt_count = 0;

	/*
	 * If the RTAS start-cpu token does not exist then presume the
	 * cpu is already spinning.
	 */
	start_cpu = rtas_token("start-cpu");
	if (start_cpu == RTAS_UNKNOWN_SERVICE)
		return 1;

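	/*
	 * start-cpu takes three inputs (the hardware cpu id, the start
	 * address and the value the new cpu receives in r3) and returns a
	 * single status word; NULL means no extra output buffer is wanted.
	 */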
	status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, pcpu);
	if (status != 0) {
		printk(KERN_ERR "start-cpu failed: %i\n", status);
		return 0;
	}

	return 1;
}
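
/*
 * Per-cpu setup for the XICS interrupt controller: initialise XICS on
 * every cpu except the boot cpu, register the Virtual Processor Area
 * with the hypervisor on shared-processor LPAR firmware, and clear the
 * cpu from of_spin_map now that it has left the OF spin loop.
 */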
#ifdef CONFIG_XICS
static void __devinit smp_xics_setup_cpu(int cpu)
{
	if (cpu != boot_cpuid)
		xics_setup_cpu();

	if (firmware_has_feature(FW_FEATURE_SPLPAR))
		vpa_init(cpu);

	cpu_clear(cpu, of_spin_map);
}
#endif /* CONFIG_XICS */

static DEFINE_SPINLOCK(timebase_lock);
static unsigned long timebase = 0;
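
/*
 * Timebase hand-off for machines with the freeze-time-base RTAS call:
 * the cpu that already has a good timebase freezes it, samples it and
 * publishes it in 'timebase' under timebase_lock; the new cpu copies it
 * with set_tb() and writes 0 back, which tells the giver it may thaw
 * the timebase.  The busy-waits with barrier() form the handshake.
 */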
static void __devinit pSeries_give_timebase(void)
{
	spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
}

static void __devinit pSeries_take_timebase(void)
{
	while (!timebase)
		barrier();
	spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	spin_unlock(&timebase_lock);
}

static void __devinit smp_pSeries_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	if (!smp_startup_cpu(nr))
		return;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca[nr].cpu_start = 1;
}
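
/*
 * cpu_thread_in_core() is non-zero for secondary SMT threads; when the
 * user disabled SMT at boot (smt_enabled_at_boot is clear, e.g. via the
 * smt-enabled= option), those threads are reported as not bootable until
 * the system is up, so only the primary thread of each core is started.
 */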
static int smp_pSeries_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING &&
	    cpu_has_feature(CPU_FTR_SMT) &&
	    !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
		return 0;

	return 1;
}
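
/*
 * Two smp_ops tables: MPIC-based and XICS-based platforms share the same
 * kick_cpu hook but use their own message_pass/probe/setup_cpu backends;
 * the XICS variant additionally installs the SMT-aware cpu_bootable check.
 */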
static struct smp_ops_t pSeries_mpic_smp_ops = {
	.message_pass	= smp_mpic_message_pass,
	.probe		= smp_mpic_probe,
	.kick_cpu	= smp_pSeries_kick_cpu,
	.setup_cpu	= smp_mpic_setup_cpu,
};

static struct smp_ops_t pSeries_xics_smp_ops = {
	.message_pass	= smp_xics_message_pass,
	.probe		= smp_xics_probe,
	.kick_cpu	= smp_pSeries_kick_cpu,
	.setup_cpu	= smp_xics_setup_cpu,
	.cpu_bootable	= smp_pSeries_cpu_bootable,
};
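
/*
 * Common pSeries SMP initialisation: record which threads are still
 * spinning in the OF hold loop, drop the boot cpu from that map, and on
 * machines that provide the freeze-time-base RTAS call hook the timebase
 * give/take callbacks into smp_ops.
 */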
/* This is called very early */
static void __init smp_init_pseries(void)
{
	int i;

	pr_debug(" -> smp_init_pSeries()\n");

	/* Mark threads which are still spinning in hold loops. */
	if (cpu_has_feature(CPU_FTR_SMT)) {
		for_each_present_cpu(i) {
			if (cpu_thread_in_core(i) == 0)
				cpu_set(i, of_spin_map);
		}
	} else {
		of_spin_map = cpu_present_map;
	}

	cpu_clear(boot_cpuid, of_spin_map);

	/* Non-lpar has additional take/give timebase */
	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
		smp_ops->give_timebase = pSeries_give_timebase;
		smp_ops->take_timebase = pSeries_take_timebase;
	}

	pr_debug(" <- smp_init_pSeries()\n");
}
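
/*
 * Entry points called from the pseries platform setup code, which picks
 * the MPIC or XICS flavour according to the interrupt controller found
 * in the device tree; both install the matching smp_ops table and then
 * run the common smp_init_pseries() above.
 */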
void __init smp_init_pseries_mpic(void)
{
	smp_ops = &pSeries_mpic_smp_ops;

	smp_init_pseries();
}

void __init smp_init_pseries_xics(void)
{
	smp_ops = &pSeries_xics_smp_ops;

	smp_init_pseries();
}