/*
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/vdso_datapage.h>

#define DBG(fmt...) udbg_printf(fmt)

/* Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)	(per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)	(per_cpu(idle_thread_array, x) = (p))
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

int __devinit smp_generic_kick_cpu(int nr)
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero. After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        paca[nr].cpu_start = 1;
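        /*
         * Note: paca[] is the ppc64 per-CPU data area; the secondary is
         * presumed to be spinning on its cpu_start field in early boot
         * code, so this store is what releases it into secondary_start.
         */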

static irqreturn_t call_function_action(int irq, void *data)
        generic_smp_call_function_interrupt();

static irqreturn_t reschedule_action(int irq, void *data)
        scheduler_ipi();

static irqreturn_t call_function_single_action(int irq, void *data)
        generic_smp_call_function_single_interrupt();

static irqreturn_t debug_ipi_action(int irq, void *data)
        if (crash_ipi_function_ptr) {
                crash_ipi_function_ptr(get_irq_regs());
                return IRQ_HANDLED;
        }

#ifdef CONFIG_DEBUGGER
        debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

static irq_handler_t smp_ipi_action[] = {
        [PPC_MSG_CALL_FUNCTION] = call_function_action,
        [PPC_MSG_RESCHEDULE] = reschedule_action,
        [PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
        [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
        [PPC_MSG_CALL_FUNCTION] = "ipi call function",
        [PPC_MSG_RESCHEDULE] = "ipi reschedule",
        [PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
        [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
        if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
                return -EINVAL;
        }
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
        if (msg == PPC_MSG_DEBUGGER_BREAK) {
                return 1;
        }
#endif
        err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
                          smp_ipi_name[msg], 0);
        WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
                virq, smp_ipi_name[msg], err);
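
/*
 * A hypothetical usage sketch: a platform interrupt controller with one
 * hardware IPI per message could wire each one up at boot, roughly
 *
 *	err = smp_request_message_ipi(virq, PPC_MSG_RESCHEDULE);
 *
 * with one such call (and one virq) per PPC_MSG_* value.
 */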

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
        int messages;			/* current messages */
        unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);

        info->data = data;

void smp_muxed_ipi_message_pass(int cpu, int msg)
        struct cpu_messages *info = &per_cpu(ipi_message, cpu);
        char *message = (char *)&info->messages;

        message[msg] = 1;
        mb();
        smp_ops->cause_ipi(cpu, info->data);
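        /*
         * As the cast above suggests, the "messages" word is treated as four
         * single-byte flags, one per PPC_MSG_* value; cause_ipi() then raises
         * the single muxed hardware IPI on the target CPU, whose handler is
         * expected to end up in smp_ipi_demux() below. The exact handshake is
         * platform-specific.
         */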

irqreturn_t smp_ipi_demux(void)
        struct cpu_messages *info = &__get_cpu_var(ipi_message);

        mb();	/* order any irq clear */

        do {
                all = xchg_local(&info->messages, 0);
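                /*
                 * On big-endian, byte msg of the word occupies bits
                 * (31 - 8*msg) down to (24 - 8*msg), so a flag value of 1
                 * written by smp_muxed_ipi_message_pass() shows up as bit
                 * (24 - 8*msg); hence the shifts tested below.
                 */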
#ifdef __BIG_ENDIAN
                if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
                        generic_smp_call_function_interrupt();
                if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE)))
                        scheduler_ipi();
                if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE)))
                        generic_smp_call_function_single_interrupt();
                if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK)))
                        debug_ipi_action(0, NULL);
#else
#error Unsupported ENDIAN
#endif
        } while (info->messages);
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
        if (smp_ops->message_pass)
                smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
        else
                smp_muxed_ipi_message_pass(cpu, msg);
#endif
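        /*
         * Two delivery paths: platforms that can send a distinct hardware
         * IPI per message provide smp_ops->message_pass; otherwise, with
         * CONFIG_PPC_SMP_MUXED_IPI, everything is funnelled through the
         * single muxed IPI handled above.
         */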

void smp_send_reschedule(int cpu)
        do_message_pass(cpu, PPC_MSG_RESCHEDULE);
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
        do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
        for_each_cpu(cpu, mask)
                do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
        int me = raw_smp_processor_id();

        if (unlikely(!smp_ops))
                return;

        for_each_online_cpu(cpu)
                if (cpu != me)
                        do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
#endif

void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
        crash_ipi_function_ptr = crash_ipi_callback;
        if (crash_ipi_callback) {
                smp_send_debugger_break();
        }

static void stop_this_cpu(void *dummy)
        /* Remove this CPU */
        set_cpu_online(smp_processor_id(), false);

void smp_send_stop(void)
        smp_call_function(stop_this_cpu, NULL, 0);

struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
        per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
        per_cpu(next_tlbcam_idx, id)
                = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

void __init smp_prepare_cpus(unsigned int max_cpus)
        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

        for_each_possible_cpu(cpu) {
                zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
                zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
                                        GFP_KERNEL, cpu_to_node(cpu));
        }

        cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
        cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

        max_cpus = smp_ops->probe();

void __devinit smp_prepare_boot_cpu(void)
        BUG_ON(smp_processor_id() != boot_cpuid);
        paca[boot_cpuid].__current = current;
        current_set[boot_cpuid] = task_thread_info(current);

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };

int generic_cpu_disable(void)
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        set_cpu_online(cpu, false);
        vdso_data->processorCount--;

void generic_cpu_die(unsigned int cpu)
        for (i = 0; i < 100; i++) {
                if (per_cpu(cpu_state, cpu) == CPU_DEAD)
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);

void generic_mach_cpu_die(void)
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
        __get_cpu_var(cpu_state) = CPU_DEAD;
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();

void generic_set_cpu_dead(unsigned int cpu)
        per_cpu(cpu_state, cpu) = CPU_DEAD;

struct create_idle {
        struct work_struct work;
        struct task_struct *idle;
        struct completion done;
        int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
        struct create_idle *c_idle =
                container_of(work, struct create_idle, work);

        c_idle->idle = fork_idle(c_idle->cpu);
        complete(&c_idle->done);

static int __cpuinit create_idle(unsigned int cpu)
        struct thread_info *ti;
        struct create_idle c_idle = {
                .cpu	= cpu,
                .done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
        };
        INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);

        c_idle.idle = get_idle_for_cpu(cpu);

        /* We can't use kernel_thread since we must avoid
         * rescheduling the child. We use a workqueue because
         * we want to fork from a kernel thread, not whatever
         * userspace process happens to be trying to online us.
         */
        schedule_work(&c_idle.work);
        wait_for_completion(&c_idle.done);

        init_idle(c_idle.idle, cpu);
        if (IS_ERR(c_idle.idle)) {
                pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
                return PTR_ERR(c_idle.idle);
        }
        ti = task_thread_info(c_idle.idle);

        paca[cpu].__current = c_idle.idle;
        paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
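        /*
         * The idle task's thread_info sits at the base of its kernel stack,
         * so the initial stack pointer is the top of that region
         * (ti + THREAD_SIZE) minus room for one STACK_FRAME_OVERHEAD; the
         * paca entry presumably hands both values to the secondary's early
         * startup code.
         */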

        current_set[cpu] = ti;

int __cpuinit __cpu_up(unsigned int cpu)
        if (smp_ops == NULL ||
            (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
                return -EINVAL;

        /* Make sure we have an idle thread */
        rc = create_idle(cpu);

        secondary_ti = current_set[cpu];

        /* Make sure callin-map entry is 0 (it can be left over from a
         * previous CPU hotplug).
         */
        cpu_callin_map[cpu] = 0;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        smp_mb();

        DBG("smp: kicking cpu %d\n", cpu);
        rc = smp_ops->kick_cpu(cpu);
        if (rc) {
                pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
                return rc;
        }

        /*
         * Wait to see if the cpu made a callin (is actually up).
         * Use this value that I found through experimentation.
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 50000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case. Wait five seconds.
                 */
                for (c = 5000; c && !cpu_callin_map[cpu]; c--)
                        msleep(1);
#endif

        if (!cpu_callin_map[cpu]) {
                printk(KERN_ERR "Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        DBG("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online map */
        while (!cpu_online(cpu))
                cpu_relax();

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
        struct device_node *np;

        np = of_get_cpu_node(cpu, NULL);

        reg = of_get_property(np, "reg", NULL);
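        /*
         * The "reg" property of the cpu device-tree node holds the hardware
         * CPU id, which is what gets reported here as the core id for the
         * given logical cpu.
         */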

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
        return cpu >> threads_shift;
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
        return core << threads_shift;
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
        struct device_node *np;
        struct device_node *cache;

        if (!cpu_present(cpu))
                return NULL;

        np = of_get_cpu_node(cpu, NULL);

        cache = of_find_next_cache_node(np);
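        /*
         * of_find_next_cache_node() follows the cpu node's link to the next
         * cache level described in the device tree, typically its L2; two
         * cpus that resolve to the same node are treated as sharing that
         * cache by the callers below.
         */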

/* Activate a secondary processor. */
void __devinit start_secondary(void *unused)
        unsigned int cpu = smp_processor_id();
        struct device_node *l2_cache;

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        cpu_callin_map[cpu] = 1;

        if (smp_ops->setup_cpu)
                smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

        secondary_cpu_time_init();

        if (system_state == SYSTEM_RUNNING)
                vdso_data->processorCount++;

        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
        for (i = 0; i < threads_per_core; i++) {
                if (cpu_is_offline(base + i))
                        continue;
                cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

                /* cpu_core_map should be a superset of
                 * cpu_sibling_map even if we don't have cache
                 * information, so update the former here, too.
                 */
                cpumask_set_cpu(cpu, cpu_core_mask(base + i));
                cpumask_set_cpu(base + i, cpu_core_mask(cpu));
        }
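        /*
         * Beyond the hardware thread siblings handled above, cpus that map
         * to the same L2 cache node are linked into each other's core map
         * below, so the scheduler can treat them as sharing a cache domain.
         */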
        l2_cache = cpu_to_l2cache(cpu);
        for_each_online_cpu(i) {
                struct device_node *np = cpu_to_l2cache(i);

                if (np == l2_cache) {
                        cpumask_set_cpu(cpu, cpu_core_mask(i));
                        cpumask_set_cpu(i, cpu_core_mask(cpu));
                }
        }
        of_node_put(l2_cache);

int setup_profiling_timer(unsigned int multiplier)

void __init smp_cpus_done(unsigned int max_cpus)
        cpumask_var_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the meantime,
         * so we pin ourselves to CPU 0 for a short while.
         */
        alloc_cpumask_var(&old_mask, GFP_NOWAIT);
        cpumask_copy(old_mask, tsk_cpus_allowed(current));
        set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

        if (smp_ops && smp_ops->setup_cpu)
                smp_ops->setup_cpu(boot_cpuid);

        set_cpus_allowed_ptr(current, old_mask);

        free_cpumask_var(old_mask);

        if (smp_ops && smp_ops->bringup_done)
                smp_ops->bringup_done();

        dump_numa_cpu_topology();

int arch_sd_sibling_asym_packing(void)
        if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
                printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
                return SD_ASYM_PACKING;
        }
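        /*
         * CPU_FTR_ASYM_SMT is presumably set on processors where the
         * lower-numbered hardware threads of a core perform better when the
         * core is not fully busy; returning SD_ASYM_PACKING asks the
         * scheduler to pack runnable tasks onto those lower thread numbers
         * first.
         */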

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
        struct device_node *l2_cache;
        int cpu = smp_processor_id();

        if (!smp_ops->cpu_disable)
                return -ENOSYS;

        err = smp_ops->cpu_disable();

        /* Update sibling maps */
        base = cpu_first_thread_sibling(cpu);
        for (i = 0; i < threads_per_core; i++) {
                cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
                cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
                cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
        }

        l2_cache = cpu_to_l2cache(cpu);
        for_each_present_cpu(i) {
                struct device_node *np = cpu_to_l2cache(i);

                if (np == l2_cache) {
                        cpumask_clear_cpu(cpu, cpu_core_mask(i));
                        cpumask_clear_cpu(i, cpu_core_mask(cpu));
                }
        }
        of_node_put(l2_cache);

void __cpu_die(unsigned int cpu)
        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);

static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_lock()
        mutex_lock(&powerpc_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_unlock()
        mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
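
/*
 * These lock/unlock helpers presumably exist so the platform CPU hotplug
 * driver can serialize its add/remove operations against other hotplug
 * activity via powerpc_cpu_hotplug_driver_mutex.
 */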

        /* If we return, we re-enter start_secondary */
        start_secondary_resume();