arch/powerpc/kernel/smp.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * SMP support for ppc.
5 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
6 * deal of code from the sparc and intel versions.
8 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
10 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
11 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
14 #undef DEBUG
16 #include <linux/kernel.h>
17 #include <linux/export.h>
18 #include <linux/sched/mm.h>
19 #include <linux/sched/task_stack.h>
20 #include <linux/sched/topology.h>
21 #include <linux/smp.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/init.h>
25 #include <linux/spinlock.h>
26 #include <linux/cache.h>
27 #include <linux/err.h>
28 #include <linux/device.h>
29 #include <linux/cpu.h>
30 #include <linux/notifier.h>
31 #include <linux/topology.h>
32 #include <linux/profile.h>
33 #include <linux/processor.h>
34 #include <linux/random.h>
35 #include <linux/stackprotector.h>
36 #include <linux/pgtable.h>
38 #include <asm/ptrace.h>
39 #include <linux/atomic.h>
40 #include <asm/irq.h>
41 #include <asm/hw_irq.h>
42 #include <asm/kvm_ppc.h>
43 #include <asm/dbell.h>
44 #include <asm/page.h>
45 #include <asm/prom.h>
46 #include <asm/smp.h>
47 #include <asm/time.h>
48 #include <asm/machdep.h>
49 #include <asm/cputhreads.h>
50 #include <asm/cputable.h>
51 #include <asm/mpic.h>
52 #include <asm/vdso_datapage.h>
53 #ifdef CONFIG_PPC64
54 #include <asm/paca.h>
55 #endif
56 #include <asm/vdso.h>
57 #include <asm/debug.h>
58 #include <asm/kexec.h>
59 #include <asm/asm-prototypes.h>
60 #include <asm/cpu_has_feature.h>
61 #include <asm/ftrace.h>
62 #include <asm/kup.h>
64 #ifdef DEBUG
65 #include <asm/udbg.h>
66 #define DBG(fmt...) udbg_printf(fmt)
67 #else
68 #define DBG(fmt...)
69 #endif
71 #ifdef CONFIG_HOTPLUG_CPU
72 /* State of each CPU during hotplug phases */
73 static DEFINE_PER_CPU(int, cpu_state) = { 0 };
74 #endif
76 struct task_struct *secondary_current;
77 bool has_big_cores;
78 bool coregroup_enabled;
79 bool thread_group_shares_l2;
81 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
82 DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
83 DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
84 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
85 DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
87 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
88 EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
89 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
90 EXPORT_SYMBOL_GPL(has_big_cores);
92 enum {
93 #ifdef CONFIG_SCHED_SMT
94 smt_idx,
95 #endif
96 cache_idx,
97 mc_idx,
98 die_idx,
101 #define MAX_THREAD_LIST_SIZE 8
102 #define THREAD_GROUP_SHARE_L1 1
103 #define THREAD_GROUP_SHARE_L2 2
104 struct thread_groups {
105 unsigned int property;
106 unsigned int nr_groups;
107 unsigned int threads_per_group;
108 unsigned int thread_list[MAX_THREAD_LIST_SIZE];
111 /* Maximum number of properties that groups of threads within a core can share */
112 #define MAX_THREAD_GROUP_PROPERTIES 2
114 struct thread_groups_list {
115 unsigned int nr_properties;
116 struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES];
119 static struct thread_groups_list tgl[NR_CPUS] __initdata;
121 * On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
122 * the set of its siblings that share the L1-cache.
124 DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
127 * On some big-core systems, thread_group_l2_cache_map for each CPU
128 * corresponds to the set of its siblings within the core that share the
129 * L2-cache.
131 DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
133 /* SMP operations for this machine */
134 struct smp_ops_t *smp_ops;
136 /* Can't be static due to PowerMac hackery */
137 volatile unsigned int cpu_callin_map[NR_CPUS];
139 int smt_enabled_at_boot = 1;
142 * Returns 1 if the specified cpu should be brought up during boot.
143 * Used to inhibit booting threads if they've been disabled or
144 * limited on the command line.
146 int smp_generic_cpu_bootable(unsigned int nr)
148 /* Special case - we inhibit secondary thread startup
149 * during boot if the user requests it.
151 if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
152 if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
153 return 0;
154 if (smt_enabled_at_boot
155 && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
156 return 0;
159 return 1;
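/*
 * Illustration, assuming smt_enabled_at_boot was limited by early boot code
 * (e.g. an SMT limit given on the kernel command line): with
 * smt_enabled_at_boot == 2, only threads 0 and 1 of each core pass the
 * check above during boot; the remaining threads are left offline and can
 * be onlined later through the usual hotplug path.
 */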
163 #ifdef CONFIG_PPC64
164 int smp_generic_kick_cpu(int nr)
166 if (nr < 0 || nr >= nr_cpu_ids)
167 return -EINVAL;
170 * The processor is currently spinning, waiting for the
171 * cpu_start field to become non-zero. After we set cpu_start,
172 * the processor will continue on to secondary_start
174 if (!paca_ptrs[nr]->cpu_start) {
175 paca_ptrs[nr]->cpu_start = 1;
176 smp_mb();
177 return 0;
180 #ifdef CONFIG_HOTPLUG_CPU
182 * Ok it's not there, so it might be soft-unplugged, let's
183 * try to bring it back
185 generic_set_cpu_up(nr);
186 smp_wmb();
187 smp_send_reschedule(nr);
188 #endif /* CONFIG_HOTPLUG_CPU */
190 return 0;
192 #endif /* CONFIG_PPC64 */
194 static irqreturn_t call_function_action(int irq, void *data)
196 generic_smp_call_function_interrupt();
197 return IRQ_HANDLED;
200 static irqreturn_t reschedule_action(int irq, void *data)
202 scheduler_ipi();
203 return IRQ_HANDLED;
206 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
207 static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
209 timer_broadcast_interrupt();
210 return IRQ_HANDLED;
212 #endif
214 #ifdef CONFIG_NMI_IPI
215 static irqreturn_t nmi_ipi_action(int irq, void *data)
217 smp_handle_nmi_ipi(get_irq_regs());
218 return IRQ_HANDLED;
220 #endif
222 static irq_handler_t smp_ipi_action[] = {
223 [PPC_MSG_CALL_FUNCTION] = call_function_action,
224 [PPC_MSG_RESCHEDULE] = reschedule_action,
225 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
226 [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
227 #endif
228 #ifdef CONFIG_NMI_IPI
229 [PPC_MSG_NMI_IPI] = nmi_ipi_action,
230 #endif
234 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
235 * than going through the call function infrastructure, and strongly
236 * serialized, so it is more appropriate for debugging.
238 const char *smp_ipi_name[] = {
239 [PPC_MSG_CALL_FUNCTION] = "ipi call function",
240 [PPC_MSG_RESCHEDULE] = "ipi reschedule",
241 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
242 [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
243 #endif
244 #ifdef CONFIG_NMI_IPI
245 [PPC_MSG_NMI_IPI] = "nmi ipi",
246 #endif
249 /* optional function to request ipi, for controllers with >= 4 ipis */
250 int smp_request_message_ipi(int virq, int msg)
252 int err;
254 if (msg < 0 || msg > PPC_MSG_NMI_IPI)
255 return -EINVAL;
256 #ifndef CONFIG_NMI_IPI
257 if (msg == PPC_MSG_NMI_IPI)
258 return 1;
259 #endif
261 err = request_irq(virq, smp_ipi_action[msg],
262 IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
263 smp_ipi_name[msg], NULL);
264 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
265 virq, smp_ipi_name[msg], err);
267 return err;
270 #ifdef CONFIG_PPC_SMP_MUXED_IPI
271 struct cpu_messages {
272 long messages; /* current messages */
274 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
276 void smp_muxed_ipi_set_message(int cpu, int msg)
278 struct cpu_messages *info = &per_cpu(ipi_message, cpu);
279 char *message = (char *)&info->messages;
282 * Order previous accesses before accesses in the IPI handler.
284 smp_mb();
285 message[msg] = 1;
288 void smp_muxed_ipi_message_pass(int cpu, int msg)
290 smp_muxed_ipi_set_message(cpu, msg);
293 * cause_ipi functions are required to include a full barrier
294 * before doing whatever causes the IPI.
296 smp_ops->cause_ipi(cpu);
299 #ifdef __BIG_ENDIAN__
300 #define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
301 #else
302 #define IPI_MESSAGE(A) (1uL << (8 * (A)))
303 #endif
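/*
 * Worked illustration of the encoding above, assuming the usual PPC_MSG_*
 * numbering from asm/smp.h (PPC_MSG_CALL_FUNCTION == 0, PPC_MSG_RESCHEDULE == 1,
 * and so on): smp_muxed_ipi_set_message() stores 1 into byte 'msg' of the
 * per-cpu 'messages' long, so on a 64-bit little-endian kernel
 *
 *	message[PPC_MSG_RESCHEDULE] = 1;	// info->messages becomes 0x100
 *	info->messages & IPI_MESSAGE(PPC_MSG_RESCHEDULE);	// non-zero
 *
 * The big-endian variant of IPI_MESSAGE() selects the mirrored bit, so the
 * same test in smp_ipi_demux_relaxed() works on either endianness.
 */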
305 irqreturn_t smp_ipi_demux(void)
307 mb(); /* order any irq clear */
309 return smp_ipi_demux_relaxed();
312 /* sync-free variant. Callers should ensure synchronization */
313 irqreturn_t smp_ipi_demux_relaxed(void)
315 struct cpu_messages *info;
316 unsigned long all;
318 info = this_cpu_ptr(&ipi_message);
319 do {
320 all = xchg(&info->messages, 0);
321 #if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
323 * Must check for PPC_MSG_RM_HOST_ACTION messages
324 * before PPC_MSG_CALL_FUNCTION messages because when
325 * a VM is destroyed, we call kick_all_cpus_sync()
326 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
327 * messages have completed before we free any VCPUs.
329 if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
330 kvmppc_xics_ipi_action();
331 #endif
332 if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
333 generic_smp_call_function_interrupt();
334 if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
335 scheduler_ipi();
336 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
337 if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
338 timer_broadcast_interrupt();
339 #endif
340 #ifdef CONFIG_NMI_IPI
341 if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
342 nmi_ipi_action(0, NULL);
343 #endif
344 } while (info->messages);
346 return IRQ_HANDLED;
348 #endif /* CONFIG_PPC_SMP_MUXED_IPI */
350 static inline void do_message_pass(int cpu, int msg)
352 if (smp_ops->message_pass)
353 smp_ops->message_pass(cpu, msg);
354 #ifdef CONFIG_PPC_SMP_MUXED_IPI
355 else
356 smp_muxed_ipi_message_pass(cpu, msg);
357 #endif
360 void smp_send_reschedule(int cpu)
362 if (likely(smp_ops))
363 do_message_pass(cpu, PPC_MSG_RESCHEDULE);
365 EXPORT_SYMBOL_GPL(smp_send_reschedule);
367 void arch_send_call_function_single_ipi(int cpu)
369 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
372 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
374 unsigned int cpu;
376 for_each_cpu(cpu, mask)
377 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
380 #ifdef CONFIG_NMI_IPI
383 * "NMI IPI" system.
385 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
386 * a running system. They can be used for crash, debug, halt/reboot, etc.
388 * The IPI call waits with interrupts disabled until all targets enter the
389 * NMI handler, then returns. Subsequent IPIs can be issued before targets
390 * have returned from their handlers, so there is no guarantee about
391 * concurrency or re-entrancy.
393 * A new NMI can be issued before all targets exit the handler.
395 * The IPI call may time out without all targets entering the NMI handler.
396 * In that case, there is some logic to recover (and ignore subsequent
397 * NMI interrupts that may eventually be raised), but the platform interrupt
398 * handler may not be able to distinguish this from other exception causes,
399 * which may cause a crash.
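/*
 * Minimal usage sketch, mirroring smp_send_debugger_break() later in this
 * file (my_nmi_callback is a hypothetical name; the callback runs in NMI
 * context with interrupts hard-disabled on each target CPU):
 *
 *	static void my_nmi_callback(struct pt_regs *regs)
 *	{
 *		// inspect regs, dump state, etc.
 *	}
 *
 *	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, my_nmi_callback, 1000000);
 *
 * A delay_us of 0 waits indefinitely for all targets to enter the handler.
 */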
402 static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
403 static struct cpumask nmi_ipi_pending_mask;
404 static bool nmi_ipi_busy = false;
405 static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
407 static void nmi_ipi_lock_start(unsigned long *flags)
409 raw_local_irq_save(*flags);
410 hard_irq_disable();
411 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
412 raw_local_irq_restore(*flags);
413 spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
414 raw_local_irq_save(*flags);
415 hard_irq_disable();
419 static void nmi_ipi_lock(void)
421 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
422 spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
425 static void nmi_ipi_unlock(void)
427 smp_mb();
428 WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
429 atomic_set(&__nmi_ipi_lock, 0);
432 static void nmi_ipi_unlock_end(unsigned long *flags)
434 nmi_ipi_unlock();
435 raw_local_irq_restore(*flags);
439 * Platform NMI handler calls this to ack
441 int smp_handle_nmi_ipi(struct pt_regs *regs)
443 void (*fn)(struct pt_regs *) = NULL;
444 unsigned long flags;
445 int me = raw_smp_processor_id();
446 int ret = 0;
449 * Unexpected NMIs are possible here because the interrupt may not
450 * be able to distinguish NMI IPIs from other types of NMIs, or
451 * because the caller may have timed out.
453 nmi_ipi_lock_start(&flags);
454 if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
455 cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
456 fn = READ_ONCE(nmi_ipi_function);
457 WARN_ON_ONCE(!fn);
458 ret = 1;
460 nmi_ipi_unlock_end(&flags);
462 if (fn)
463 fn(regs);
465 return ret;
468 static void do_smp_send_nmi_ipi(int cpu, bool safe)
470 if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
471 return;
473 if (cpu >= 0) {
474 do_message_pass(cpu, PPC_MSG_NMI_IPI);
475 } else {
476 int c;
478 for_each_online_cpu(c) {
479 if (c == raw_smp_processor_id())
480 continue;
481 do_message_pass(c, PPC_MSG_NMI_IPI);
487 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
488 * - fn is the target callback function.
489 * - delay_us > 0 is the delay before giving up waiting for targets to
490 * begin executing the handler, == 0 specifies indefinite delay.
492 static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
493 u64 delay_us, bool safe)
495 unsigned long flags;
496 int me = raw_smp_processor_id();
497 int ret = 1;
499 BUG_ON(cpu == me);
500 BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
502 if (unlikely(!smp_ops))
503 return 0;
505 nmi_ipi_lock_start(&flags);
506 while (nmi_ipi_busy) {
507 nmi_ipi_unlock_end(&flags);
508 spin_until_cond(!nmi_ipi_busy);
509 nmi_ipi_lock_start(&flags);
511 nmi_ipi_busy = true;
512 nmi_ipi_function = fn;
514 WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
516 if (cpu < 0) {
517 /* ALL_OTHERS */
518 cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
519 cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
520 } else {
521 cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
524 nmi_ipi_unlock();
526 /* Interrupts remain hard disabled */
528 do_smp_send_nmi_ipi(cpu, safe);
530 nmi_ipi_lock();
531 /* nmi_ipi_busy is set here, so unlock/lock is okay */
532 while (!cpumask_empty(&nmi_ipi_pending_mask)) {
533 nmi_ipi_unlock();
534 udelay(1);
535 nmi_ipi_lock();
536 if (delay_us) {
537 delay_us--;
538 if (!delay_us)
539 break;
543 if (!cpumask_empty(&nmi_ipi_pending_mask)) {
544 /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
545 ret = 0;
546 cpumask_clear(&nmi_ipi_pending_mask);
549 nmi_ipi_function = NULL;
550 nmi_ipi_busy = false;
552 nmi_ipi_unlock_end(&flags);
554 return ret;
557 int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
559 return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
562 int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
564 return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
566 #endif /* CONFIG_NMI_IPI */
568 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
569 void tick_broadcast(const struct cpumask *mask)
571 unsigned int cpu;
573 for_each_cpu(cpu, mask)
574 do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
576 #endif
578 #ifdef CONFIG_DEBUGGER
579 void debugger_ipi_callback(struct pt_regs *regs)
581 debugger_ipi(regs);
584 void smp_send_debugger_break(void)
586 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
588 #endif
590 #ifdef CONFIG_KEXEC_CORE
591 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
593 int cpu;
595 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
596 if (kdump_in_progress() && crash_wake_offline) {
597 for_each_present_cpu(cpu) {
598 if (cpu_online(cpu))
599 continue;
601 * crash_ipi_callback will wait for
602 * all cpus, including offline CPUs.
603 * We don't care about nmi_ipi_function.
604 * Offline cpus will jump straight into
605 * crash_ipi_callback, we can skip the
606 * entire NMI dance and waiting for
607 * cpus to clear pending mask, etc.
609 do_smp_send_nmi_ipi(cpu, false);
613 #endif
615 #ifdef CONFIG_NMI_IPI
616 static void nmi_stop_this_cpu(struct pt_regs *regs)
619 * IRQs are already hard disabled by smp_handle_nmi_ipi().
621 spin_begin();
622 while (1)
623 spin_cpu_relax();
626 void smp_send_stop(void)
628 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
631 #else /* CONFIG_NMI_IPI */
633 static void stop_this_cpu(void *dummy)
635 hard_irq_disable();
636 spin_begin();
637 while (1)
638 spin_cpu_relax();
641 void smp_send_stop(void)
643 static bool stopped = false;
646 * Prevent waiting on csd lock from a previous smp_send_stop.
647 * This is racy, but in general callers try to do the right
648 * thing and only fire off one smp_send_stop (e.g., see
649 * kernel/panic.c)
651 if (stopped)
652 return;
654 stopped = true;
656 smp_call_function(stop_this_cpu, NULL, 0);
658 #endif /* CONFIG_NMI_IPI */
660 struct task_struct *current_set[NR_CPUS];
662 static void smp_store_cpu_info(int id)
664 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
665 #ifdef CONFIG_PPC_FSL_BOOK3E
666 per_cpu(next_tlbcam_idx, id)
667 = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
668 #endif
672 * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
673 * rather than just passing around the cpumask we pass around a function that
674 * returns that cpumask for the given CPU.
676 static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
678 cpumask_set_cpu(i, get_cpumask(j));
679 cpumask_set_cpu(j, get_cpumask(i));
682 #ifdef CONFIG_HOTPLUG_CPU
683 static void set_cpus_unrelated(int i, int j,
684 struct cpumask *(*get_cpumask)(int))
686 cpumask_clear_cpu(i, get_cpumask(j));
687 cpumask_clear_cpu(j, get_cpumask(i));
689 #endif
692 * Extends set_cpus_related. Instead of setting one CPU at a time in
693 * dstmask, set srcmask in one shot. dstmask should be a superset of srcmask.
695 static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
696 struct cpumask *(*dstmask)(int))
698 struct cpumask *mask;
699 int k;
701 mask = srcmask(j);
702 for_each_cpu(k, srcmask(i))
703 cpumask_or(dstmask(k), dstmask(k), mask);
705 if (i == j)
706 return;
708 mask = srcmask(i);
709 for_each_cpu(k, srcmask(j))
710 cpumask_or(dstmask(k), dstmask(k), mask);
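/*
 * For example, update_mask_by_l2() below calls
 * or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask), which ORs
 * submask_fn(cpu) into the L2 mask of every CPU in submask_fn(cpu) in a
 * single pass, rather than calling set_cpus_related() once per CPU pair.
 */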
714 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
715 * property for the CPU device node @dn and stores
716 * the parsed output in the thread_groups_list
717 * structure @tglp.
719 * @dn: The device node of the CPU device.
720 * @tglp: Pointer to a thread group list structure into which the parsed
721 * output of "ibm,thread-groups" is stored.
723 * ibm,thread-groups[0..N-1] array defines which group of threads in
724 * the CPU-device node can be grouped together based on the property.
726 * This array can represent thread groupings for multiple properties.
728 * ibm,thread-groups[i + 0] tells us the property based on which the
729 * threads are being grouped together. If this value is 1, it implies
730 * that the threads in the same group share the L1 and translation cache. If
731 * the value is 2, it implies that the threads in the same group share
732 * the same L2 cache.
734 * ibm,thread-groups[i+1] tells us how many such thread groups exist for the
735 * property ibm,thread-groups[i]
737 * ibm,thread-groups[i+2] tells us the number of threads in each such
738 * group.
739 * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
741 * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
742 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
743 * the grouping.
745 * Example:
746 * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
747 * This can be decomposed into two consecutive arrays:
748 * a) [1,2,4,8,10,12,14,9,11,13,15]
749 * b) [2,2,4,8,10,12,14,9,11,13,15]
751 * where in,
753 * a) provides information of Property "1" being shared by "2" groups,
754 * each with "4" threads each. The "ibm,ppc-interrupt-server#s" of
755 * the first group is {8,10,12,14} and the
756 * "ibm,ppc-interrupt-server#s" of the second group is
757 * {9,11,13,15}. Property "1" is indicative of the threads in the
758 * group sharing the L1 cache, translation cache and instruction/data
759 * flow.
761 * b) provides information of Property "2" being shared by "2" groups,
762 * each group with "4" threads. The "ibm,ppc-interrupt-server#s" of
763 * the first group is {8,10,12,14} and the
764 * "ibm,ppc-interrupt-server#s" of the second group is
765 * {9,11,13,15}. Property "2" indicates that the threads in each
766 * group share the L2-cache.
768 * Returns 0 on success, -EINVAL if the property does not exist,
769 * -ENODATA if property does not have a value, and -EOVERFLOW if the
770 * property data isn't large enough.
772 static int parse_thread_groups(struct device_node *dn,
773 struct thread_groups_list *tglp)
775 unsigned int property_idx = 0;
776 u32 *thread_group_array;
777 size_t total_threads;
778 int ret = 0, count;
779 u32 *thread_list;
780 int i = 0;
782 count = of_property_count_u32_elems(dn, "ibm,thread-groups");
783 thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
784 ret = of_property_read_u32_array(dn, "ibm,thread-groups",
785 thread_group_array, count);
786 if (ret)
787 goto out_free;
789 while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) {
790 int j;
791 struct thread_groups *tg = &tglp->property_tgs[property_idx++];
793 tg->property = thread_group_array[i];
794 tg->nr_groups = thread_group_array[i + 1];
795 tg->threads_per_group = thread_group_array[i + 2];
796 total_threads = tg->nr_groups * tg->threads_per_group;
798 thread_list = &thread_group_array[i + 3];
800 for (j = 0; j < total_threads; j++)
801 tg->thread_list[j] = thread_list[j];
802 i = i + 3 + total_threads;
805 tglp->nr_properties = property_idx;
807 out_free:
808 kfree(thread_group_array);
809 return ret;
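/*
 * With the example value of "ibm,thread-groups" given in the comment above,
 * parsing leaves tglp->nr_properties == 2, with
 *	property_tgs[0] = { .property = 1, .nr_groups = 2, .threads_per_group = 4,
 *			    .thread_list = {8, 10, 12, 14, 9, 11, 13, 15} }
 * and property_tgs[1] identical except for .property = 2.
 */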
813 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
814 * that @cpu belongs to.
816 * @cpu : The logical CPU whose thread group is being searched.
817 * @tg : The thread-group structure of the CPU node which @cpu belongs
818 * to.
820 * Returns the index into tg->thread_list that points to the start
821 * of the thread_group that @cpu belongs to.
823 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
824 * tg->thread_list.
826 static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
828 int hw_cpu_id = get_hard_smp_processor_id(cpu);
829 int i, j;
831 for (i = 0; i < tg->nr_groups; i++) {
832 int group_start = i * tg->threads_per_group;
834 for (j = 0; j < tg->threads_per_group; j++) {
835 int idx = group_start + j;
837 if (tg->thread_list[idx] == hw_cpu_id)
838 return group_start;
842 return -1;
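/*
 * Continuing the example above (thread_list = {8,10,12,14,9,11,13,15},
 * nr_groups = 2, threads_per_group = 4): a CPU whose hard SMP id is 11 is
 * found at index 5, so the function returns group_start == 4, i.e. the
 * start of the second group {9,11,13,15}.
 */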
845 static struct thread_groups *__init get_thread_groups(int cpu,
846 int group_property,
847 int *err)
849 struct device_node *dn = of_get_cpu_node(cpu, NULL);
850 struct thread_groups_list *cpu_tgl = &tgl[cpu];
851 struct thread_groups *tg = NULL;
852 int i;
853 *err = 0;
855 if (!dn) {
856 *err = -ENODATA;
857 return NULL;
860 if (!cpu_tgl->nr_properties) {
861 *err = parse_thread_groups(dn, cpu_tgl);
862 if (*err)
863 goto out;
866 for (i = 0; i < cpu_tgl->nr_properties; i++) {
867 if (cpu_tgl->property_tgs[i].property == group_property) {
868 tg = &cpu_tgl->property_tgs[i];
869 break;
873 if (!tg)
874 *err = -EINVAL;
875 out:
876 of_node_put(dn);
877 return tg;
880 static int __init init_thread_group_cache_map(int cpu, int cache_property)
883 int first_thread = cpu_first_thread_sibling(cpu);
884 int i, cpu_group_start = -1, err = 0;
885 struct thread_groups *tg = NULL;
886 cpumask_var_t *mask = NULL;
888 if (cache_property != THREAD_GROUP_SHARE_L1 &&
889 cache_property != THREAD_GROUP_SHARE_L2)
890 return -EINVAL;
892 tg = get_thread_groups(cpu, cache_property, &err);
893 if (!tg)
894 return err;
896 cpu_group_start = get_cpu_thread_group_start(cpu, tg);
898 if (unlikely(cpu_group_start == -1)) {
899 WARN_ON_ONCE(1);
900 return -ENODATA;
903 if (cache_property == THREAD_GROUP_SHARE_L1)
904 mask = &per_cpu(thread_group_l1_cache_map, cpu);
905 else if (cache_property == THREAD_GROUP_SHARE_L2)
906 mask = &per_cpu(thread_group_l2_cache_map, cpu);
908 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
910 for (i = first_thread; i < first_thread + threads_per_core; i++) {
911 int i_group_start = get_cpu_thread_group_start(i, tg);
913 if (unlikely(i_group_start == -1)) {
914 WARN_ON_ONCE(1);
915 return -ENODATA;
918 if (i_group_start == cpu_group_start)
919 cpumask_set_cpu(i, *mask);
922 return 0;
925 static bool shared_caches;
927 #ifdef CONFIG_SCHED_SMT
928 /* cpumask of CPUs with asymmetric SMT dependency */
929 static int powerpc_smt_flags(void)
931 int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
933 if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
934 printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
935 flags |= SD_ASYM_PACKING;
937 return flags;
939 #endif
942 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
943 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
944 * since the migrated task remains cache hot. We want to take advantage of this
945 * at the scheduler level so an extra topology level is required.
947 static int powerpc_shared_cache_flags(void)
949 return SD_SHARE_PKG_RESOURCES;
953 * We can't just pass cpu_l2_cache_mask() directly because
954 * it returns a non-const pointer and the compiler barfs on that.
956 static const struct cpumask *shared_cache_mask(int cpu)
958 return per_cpu(cpu_l2_cache_map, cpu);
961 #ifdef CONFIG_SCHED_SMT
962 static const struct cpumask *smallcore_smt_mask(int cpu)
964 return cpu_smallcore_mask(cpu);
966 #endif
968 static struct cpumask *cpu_coregroup_mask(int cpu)
970 return per_cpu(cpu_coregroup_map, cpu);
973 static bool has_coregroup_support(void)
975 return coregroup_enabled;
978 static const struct cpumask *cpu_mc_mask(int cpu)
980 return cpu_coregroup_mask(cpu);
983 static struct sched_domain_topology_level powerpc_topology[] = {
984 #ifdef CONFIG_SCHED_SMT
985 { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
986 #endif
987 { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
988 { cpu_mc_mask, SD_INIT_NAME(MC) },
989 { cpu_cpu_mask, SD_INIT_NAME(DIE) },
990 { NULL, },
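/*
 * The resulting sched-domain hierarchy is SMT (threads of a core, or of a
 * small core when big cores are present) -> CACHE (L2 siblings) -> MC
 * (coregroup) -> DIE (cpu_cpu_mask). fixup_topology() below may rewrite or
 * collapse levels whose masks turn out to be identical at boot.
 */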
993 static int __init init_big_cores(void)
995 int cpu;
997 for_each_possible_cpu(cpu) {
998 int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);
1000 if (err)
1001 return err;
1003 zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
1004 GFP_KERNEL,
1005 cpu_to_node(cpu));
1008 has_big_cores = true;
1010 for_each_possible_cpu(cpu) {
1011 int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2);
1013 if (err)
1014 return err;
1017 thread_group_shares_l2 = true;
1018 pr_debug("L2 cache only shared by the threads in the small core\n");
1019 return 0;
1022 void __init smp_prepare_cpus(unsigned int max_cpus)
1024 unsigned int cpu;
1026 DBG("smp_prepare_cpus\n");
1029 * setup_cpu may need to be called on the boot cpu. We haven't
1030 * spun any cpus up yet, but let's be paranoid.
1032 BUG_ON(boot_cpuid != smp_processor_id());
1034 /* Fixup boot cpu */
1035 smp_store_cpu_info(boot_cpuid);
1036 cpu_callin_map[boot_cpuid] = 1;
1038 for_each_possible_cpu(cpu) {
1039 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
1040 GFP_KERNEL, cpu_to_node(cpu));
1041 zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
1042 GFP_KERNEL, cpu_to_node(cpu));
1043 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
1044 GFP_KERNEL, cpu_to_node(cpu));
1045 if (has_coregroup_support())
1046 zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
1047 GFP_KERNEL, cpu_to_node(cpu));
1049 #ifdef CONFIG_NEED_MULTIPLE_NODES
1051 * numa_node_id() works after this.
1053 if (cpu_present(cpu)) {
1054 set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
1055 set_cpu_numa_mem(cpu,
1056 local_memory_node(numa_cpu_lookup_table[cpu]));
1058 #endif
1060 * cpu_core_map is no longer updated and exists only because
1061 * it has been exported for so long. It will only hold a snapshot
1062 * of cpu_cpu_mask.
1064 cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
1067 /* Init the cpumasks so the boot CPU is related to itself */
1068 cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
1069 cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
1071 if (has_coregroup_support())
1072 cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
1074 init_big_cores();
1075 if (has_big_cores) {
1076 cpumask_set_cpu(boot_cpuid,
1077 cpu_smallcore_mask(boot_cpuid));
1080 if (smp_ops && smp_ops->probe)
1081 smp_ops->probe();
1084 void smp_prepare_boot_cpu(void)
1086 BUG_ON(smp_processor_id() != boot_cpuid);
1087 #ifdef CONFIG_PPC64
1088 paca_ptrs[boot_cpuid]->__current = current;
1089 #endif
1090 set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
1091 current_set[boot_cpuid] = current;
1094 #ifdef CONFIG_HOTPLUG_CPU
1096 int generic_cpu_disable(void)
1098 unsigned int cpu = smp_processor_id();
1100 if (cpu == boot_cpuid)
1101 return -EBUSY;
1103 set_cpu_online(cpu, false);
1104 #ifdef CONFIG_PPC64
1105 vdso_data->processorCount--;
1106 #endif
1107 /* Update affinity of all IRQs previously aimed at this CPU */
1108 irq_migrate_all_off_this_cpu();
1111 * Depending on the details of the interrupt controller, it's possible
1112 * that one of the interrupts we just migrated away from this CPU is
1113 * actually already pending on this CPU. If we leave it in that state
1114 * the interrupt will never be EOI'ed, and will never fire again. So
1115 * temporarily enable interrupts here, to allow any pending interrupt to
1116 * be received (and EOI'ed), before we take this CPU offline.
1118 local_irq_enable();
1119 mdelay(1);
1120 local_irq_disable();
1122 return 0;
1125 void generic_cpu_die(unsigned int cpu)
1127 int i;
1129 for (i = 0; i < 100; i++) {
1130 smp_rmb();
1131 if (is_cpu_dead(cpu))
1132 return;
1133 msleep(100);
1135 printk(KERN_ERR "CPU%d didn't die...\n", cpu);
1138 void generic_set_cpu_dead(unsigned int cpu)
1140 per_cpu(cpu_state, cpu) = CPU_DEAD;
1144 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
1145 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
1146 * which makes the delay in generic_cpu_die() not happen.
1148 void generic_set_cpu_up(unsigned int cpu)
1150 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
1153 int generic_check_cpu_restart(unsigned int cpu)
1155 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
1158 int is_cpu_dead(unsigned int cpu)
1160 return per_cpu(cpu_state, cpu) == CPU_DEAD;
1163 static bool secondaries_inhibited(void)
1165 return kvm_hv_mode_active();
1168 #else /* HOTPLUG_CPU */
1170 #define secondaries_inhibited() 0
1172 #endif
1174 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
1176 #ifdef CONFIG_PPC64
1177 paca_ptrs[cpu]->__current = idle;
1178 paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
1179 THREAD_SIZE - STACK_FRAME_OVERHEAD;
1180 #endif
1181 idle->cpu = cpu;
1182 secondary_current = current_set[cpu] = idle;
1185 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1187 int rc, c;
1190 * Don't allow secondary threads to come online if inhibited
1192 if (threads_per_core > 1 && secondaries_inhibited() &&
1193 cpu_thread_in_subcore(cpu))
1194 return -EBUSY;
1196 if (smp_ops == NULL ||
1197 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1198 return -EINVAL;
1200 cpu_idle_thread_init(cpu, tidle);
1203 * The platform might need to allocate resources prior to bringing
1204 * up the CPU
1206 if (smp_ops->prepare_cpu) {
1207 rc = smp_ops->prepare_cpu(cpu);
1208 if (rc)
1209 return rc;
1212 /* Make sure callin-map entry is 0 (can be left over from a CPU
1213 * hotplug
1215 cpu_callin_map[cpu] = 0;
1217 /* The information for processor bringup must
1218 * be written out to main store before we release
1219 * the processor.
1221 smp_mb();
1223 /* wake up cpus */
1224 DBG("smp: kicking cpu %d\n", cpu);
1225 rc = smp_ops->kick_cpu(cpu);
1226 if (rc) {
1227 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1228 return rc;
1232 * wait to see if the cpu made a callin (is actually up).
1233 * use this value that I found through experimentation.
1234 * -- Cort
1236 if (system_state < SYSTEM_RUNNING)
1237 for (c = 50000; c && !cpu_callin_map[cpu]; c--)
1238 udelay(100);
1239 #ifdef CONFIG_HOTPLUG_CPU
1240 else
1242 * CPUs can take much longer to come up in the
1243 * hotplug case. Wait five seconds.
1245 for (c = 5000; c && !cpu_callin_map[cpu]; c--)
1246 msleep(1);
1247 #endif
1249 if (!cpu_callin_map[cpu]) {
1250 printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1251 return -ENOENT;
1254 DBG("Processor %u found.\n", cpu);
1256 if (smp_ops->give_timebase)
1257 smp_ops->give_timebase();
1259 /* Wait until cpu puts itself in the online & active maps */
1260 spin_until_cond(cpu_online(cpu));
1262 return 0;
1265 /* Return the value of the reg property corresponding to the given
1266 * logical cpu.
1268 int cpu_to_core_id(int cpu)
1270 struct device_node *np;
1271 const __be32 *reg;
1272 int id = -1;
1274 np = of_get_cpu_node(cpu, NULL);
1275 if (!np)
1276 goto out;
1278 reg = of_get_property(np, "reg", NULL);
1279 if (!reg)
1280 goto out;
1282 id = be32_to_cpup(reg);
1283 out:
1284 of_node_put(np);
1285 return id;
1287 EXPORT_SYMBOL_GPL(cpu_to_core_id);
1289 /* Helper routines for cpu to core mapping */
1290 int cpu_core_index_of_thread(int cpu)
1292 return cpu >> threads_shift;
1294 EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1296 int cpu_first_thread_of_core(int core)
1298 return core << threads_shift;
1300 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
1302 /* Must be called when no change can occur to cpu_present_mask,
1303 * i.e. during cpu online or offline.
1305 static struct device_node *cpu_to_l2cache(int cpu)
1307 struct device_node *np;
1308 struct device_node *cache;
1310 if (!cpu_present(cpu))
1311 return NULL;
1313 np = of_get_cpu_node(cpu, NULL);
1314 if (np == NULL)
1315 return NULL;
1317 cache = of_find_next_cache_node(np);
1319 of_node_put(np);
1321 return cache;
1324 static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
1326 struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1327 struct device_node *l2_cache, *np;
1328 int i;
1330 if (has_big_cores)
1331 submask_fn = cpu_smallcore_mask;
1334 * If the threads in a thread-group share L2 cache, then the
1335 * L2-mask can be obtained from thread_group_l2_cache_map.
1337 if (thread_group_shares_l2) {
1338 cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));
1340 for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
1341 if (cpu_online(i))
1342 set_cpus_related(i, cpu, cpu_l2_cache_mask);
1345 /* Verify that L1-cache siblings are a subset of L2 cache-siblings */
1346 if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
1347 !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
1348 pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
1349 cpu);
1352 return true;
1355 l2_cache = cpu_to_l2cache(cpu);
1356 if (!l2_cache || !*mask) {
1357 /* Assume only core siblings share cache with this CPU */
1358 for_each_cpu(i, submask_fn(cpu))
1359 set_cpus_related(cpu, i, cpu_l2_cache_mask);
1361 return false;
1364 cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1366 /* Update l2-cache mask with all the CPUs that are part of submask */
1367 or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
1369 /* Skip all CPUs already part of current CPU l2-cache mask */
1370 cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));
1372 for_each_cpu(i, *mask) {
1374 * When updating the masks, the current CPU has not yet been marked
1375 * online, but we still need to update the cache masks.
1377 np = cpu_to_l2cache(i);
1379 /* Skip all CPUs already part of current CPU l2-cache */
1380 if (np == l2_cache) {
1381 or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
1382 cpumask_andnot(*mask, *mask, submask_fn(i));
1383 } else {
1384 cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
1387 of_node_put(np);
1389 of_node_put(l2_cache);
1391 return true;
1394 #ifdef CONFIG_HOTPLUG_CPU
1395 static void remove_cpu_from_masks(int cpu)
1397 struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
1398 int i;
1400 if (shared_caches)
1401 mask_fn = cpu_l2_cache_mask;
1403 for_each_cpu(i, mask_fn(cpu)) {
1404 set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1405 set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1406 if (has_big_cores)
1407 set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1410 if (has_coregroup_support()) {
1411 for_each_cpu(i, cpu_coregroup_mask(cpu))
1412 set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
1415 #endif
1417 static inline void add_cpu_to_smallcore_masks(int cpu)
1419 int i;
1421 if (!has_big_cores)
1422 return;
1424 cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1426 for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
1427 if (cpu_online(i))
1428 set_cpus_related(i, cpu, cpu_smallcore_mask);
1432 static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
1434 struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1435 int coregroup_id = cpu_to_coregroup_id(cpu);
1436 int i;
1438 if (shared_caches)
1439 submask_fn = cpu_l2_cache_mask;
1441 if (!*mask) {
1442 /* Assume only siblings are part of this CPU's coregroup */
1443 for_each_cpu(i, submask_fn(cpu))
1444 set_cpus_related(cpu, i, cpu_coregroup_mask);
1446 return;
1449 cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1451 /* Update coregroup mask with all the CPUs that are part of submask */
1452 or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
1454 /* Skip all CPUs already part of coregroup mask */
1455 cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));
1457 for_each_cpu(i, *mask) {
1458 /* Skip all CPUs not part of this coregroup */
1459 if (coregroup_id == cpu_to_coregroup_id(i)) {
1460 or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
1461 cpumask_andnot(*mask, *mask, submask_fn(i));
1462 } else {
1463 cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
1468 static void add_cpu_to_masks(int cpu)
1470 int first_thread = cpu_first_thread_sibling(cpu);
1471 cpumask_var_t mask;
1472 int i;
1475 * This CPU will not be in the online mask yet so we need to manually
1476 * add it to its own thread sibling mask.
1478 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1480 for (i = first_thread; i < first_thread + threads_per_core; i++)
1481 if (cpu_online(i))
1482 set_cpus_related(i, cpu, cpu_sibling_mask);
1484 add_cpu_to_smallcore_masks(cpu);
1486 /* In CPU-hotplug path, hence use GFP_ATOMIC */
1487 alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
1488 update_mask_by_l2(cpu, &mask);
1490 if (has_coregroup_support())
1491 update_coregroup_mask(cpu, &mask);
1493 free_cpumask_var(mask);
1496 /* Activate a secondary processor. */
1497 void start_secondary(void *unused)
1499 unsigned int cpu = raw_smp_processor_id();
1501 mmgrab(&init_mm);
1502 current->active_mm = &init_mm;
1504 smp_store_cpu_info(cpu);
1505 set_dec(tb_ticks_per_jiffy);
1506 rcu_cpu_starting(cpu);
1507 preempt_disable();
1508 cpu_callin_map[cpu] = 1;
1510 if (smp_ops->setup_cpu)
1511 smp_ops->setup_cpu(cpu);
1512 if (smp_ops->take_timebase)
1513 smp_ops->take_timebase();
1515 secondary_cpu_time_init();
1517 #ifdef CONFIG_PPC64
1518 if (system_state == SYSTEM_RUNNING)
1519 vdso_data->processorCount++;
1521 vdso_getcpu_init();
1522 #endif
1523 /* Update topology CPU masks */
1524 add_cpu_to_masks(cpu);
1527 * Check for any shared caches. Note that this must be done on a
1528 * per-core basis because one core in the pair might be disabled.
1530 if (!shared_caches) {
1531 struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1532 struct cpumask *mask = cpu_l2_cache_mask(cpu);
1534 if (has_big_cores)
1535 sibling_mask = cpu_smallcore_mask;
1537 if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
1538 shared_caches = true;
1541 set_numa_node(numa_cpu_lookup_table[cpu]);
1542 set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1544 smp_wmb();
1545 notify_cpu_starting(cpu);
1546 set_cpu_online(cpu, true);
1548 boot_init_stack_canary();
1550 local_irq_enable();
1552 /* We can enable ftrace for secondary cpus now */
1553 this_cpu_enable_ftrace();
1555 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1557 BUG();
1560 int setup_profiling_timer(unsigned int multiplier)
1562 return 0;
1565 static void fixup_topology(void)
1567 int i;
1569 #ifdef CONFIG_SCHED_SMT
1570 if (has_big_cores) {
1571 pr_info("Big cores detected but using small core scheduling\n");
1572 powerpc_topology[smt_idx].mask = smallcore_smt_mask;
1574 #endif
1576 if (!has_coregroup_support())
1577 powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
1580 * Try to consolidate topology levels here instead of
1581 * allowing the scheduler to degenerate them.
1582 * - Don't consolidate if masks are different.
1583 * - Don't consolidate if sd_flags exist and are different.
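 *
 * For instance, when coregroup support is absent the MC level was given the
 * CACHE level's mask above, so the loop below shifts DIE (and then the NULL
 * terminator) up one slot and the scheduler sees SMT -> CACHE -> DIE.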
1585 for (i = 1; i <= die_idx; i++) {
1586 if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
1587 continue;
1589 if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
1590 powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
1591 continue;
1593 if (!powerpc_topology[i - 1].sd_flags)
1594 powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;
1596 powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
1597 powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
1598 #ifdef CONFIG_SCHED_DEBUG
1599 powerpc_topology[i].name = powerpc_topology[i + 1].name;
1600 #endif
1604 void __init smp_cpus_done(unsigned int max_cpus)
1607 * We are running pinned to the boot CPU, see rest_init().
1609 if (smp_ops && smp_ops->setup_cpu)
1610 smp_ops->setup_cpu(boot_cpuid);
1612 if (smp_ops && smp_ops->bringup_done)
1613 smp_ops->bringup_done();
1615 dump_numa_cpu_topology();
1617 fixup_topology();
1618 set_sched_topology(powerpc_topology);
1621 #ifdef CONFIG_HOTPLUG_CPU
1622 int __cpu_disable(void)
1624 int cpu = smp_processor_id();
1625 int err;
1627 if (!smp_ops->cpu_disable)
1628 return -ENOSYS;
1630 this_cpu_disable_ftrace();
1632 err = smp_ops->cpu_disable();
1633 if (err)
1634 return err;
1636 /* Update sibling maps */
1637 remove_cpu_from_masks(cpu);
1639 return 0;
1642 void __cpu_die(unsigned int cpu)
1644 if (smp_ops->cpu_die)
1645 smp_ops->cpu_die(cpu);
1648 void arch_cpu_idle_dead(void)
1650 sched_preempt_enable_no_resched();
1653 * Disable on the down path. This will be re-enabled by
1654 * start_secondary() via start_secondary_resume() below
1656 this_cpu_disable_ftrace();
1658 if (smp_ops->cpu_offline_self)
1659 smp_ops->cpu_offline_self();
1661 /* If we return, we re-enter start_secondary */
1662 start_secondary_resume();
1665 #endif