/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we need
 * some other way of telling a new secondary core where to place its
 * SVC stack.
 */
struct secondary_data secondary_data;
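
/*
 * IPI message types dispatched in handle_IPI(), in the order assumed by
 * the ipi_types[] string table further down.
 */
enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
};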

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}
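
/*
 * An enable-method plugs in behind cpu_ops[]. As a hypothetical sketch
 * (compare the real smp_spin_table_ops and cpu_psci_ops back-ends), a
 * minimal implementation would look roughly like:
 *
 *	static int example_cpu_boot(unsigned int cpu)
 *	{
 *		// Release the core identified by cpu_logical_map(cpu)
 *		// from its holding pen; return 0 on success.
 *		return 0;
 *	}
 *
 *	static const struct cpu_operations example_cpu_ops = {
 *		.name		= "example",
 *		.cpu_init	= example_cpu_init,
 *		.cpu_prepare	= example_cpu_prepare,
 *		.cpu_boot	= example_cpu_boot,
 *	};
 *
 * example_cpu_init() and example_cpu_prepare() are placeholders here.
 */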

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;

	return ret;
}
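
/*
 * The boot handshake, for reference:
 *
 *	boot CPU				secondary CPU
 *	--------				-------------
 *	secondary_data.stack = ...
 *	boot_secondary()
 *	wait_for_completion_timeout()		secondary_start_kernel()
 *						set_cpu_online(cpu, true)
 *						complete(&cpu_running)
 *	check cpu_online(cpu)			cpu_startup_entry()
 */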

static void smp_store_cpu_info(unsigned int cpuid)
{
	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	preempt_disable();
	trace_hardirqs_off();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue. Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_dbg_enable();
	local_irq_enable();
	local_async_enable();

	/*
	 * OK, it's off to the idle thread for us.
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have a cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU.
	 */
	migrate_irqs();

	/*
	 * Remove this CPU from the vm mask set of all processes.
	 */
	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 0;

	return cpu_ops[cpu]->cpu_kill(cpu);
}

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n",
			cpu, err);
}
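
/*
 * Hot-unplug is thus split three ways: __cpu_disable() runs on the dying
 * CPU to take it out of the online mask, cpu_die() (below) parks it via
 * the enable-method, and __cpu_die() runs on the requesting CPU to
 * confirm, with firmware help where available, that it has really gone.
 */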

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);

	BUG();
}
#endif

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	hyp_mode_check();
	apply_alternatives_all();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;

	/*
	 * A cpu node with a missing "reg" property is considered invalid
	 * for building a cpu_logical_map entry.
	 */
	cell = of_get_property(dn, "reg", NULL);
	if (!cell) {
		pr_err("%s: missing reg property\n", dn->full_name);
		return INVALID_HWID;
	}

	hwid = of_read_number(cell, of_n_addr_cells(dn));
	/*
	 * Non-affinity bits must be set to 0 in the DT.
	 */
	if (hwid & ~MPIDR_HWID_BITMASK) {
		pr_err("%s: invalid reg property\n", dn->full_name);
		return INVALID_HWID;
	}
	return hwid;
}
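
/*
 * For reference, a typical arm64 DT cpu node carries the MPIDR in its
 * "reg" property, e.g. (values illustrative):
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		reg = <0x0 0x0>;
 *		enable-method = "psci";
 *	};
 */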

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success.
 */
static int __init smp_cpu_setup(int cpu)
{
	if (cpu_read_ops(cpu))
		return -ENODEV;

	if (cpu_ops[cpu]->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;
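
/*
 * cpu_count starts at 1: logical CPU 0 is always the boot CPU, whose
 * MPIDR is installed in cpu_logical_map(0) before either parser below
 * runs, so enumeration of secondaries begins at logical id 1.
 */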

#ifdef CONFIG_ACPI
/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success.
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}

		bootcpu_valid = true;
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	cpu_logical_map(cpu_count) = hwid;

	cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}
#else
#define acpi_table_parse_madt(...)	do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn = NULL;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%s: duplicate cpu reg properties in the DT\n",
				dn->full_name);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%s: duplicate boot cpu reg property in DT\n",
					dn->full_name);
				goto next;
			}

			bootcpu_valid = true;

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu_count) = hwid;
next:
		cpu_count++;
	}
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		/*
		 * do a walk of MADT to determine how many CPUs
		 * we have including disabled CPUs, and get information
		 * we need for SMP init
		 */
		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      acpi_parse_gic_cpu_interface, 0);

	if (cpu_count > NR_CPUS)
		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
			cpu_count, NR_CPUS);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < NR_CPUS; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				cpu_logical_map(i) = INVALID_HWID;
		}
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu, ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/* Don't bother if we're effectively UP */
	if (max_cpus <= 1)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 *
	 * Make sure we online at most (max_cpus - 1) additional CPUs.
	 */
	max_cpus--;
	for_each_possible_cpu(cpu) {
		if (max_cpus == 0)
			break;

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		max_cpus--;
	}
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}
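
/*
 * The hook is installed by the interrupt controller driver once it can
 * deliver SGIs. As a sketch (the GICv2 driver effectively does this;
 * the body shown is a simplification of its real SGI path):
 *
 *	static void gic_raise_softirq(const struct cpumask *mask,
 *				      unsigned int irq)
 *	{
 *		// map 'mask' to a CPU target list and write GICD_SGIR
 *	}
 *
 *	set_smp_cross_call(gic_raise_softirq);
 */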

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, "      %s\n", ipi_types[i]);
	}
}
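
/*
 * The above supplies the IPI block of /proc/interrupts, e.g. on a
 * two-CPU system (counts illustrative):
 *
 *	IPI0:	 12765	  9028	Rescheduling interrupts
 *	IPI1:	  3141	  2718	Function call interrupts
 *	IPI2:	     0	     0	CPU stop interrupts
 *	IPI3:	   502	   441	Timer broadcast interrupts
 *	IPI4:	    17	     9	IRQ work interrupts
 */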

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (__smp_cross_call)
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}