/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
#include <linux/kexec.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
int cpus_stuck_in_kernel;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_CPU_CRASH_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP
};

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
	return -ENOSYS;
}
#endif

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}

static DECLARE_COMPLETION(cpu_running);

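/*
 * Bring a secondary CPU online. The hotplug core hands us the idle task
 * that the new CPU will run; we publish it (and its stack) through
 * secondary_data for the enable method, then wait up to one second for
 * the secondary to mark itself online.
 */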
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	long status;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.task = idle;
	secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
	update_cpu_boot_status(CPU_MMU_OFF);
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
		return ret;
	}

	secondary_data.task = NULL;
	secondary_data.stack = NULL;
	status = READ_ONCE(secondary_data.status);
	if (ret && status) {
		if (status == CPU_MMU_OFF)
			status = READ_ONCE(__early_cpu_boot_status);

		switch (status & CPU_BOOT_STATUS_MASK) {
		default:
			pr_err("CPU%u: failed in unknown state : 0x%lx\n",
			       cpu, status);
			break;
		case CPU_KILL_ME:
			if (!op_cpu_kill(cpu)) {
				pr_crit("CPU%u: died during early boot\n", cpu);
				break;
			}
			pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
			/* Fall through */
		case CPU_STUCK_IN_KERNEL:
			pr_crit("CPU%u: is stuck in kernel\n", cpu);
			if (status & CPU_STUCK_REASON_52_BIT_VA)
				pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
			if (status & CPU_STUCK_REASON_NO_GRAN)
				pr_crit("CPU%u: does not support %luK granule\n",
					cpu, PAGE_SIZE / SZ_1K);
			cpus_stuck_in_kernel++;
			break;
		case CPU_PANIC_KERNEL:
			panic("CPU%u detected unsupported configuration\n", cpu);
		}
	}

	return ret;
}

static void init_gic_priority_masking(void)
{
	u32 cpuflags;

	if (WARN_ON(!gic_enable_sre()))
		return;

	cpuflags = read_sysreg(daif);

	WARN_ON(!(cpuflags & PSR_I_BIT));

	gic_write_pmr(GIC_PRIO_IRQOFF);

	/* We can only unmask PSR.I if we can take aborts */
	if (!(cpuflags & PSR_A_BIT))
		write_sysreg(cpuflags & ~PSR_I_BIT, daif);
}

/*
 * This is the secondary CPU boot entry. We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage notrace void secondary_start_kernel(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	cpu = task_cpu(current);
	set_my_cpu_offset(per_cpu_offset(cpu));

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	if (system_uses_irq_prio_masking())
		init_gic_priority_masking();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	check_local_cpu_capabilities();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	store_cpu_topology(cpu);
	numa_add_cpu(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
		cpu, (unsigned long)mpidr, read_cpuid_id());
	update_cpu_boot_status(CPU_BOOT_SUCCESS);
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_daif_restore(DAIF_PROCCTX);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have a cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	remove_cpu_topology(cpu);
	numa_remove_cpu(cpu);

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 0;

	return cpu_ops[cpu]->cpu_kill(cpu);
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n",
			cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_daif_mask();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);

	BUG();
}
#endif

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
	int cpu = smp_processor_id();

	pr_crit("CPU%d: will not boot\n", cpu);

	/* Mark this CPU absent */
	set_cpu_present(cpu, 0);

#ifdef CONFIG_HOTPLUG_CPU
	update_cpu_boot_status(CPU_KILL_ME);
	/* Check if we can park ourselves */
	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
		cpu_ops[cpu]->cpu_die(cpu);
#endif
	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

	cpu_park_loop();
}

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
	mark_linear_text_alias_ro();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	/*
	 * Initialise the static keys early as they may be enabled by the
	 * cpufeature code and early parameters.
	 */
	jump_label_init();
	cpuinfo_store_boot_cpu();

	/*
	 * We now know enough about the boot CPU to apply the
	 * alternatives that cannot wait until interrupt handling
	 * and/or scheduling is enabled.
	 */
	apply_boot_alternatives();

	/* Conditionally switch to GIC PMR for interrupt masking */
	if (system_uses_irq_prio_masking())
		init_gic_priority_masking();
}

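/*
 * Read a cpu node's MPIDR from its "reg" property, returning INVALID_HWID
 * for a missing or malformed entry.
 */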
static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;

	/*
	 * A cpu node with missing "reg" property is
	 * considered invalid to build a cpu_logical_map
	 * entry.
	 */
	cell = of_get_property(dn, "reg", NULL);
	if (!cell) {
		pr_err("%pOF: missing reg property\n", dn);
		return INVALID_HWID;
	}

	hwid = of_read_number(cell, of_n_addr_cells(dn));
	/*
	 * Non affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK) {
		pr_err("%pOF: invalid reg property\n", dn);
		return INVALID_HWID;
	}
	return hwid;
}

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
	if (cpu_read_ops(cpu))
		return -ENODEV;

	if (cpu_ops[cpu]->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}

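/*
 * Logical CPU numbering starts at 1 here because logical id 0 is reserved
 * for the boot CPU, whose cpu_logical_map entry is set up before these
 * parsers run.
 */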
static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];

struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
	return &cpu_madt_gicc[cpu];
}

/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		cpu_madt_gicc[0] = *processor;
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	cpu_logical_map(cpu_count) = hwid;

	cpu_madt_gicc[cpu_count] = *processor;

	/*
	 * Set-up the ACPI parking protocol cpu entries
	 * while initializing the cpu_logical_map to
	 * avoid parsing MADT entries multiple times for
	 * nothing (ie a valid cpu_logical_map entry should
	 * contain a valid parking protocol data set to
	 * initialize the cpu if the parking protocol is
	 * the only available enable method).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}

static void __init acpi_parse_and_init_cpus(void)
{
	int i;

	/*
	 * do a walk of MADT to determine how many CPUs
	 * we have including disabled CPUs, and get information
	 * we need for SMP init.
	 */
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
			      acpi_parse_gic_cpu_interface, 0);

	/*
	 * In ACPI, SMP and CPU NUMA information is provided in separate
	 * static tables, namely the MADT and the SRAT.
	 *
	 * Thus, it is simpler to first create the cpu logical map through
	 * an MADT walk and then map the logical cpus to their node ids
	 * as separate steps.
	 */
	acpi_map_cpus_to_nodes();

	for (i = 0; i < nr_cpu_ids; i++)
		early_map_cpu_to_node(i, acpi_numa_get_nid(i));
}
#else
#define acpi_parse_and_init_cpus(...)	do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn;

	for_each_of_cpu_node(dn) {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%pOF: duplicate cpu reg properties in the DT\n",
			       dn);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%pOF: duplicate boot cpu reg property in DT\n",
				       dn);
				goto next;
			}

			bootcpu_valid = true;
			early_map_cpu_to_node(0, of_node_to_nid(dn));

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu_count) = hwid;

		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
		cpu_count++;
	}
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		acpi_parse_and_init_cpus();

	if (cpu_count > nr_cpu_ids)
		pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
			cpu_count, nr_cpu_ids);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < nr_cpu_ids; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				cpu_logical_map(i) = INVALID_HWID;
		}
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu;
	unsigned int this_cpu;

	init_cpu_topology();

	this_cpu = smp_processor_id();
	store_cpu_topology(this_cpu);
	numa_store_cpu_info(this_cpu);
	numa_add_cpu(this_cpu);

	/*
	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
	 * secondary CPUs present.
	 */
	if (max_cpus == 0)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 */
	for_each_possible_cpu(cpu) {

		per_cpu(cpu_number, cpu) = cpu;

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		numa_store_cpu_info(cpu);
	}
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_WAKEUP, "CPU wake-up interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

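/* Dump the IPI counters: one row per IPI type, one column per CPU. */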
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, "      %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (__smp_cross_call)
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);

	local_daif_mask();
	sdei_mask_local_cpu();

	while (1)
		cpu_relax();
}

#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
#endif

static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_KEXEC_CORE
	crash_save_cpu(regs, cpu);

	atomic_dec(&waiting_for_crash_ipi);

	local_irq_disable();
	sdei_mask_local_cpu();

#ifdef CONFIG_HOTPLUG_CPU
	if (cpu_ops[cpu]->cpu_die)
		cpu_ops[cpu]->cpu_die(cpu);
#endif

	/* just in case */
	cpu_park_loop();
#endif
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	case IPI_CPU_CRASH_STOP:
		if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
			irq_enter();
			ipi_cpu_crash_stop(cpu, regs);

			unreachable();
		}
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

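/*
 * Stop all other CPUs, e.g. on shutdown: IPI them with IPI_CPU_STOP and
 * allow up to one second for them to take themselves offline.
 */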
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));

	sdei_mask_local_cpu();
}

#ifdef CONFIG_KEXEC_CORE
void crash_smp_send_stop(void)
{
	static int cpus_stopped;
	cpumask_t mask;
	unsigned long timeout;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	cpus_stopped = 1;

	if (num_online_cpus() == 1) {
		sdei_mask_local_cpu();
		return;
	}

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);

	pr_crit("SMP: stopping secondary CPUs\n");
	smp_cross_call(&mask, IPI_CPU_CRASH_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
		udelay(1);

	if (atomic_read(&waiting_for_crash_ipi) > 0)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(&mask));

	sdei_mask_local_cpu();
}

bool smp_crash_stop_failed(void)
{
	return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

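/*
 * Whether this system has a mechanism for taking CPUs offline. Boot
 * methods such as spin-table provide no cpu_die method.
 */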
static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int any_cpu = raw_smp_processor_id();

	if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
		return true;
#endif
	return false;
}

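/*
 * True if some CPUs may still be executing in kernel text (stuck there
 * during boot, or brought up via spin-table and so never able to go
 * offline). Presumably of interest to callers, such as kexec, that must
 * know whether kernel text is still live on another CPU.
 */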
bool cpus_are_stuck_in_kernel(void)
{
	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());

	return !!cpus_stuck_in_kernel || smp_spin_tables;
}