// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>

#include <asm/clint.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
24 enum ipi_message_type
{
31 unsigned long __cpuid_to_hartid_map
[NR_CPUS
] = {
32 [0 ... NR_CPUS
-1] = INVALID_HARTID
35 void __init
smp_setup_processor_id(void)
37 cpuid_to_hartid_map(0) = boot_cpu_hartid
;
40 /* A collection of single bit ipi messages. */
42 unsigned long stats
[IPI_MAX
] ____cacheline_aligned
;
43 unsigned long bits ____cacheline_aligned
;
44 } ipi_data
[NR_CPUS
] __cacheline_aligned
;
46 int riscv_hartid_to_cpuid(int hartid
)
50 for (i
= 0; i
< NR_CPUS
; i
++)
51 if (cpuid_to_hartid_map(i
) == hartid
)
54 pr_err("Couldn't find cpu id for hartid [%d]\n", hartid
);
58 void riscv_cpuid_to_hartid_mask(const struct cpumask
*in
, struct cpumask
*out
)
64 cpumask_set_cpu(cpuid_to_hartid_map(cpu
), out
);
67 bool arch_match_cpu_phys_id(int cpu
, u64 phys_id
)
69 return phys_id
== cpuid_to_hartid_map(cpu
);
/* Profiling multiplier changes are not supported on RISC-V; report success. */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
78 static void ipi_stop(void)
80 set_cpu_online(smp_processor_id(), false);
85 static void send_ipi_mask(const struct cpumask
*mask
, enum ipi_message_type op
)
87 struct cpumask hartid_mask
;
90 smp_mb__before_atomic();
91 for_each_cpu(cpu
, mask
)
92 set_bit(op
, &ipi_data
[cpu
].bits
);
93 smp_mb__after_atomic();
95 riscv_cpuid_to_hartid_mask(mask
, &hartid_mask
);
96 if (IS_ENABLED(CONFIG_RISCV_SBI
))
97 sbi_send_ipi(cpumask_bits(&hartid_mask
));
99 clint_send_ipi_mask(mask
);
102 static void send_ipi_single(int cpu
, enum ipi_message_type op
)
104 int hartid
= cpuid_to_hartid_map(cpu
);
106 smp_mb__before_atomic();
107 set_bit(op
, &ipi_data
[cpu
].bits
);
108 smp_mb__after_atomic();
110 if (IS_ENABLED(CONFIG_RISCV_SBI
))
111 sbi_send_ipi(cpumask_bits(cpumask_of(hartid
)));
113 clint_send_ipi_single(hartid
);
116 static inline void clear_ipi(void)
118 if (IS_ENABLED(CONFIG_RISCV_SBI
))
119 csr_clear(CSR_IP
, IE_SIE
);
121 clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
124 void riscv_software_interrupt(void)
126 unsigned long *pending_ipis
= &ipi_data
[smp_processor_id()].bits
;
127 unsigned long *stats
= ipi_data
[smp_processor_id()].stats
;
134 /* Order bit clearing and data access. */
137 ops
= xchg(pending_ipis
, 0);
141 if (ops
& (1 << IPI_RESCHEDULE
)) {
142 stats
[IPI_RESCHEDULE
]++;
146 if (ops
& (1 << IPI_CALL_FUNC
)) {
147 stats
[IPI_CALL_FUNC
]++;
148 generic_smp_call_function_interrupt();
151 if (ops
& (1 << IPI_CPU_STOP
)) {
152 stats
[IPI_CPU_STOP
]++;
156 BUG_ON((ops
>> IPI_MAX
) != 0);
158 /* Order data access and bit testing. */
163 static const char * const ipi_names
[] = {
164 [IPI_RESCHEDULE
] = "Rescheduling interrupts",
165 [IPI_CALL_FUNC
] = "Function call interrupts",
166 [IPI_CPU_STOP
] = "CPU stop interrupts",
169 void show_ipi_stats(struct seq_file
*p
, int prec
)
173 for (i
= 0; i
< IPI_MAX
; i
++) {
174 seq_printf(p
, "%*s%u:%s", prec
- 1, "IPI", i
,
175 prec
>= 4 ? " " : "");
176 for_each_online_cpu(cpu
)
177 seq_printf(p
, "%10lu ", ipi_data
[cpu
].stats
[i
]);
178 seq_printf(p
, " %s\n", ipi_names
[i
]);
182 void arch_send_call_function_ipi_mask(struct cpumask
*mask
)
184 send_ipi_mask(mask
, IPI_CALL_FUNC
);
187 void arch_send_call_function_single_ipi(int cpu
)
189 send_ipi_single(cpu
, IPI_CALL_FUNC
);
192 void smp_send_stop(void)
194 unsigned long timeout
;
196 if (num_online_cpus() > 1) {
199 cpumask_copy(&mask
, cpu_online_mask
);
200 cpumask_clear_cpu(smp_processor_id(), &mask
);
202 if (system_state
<= SYSTEM_RUNNING
)
203 pr_crit("SMP: stopping secondary CPUs\n");
204 send_ipi_mask(&mask
, IPI_CPU_STOP
);
207 /* Wait up to one second for other CPUs to stop */
208 timeout
= USEC_PER_SEC
;
209 while (num_online_cpus() > 1 && timeout
--)
212 if (num_online_cpus() > 1)
213 pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
214 cpumask_pr_args(cpu_online_mask
));
217 void smp_send_reschedule(int cpu
)
219 send_ipi_single(cpu
, IPI_RESCHEDULE
);
221 EXPORT_SYMBOL_GPL(smp_send_reschedule
);