[linux/fpc-iii.git] / arch / riscv / kernel / smp.c
blob 57b1383e5ef7480142ce857e600a6f0e73a0944e
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_MAX
};

/* A collection of single bit ipi messages. */
static struct {
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

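/*
 * For illustration (not part of the original file): a pending
 * IPI_RESCHEDULE for logical cpu 2 is encoded as
 * ipi_data[2].bits == (1UL << IPI_RESCHEDULE).  A sender publishes a
 * message with set_bit() in send_ipi_message(); the receiver drains the
 * whole word atomically with xchg() in riscv_software_interrupt(), so
 * messages are never lost even when several arrive back to back.
 */
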
/*
 * Logical CPU ids are dense (0..NR_CPUS-1) while platform-assigned
 * hartids may be sparse, so the reverse translation is a linear search
 * over the map.
 */
int riscv_hartid_to_cpuid(int hartid)
{
	int i = -1;

	for (i = 0; i < NR_CPUS; i++)
		if (cpuid_to_hartid_map(i) == hartid)
			return i;

	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
	BUG();
	return i;
}

void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
	int cpu;

	for_each_cpu(cpu, in)
		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void riscv_software_interrupt(void)
{
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
	unsigned long *stats = ipi_data[smp_processor_id()].stats;

	/* Clear pending IPI */
	csr_clear(sip, SIE_SSIE);

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			return;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}
}

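/*
 * For context, a minimal sketch of how riscv_software_interrupt() is
 * reached.  This is not part of this file; it assumes the trap dispatch
 * of the same era in arch/riscv/kernel/irq.c, and the cause-macro names
 * (INTERRUPT_CAUSE_*) are an approximation rather than taken from this
 * tree.
 */
asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs, unsigned long cause)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	switch (cause & ~INTERRUPT_CAUSE_FLAG) {
#ifdef CONFIG_SMP
	case INTERRUPT_CAUSE_SOFTWARE:
		/* The SBI IPI raised in send_ipi_message() lands here. */
		riscv_software_interrupt();
		break;
#endif
	default:
		/* Timer and external interrupts elided in this sketch. */
		break;
	}
	irq_exit();
	set_irq_regs(old_regs);
}
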
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int cpuid, hartid;
	struct cpumask hartid_mask;

	cpumask_clear(&hartid_mask);
	mb();
	for_each_cpu(cpuid, to_whom) {
		set_bit(operation, &ipi_data[cpuid].bits);
		hartid = cpuid_to_hartid_map(cpuid);
		cpumask_set_cpu(hartid, &hartid_mask);
	}
	mb();
	sbi_send_ipi(cpumask_bits(&hartid_mask));
}

static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
};

void show_ipi_stats(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

static void ipi_stop(void *unused)
{
	while (1)
		wait_for_interrupt();
}

void smp_send_stop(void)
{
	on_each_cpu(ipi_stop, NULL, 1);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

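/*
 * Usage example (hypothetical, not in the original file): generic kernel
 * code never calls send_ipi_message() directly; it goes through the arch
 * hooks above.  A cross-call such as the one below reaches
 * arch_send_call_function_single_ipi(), which raises IPI_CALL_FUNC on
 * the target hart.  remote_hello() and example_cross_call() are made-up
 * names for illustration.
 */
static void remote_hello(void *info)
{
	pr_info("hello from cpu %d\n", smp_processor_id());
}

static void example_cross_call(void)
{
	/* Run remote_hello() on CPU 1 and wait for it to complete. */
	smp_call_function_single(1, remote_hello, NULL, 1);
}
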
/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, hmask, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them
	 * as flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm != current->active_mm || !local) {
		cpumask_clear(&hmask);
		riscv_cpuid_to_hartid_mask(&others, &hmask);
		sbi_remote_fence_i(hmask.bits);
	} else {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	}

	preempt_enable();
}

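/*
 * For reference, a sketch of the consumer side referenced by the comment
 * above.  This function is not defined in this file; in kernels of this
 * vintage the equivalent lived on the RISC-V switch_mm/mmu_context path,
 * so treat the exact location and body as an approximation.  Each hart
 * checks the stale mask before resuming execution of the MM and performs
 * the flush that flush_icache_mm() deferred.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with the smp_mb() in flush_icache_mm().
		 */
		smp_mb();
		local_flush_icache_all();
	}
#endif
}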