Linux 4.19.168 - arch/riscv/kernel/smp.c
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/sched.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/* A collection of single bit ipi messages. */
static struct {
        unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

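/*
 * IPI operations; each value is a bit position in ipi_data[cpu].bits,
 * with IPI_MAX bounding the valid range.
 */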
enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_MAX
};

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

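/*
 * Handle an IPI delivered as a supervisor software interrupt: acknowledge
 * it in the sip CSR, then drain and dispatch the pending operations for
 * this hart.
 */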
void riscv_software_interrupt(void)
{
        unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;

        /* Clear pending IPI */
        csr_clear(sip, SIE_SSIE);

        while (true) {
                unsigned long ops;

                /* Order bit clearing and data access. */
                mb();

                ops = xchg(pending_ipis, 0);
                if (ops == 0)
                        return;

                if (ops & (1 << IPI_RESCHEDULE))
                        scheduler_ipi();

                if (ops & (1 << IPI_CALL_FUNC))
                        generic_smp_call_function_interrupt();

                BUG_ON((ops >> IPI_MAX) != 0);

                /* Order data access and bit testing. */
                mb();
        }
}

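/*
 * Set the requested operation bit for each target hart and ask the SBI
 * firmware to raise a software interrupt on those harts.
 */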
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
        int i;

        mb();
        for_each_cpu(i, to_whom)
                set_bit(operation, &ipi_data[i].bits);

        mb();
        sbi_send_ipi(cpumask_bits(to_whom));
}

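/* Request a function-call IPI on every CPU in @mask. */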
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

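/* Request a function-call IPI on a single CPU. */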
void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

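/* Park the calling hart in a wait-for-interrupt loop; this never returns. */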
static void ipi_stop(void *unused)
{
        while (1)
                wait_for_interrupt();
}

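/* Park every hart, including the current one, in ipi_stop(). */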
void smp_send_stop(void)
{
        on_each_cpu(ipi_stop, NULL, 1);
}

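/* Ask @cpu to run the scheduler by sending it a reschedule IPI. */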
void smp_send_reschedule(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shoot downs, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
 * IPIs for harts that are not currently executing a MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
        unsigned int cpu;
        cpumask_t others, *mask;

        preempt_disable();

        /* Mark every hart's icache as needing a flush for this MM. */
        mask = &mm->context.icache_stale_mask;
        cpumask_setall(mask);
        /* Flush this hart's I$ now, and mark it as flushed. */
        cpu = smp_processor_id();
        cpumask_clear_cpu(cpu, mask);
        local_flush_icache_all();

        /*
         * Flush the I$ of other harts concurrently executing, and mark them as
         * flushed.
         */
        cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
        local |= cpumask_empty(&others);
        if (mm != current->active_mm || !local)
                sbi_remote_fence_i(others.bits);
        else {
                /*
                 * It's assumed that at least one strongly ordered operation is
                 * performed on this hart between setting a hart's cpumask bit
                 * and scheduling this MM context on that hart.  Sending an SBI
                 * remote message will do this, but in the case where no
                 * messages are sent we still need to order this hart's writes
                 * with flush_icache_deferred().
                 */
                smp_mb();
        }

        preempt_enable();
}