mm: fix exec activate_mm vs TLB shootdown and lazy tlb switching race
[linux/fpc-iii.git] arch/tile/kernel/smp.c
blob 94a62e1197ce8a0e3b4438e2e70ddb957a6fc5c0
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE SMP support routines.
 */

#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/homecache.h>

/*
 * We write to width and height with a single store in head_NN.S,
 * so make the variable aligned to "long".
 */
HV_Topology smp_topology __ro_after_init __aligned(sizeof(long));
EXPORT_SYMBOL(smp_topology);

#if CHIP_HAS_IPI()
static unsigned long __iomem *ipi_mappings[NR_CPUS];
#endif

/* Does messaging work correctly to the local cpu? */
bool self_interrupt_ok;

/*
 * Top-level send_IPI*() functions to send messages to other cpus.
 */

/* Set by smp_send_stop() to avoid recursive panics. */
static int stopping_cpus;
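
/*
 * Deliver a message tag to an array of recipients.  hv_send_message()
 * may accept only a subset of the recipients per call, so keep retrying
 * until every recipient has been sent to.
 */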
static void __send_IPI_many(HV_Recipient *recip, int nrecip, int tag)
{
        int sent = 0;
        while (sent < nrecip) {
                int rc = hv_send_message(recip, nrecip,
                                         (HV_VirtAddr)&tag, sizeof(tag));
                if (rc < 0) {
                        if (!stopping_cpus) /* avoid recursive panic */
                                panic("hv_send_message returned %d", rc);
                        break;
                }
                WARN_ONCE(rc == 0, "hv_send_message() returned zero\n");
                sent += rc;
        }
}
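
/* Send a message tag to a single cpu, addressed by its grid coordinates. */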
void send_IPI_single(int cpu, int tag)
{
        HV_Recipient recip = {
                .y = cpu / smp_width,
                .x = cpu % smp_width,
                .state = HV_TO_BE_SENT
        };
        __send_IPI_many(&recip, 1, tag);
}
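
/* Send a message tag to each cpu in the mask (which must not include us). */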
void send_IPI_many(const struct cpumask *mask, int tag)
{
        HV_Recipient recip[NR_CPUS];
        int cpu;
        int nrecip = 0;
        int my_cpu = smp_processor_id();
        for_each_cpu(cpu, mask) {
                HV_Recipient *r;
                BUG_ON(cpu == my_cpu);
                r = &recip[nrecip++];
                r->y = cpu / smp_width;
                r->x = cpu % smp_width;
                r->state = HV_TO_BE_SENT;
        }
        __send_IPI_many(recip, nrecip, tag);
}
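
/* Send a message tag to all online cpus other than the current one. */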
void send_IPI_allbutself(int tag)
{
        struct cpumask mask;
        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);
        send_IPI_many(&mask, tag);
}

/*
 * Functions related to starting/stopping cpus.
 */

/* Handler to start the current cpu. */
static void smp_start_cpu_interrupt(void)
{
        get_irq_regs()->pc = start_cpu_function_addr;
}

/* Handler to stop the current cpu. */
static void smp_stop_cpu_interrupt(void)
{
        arch_local_irq_disable_all();
        set_cpu_online(smp_processor_id(), 0);
        for (;;)
                asm("nap; nop");
}

/* This function calls the 'stop' function on all other CPUs in the system. */
void smp_send_stop(void)
{
        stopping_cpus = 1;
        send_IPI_allbutself(MSG_TAG_STOP_CPU);
}

/* On panic, just wait; we may get an smp_send_stop() later on. */
void panic_smp_self_stop(void)
{
        while (1)
                asm("nap; nop");
}

/*
 * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
 */
void evaluate_message(int tag)
{
        switch (tag) {
        case MSG_TAG_START_CPU: /* Start up a cpu */
                smp_start_cpu_interrupt();
                break;

        case MSG_TAG_STOP_CPU: /* Sent to shut down slave CPU's */
                smp_stop_cpu_interrupt();
                break;

        case MSG_TAG_CALL_FUNCTION_MANY: /* Call function on cpumask */
                generic_smp_call_function_interrupt();
                break;

        case MSG_TAG_CALL_FUNCTION_SINGLE: /* Call function on one other CPU */
                generic_smp_call_function_single_interrupt();
                break;

        case MSG_TAG_IRQ_WORK: /* Invoke IRQ work */
                irq_work_run();
                break;

        default:
                panic("Unknown IPI message tag %d", tag);
                break;
        }
}

/*
 * flush_icache_range() code uses smp_call_function().
 */

struct ipi_flush {
        unsigned long start;
        unsigned long end;
};

static void ipi_flush_icache_range(void *info)
{
        struct ipi_flush *flush = (struct ipi_flush *)info;
        __flush_icache_range(flush->start, flush->end);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
        struct ipi_flush flush = { start, end };

        /* If invoked with irqs disabled, we can not issue IPIs. */
        if (irqs_disabled())
                flush_remote(0, HV_FLUSH_EVICT_L1I, NULL, 0, 0, 0,
                             NULL, NULL, 0);
        else {
                preempt_disable();
                on_each_cpu(ipi_flush_icache_range, &flush, 1);
                preempt_enable();
        }
}
EXPORT_SYMBOL(flush_icache_range);
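
/*
 * Self-IPI to run pending irq_work: evaluate_message() dispatches
 * MSG_TAG_IRQ_WORK to irq_work_run().
 */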
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
        if (arch_irq_work_has_interrupt())
                send_IPI_single(smp_processor_id(), MSG_TAG_IRQ_WORK);
}
#endif

/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
static irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
        __this_cpu_inc(irq_stat.irq_resched_count);
        scheduler_ipi();

        return IRQ_HANDLED;
}

static struct irqaction resched_action = {
        .handler = handle_reschedule_ipi,
        .name = "resched",
        .dev_id = handle_reschedule_ipi /* unique token */,
};
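
/*
 * Early IPI setup: check whether the hypervisor lets us message
 * ourselves (needed for fast irq_work_raise), map the per-cpu IPI
 * trigger addresses on chips with hardware IPI support, and register
 * the reschedule IPI handler.
 */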
void __init ipi_init(void)
{
        int cpu = smp_processor_id();
        HV_Recipient recip = { .y = cpu_y(cpu), .x = cpu_x(cpu),
                               .state = HV_TO_BE_SENT };
        int tag = MSG_TAG_CALL_FUNCTION_SINGLE;

        /*
         * Test if we can message ourselves for arch_irq_work_raise.
         * This functionality is only available in the Tilera hypervisor
         * in versions 4.3.4 and following.
         */
        if (hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag)) == 1)
                self_interrupt_ok = true;
        else
                pr_warn("Older hypervisor: disabling fast irq_work_raise\n");

#if CHIP_HAS_IPI()
        /* Map IPI trigger MMIO addresses. */
        for_each_possible_cpu(cpu) {
                HV_Coord tile;
                HV_PTE pte;
                unsigned long offset;

                tile.x = cpu_x(cpu);
                tile.y = cpu_y(cpu);
                if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
                        panic("Failed to initialize IPI for cpu %d\n", cpu);

                offset = PFN_PHYS(pte_pfn(pte));
                ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
        }
#endif

        /* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. */
        tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU);
        BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action));
}
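
/*
 * smp_send_reschedule() comes in two flavors: chips with hardware IPI
 * support trigger the interrupt with a direct MMIO store; otherwise we
 * ask the hypervisor to deliver it.
 */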
#if CHIP_HAS_IPI()

void smp_send_reschedule(int cpu)
{
        WARN_ON(cpu_is_offline(cpu));

        /*
         * We just want to do an MMIO store.  The traditional writeq()
         * functions aren't really correct here, since they're always
         * directed at the PCI shim.  For now, just do a raw store,
         * casting away the __iomem attribute.
         */
        ((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0;
}

#else

void smp_send_reschedule(int cpu)
{
        HV_Coord coord;

        WARN_ON(cpu_is_offline(cpu));

        coord.y = cpu_y(cpu);
        coord.x = cpu_x(cpu);
        hv_trigger_ipi(coord, IRQ_RESCHEDULE);
}

#endif /* CHIP_HAS_IPI() */