/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE SMP support routines.
 */

#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <asm/cacheflush.h>

HV_Topology smp_topology __write_once;
EXPORT_SYMBOL(smp_topology);

#if CHIP_HAS_IPI()
static unsigned long __iomem *ipi_mappings[NR_CPUS];
#endif

/*
 * Top-level send_IPI*() functions to send messages to other cpus.
 */

/* Set by smp_send_stop() to avoid recursive panics. */
static int stopping_cpus;

/* Retry until the hypervisor has accepted messages for all recipients. */
static void __send_IPI_many(HV_Recipient *recip, int nrecip, int tag)
{
	int sent = 0;
	while (sent < nrecip) {
		int rc = hv_send_message(recip, nrecip,
					 (HV_VirtAddr)&tag, sizeof(tag));
		if (rc < 0) {
			if (!stopping_cpus)  /* avoid recursive panic */
				panic("hv_send_message returned %d", rc);
			break;
		}
		WARN_ONCE(rc == 0, "hv_send_message() returned zero\n");
		sent += rc;
	}
}

void send_IPI_single(int cpu, int tag)
{
	HV_Recipient recip = {
		.y = cpu / smp_width,
		.x = cpu % smp_width,
		.state = HV_TO_BE_SENT
	};
	__send_IPI_many(&recip, 1, tag);
}
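
/*
 * Illustrative sketch, not part of the original flow: the tag passed
 * here is the same small integer that evaluate_message() below decodes
 * on the target cpu.  For example,
 *
 *	send_IPI_single(cpu, MSG_TAG_STOP_CPU);
 *
 * would cause "cpu" to run smp_stop_cpu_interrupt() once its
 * hypervisor message interrupt is delivered via hv_message_intr().
 */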

void send_IPI_many(const struct cpumask *mask, int tag)
{
	HV_Recipient recip[NR_CPUS];
	int cpu;
	int nrecip = 0;
	int my_cpu = smp_processor_id();
	for_each_cpu(cpu, mask) {
		HV_Recipient *r;
		BUG_ON(cpu == my_cpu);
		r = &recip[nrecip++];
		r->y = cpu / smp_width;
		r->x = cpu % smp_width;
		r->state = HV_TO_BE_SENT;
	}
	__send_IPI_many(recip, nrecip, tag);
}

void send_IPI_allbutself(int tag)
{
	struct cpumask mask;
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	send_IPI_many(&mask, tag);
}

/*
 * Provide smp_call_function_mask, but also run function locally
 * if specified in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *),
		      void *info, bool wait)
{
	int cpu = get_cpu();
	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
	put_cpu();
}
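
/*
 * Illustrative sketch only (bump_counter and its argument are made up
 * for this example): a typical caller passes a helper that is safe to
 * run in interrupt context, e.g.
 *
 *	static void bump_counter(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	on_each_cpu_mask(mask, bump_counter, &counter, 1);
 *
 * Cpus in "mask" other than the caller run the helper from IPI
 * context; the caller runs it directly, with interrupts disabled.
 */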

/*
 * Functions related to starting/stopping cpus.
 */

/* Handler to start the current cpu. */
static void smp_start_cpu_interrupt(void)
{
	get_irq_regs()->pc = start_cpu_function_addr;
}

/* Handler to stop the current cpu. */
static void smp_stop_cpu_interrupt(void)
{
	set_cpu_online(smp_processor_id(), 0);
	arch_local_irq_disable_all();
	for (;;)
		asm("nap");
}

/* This function calls the 'stop' function on all other CPUs in the system. */
void smp_send_stop(void)
{
	stopping_cpus = 1;
	send_IPI_allbutself(MSG_TAG_STOP_CPU);
}

/*
 * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
 */
void evaluate_message(int tag)
{
	switch (tag) {
	case MSG_TAG_START_CPU: /* Start up a cpu */
		smp_start_cpu_interrupt();
		break;

	case MSG_TAG_STOP_CPU: /* Sent to shut down slave CPUs */
		smp_stop_cpu_interrupt();
		break;

	case MSG_TAG_CALL_FUNCTION_MANY: /* Call function on cpumask */
		generic_smp_call_function_interrupt();
		break;

	case MSG_TAG_CALL_FUNCTION_SINGLE: /* Call function on one other CPU */
		generic_smp_call_function_single_interrupt();
		break;

	default:
		panic("Unknown IPI message tag %d", tag);
		break;
	}
}
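
/*
 * Illustrative sketch of how the send and receive paths pair up:
 *
 *	sender:    send_IPI_single(cpu, MSG_TAG_STOP_CPU);
 *	receiver:  hv_message_intr() -> evaluate_message(MSG_TAG_STOP_CPU)
 *	                             -> smp_stop_cpu_interrupt()
 *
 * A new message type would need both a MSG_TAG_* value and a matching
 * case in the switch above.
 */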

/*
 * flush_icache_range() code uses smp_call_function().
 */

struct ipi_flush {
	unsigned long start;
	unsigned long end;
};

static void ipi_flush_icache_range(void *info)
{
	struct ipi_flush *flush = (struct ipi_flush *) info;
	__flush_icache_range(flush->start, flush->end);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct ipi_flush flush = { start, end };
	preempt_disable();
	on_each_cpu(ipi_flush_icache_range, &flush, 1);
	preempt_enable();
}
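
/*
 * Illustrative sketch (addr/insn/len are made-up names): code that
 * rewrites kernel text must push the new instructions out to every
 * cpu's icache, e.g.
 *
 *	memcpy(addr, insn, len);
 *	flush_icache_range((unsigned long)addr, (unsigned long)addr + len);
 *
 * Without the cross-cpu flush above, other cpus could keep executing
 * stale instructions from their local icaches.
 */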

/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
static irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
	__get_cpu_var(irq_stat).irq_resched_count++;
	scheduler_ipi();

	return IRQ_HANDLED;
}

static struct irqaction resched_action = {
	.handler = handle_reschedule_ipi,
	.name = "resched",
	.dev_id = handle_reschedule_ipi /* unique token */,
};

void __init ipi_init(void)
{
#if CHIP_HAS_IPI()
	int cpu;
	/* Map IPI trigger MMIO addresses. */
	for_each_possible_cpu(cpu) {
		HV_Coord tile;
		HV_PTE pte;
		unsigned long offset;

		tile.x = cpu_x(cpu);
		tile.y = cpu_y(cpu);
		if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
			panic("Failed to initialize IPI for cpu %d\n", cpu);

		offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
		ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
	}
#endif

	/* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. */
	tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU);
	BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action));
}
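
/*
 * Illustrative note: on chips with direct IPI support, the
 * ipi_mappings[] set up above gives each cpu an MMIO window in which
 * a store to word N appears to raise IPI event N on that cpu;
 * smp_send_reschedule() below uses exactly that to raise
 * IRQ_RESCHEDULE.
 */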

#if CHIP_HAS_IPI()

void smp_send_reschedule(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));

	/*
	 * We just want to do an MMIO store.  The traditional writeq()
	 * functions aren't really correct here, since they're always
	 * directed at the PCI shim.  For now, just do a raw store,
	 * casting away the __iomem attribute.
	 */
	((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0;
}

#else

void smp_send_reschedule(int cpu)
{
	HV_Coord coord;

	WARN_ON(cpu_is_offline(cpu));

	coord.y = cpu_y(cpu);
	coord.x = cpu_x(cpu);
	hv_trigger_ipi(coord, IRQ_RESCHEDULE);
}

#endif /* CHIP_HAS_IPI() */