/******************************************************************************
 * arch/ia64/xen/irq_xen.c
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/callback.h>
#include <xen/events.h>

#include "irq_xen.h"
/***************************************************************************
 * pv_irq_ops
 * irq operations
 */
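
/*
 * Under Xen, device vector allocation is delegated to the hypervisor:
 * the two helpers below are thin wrappers around PHYSDEVOP hypercalls
 * rather than manipulating the vector space directly.
 */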
static int
xen_assign_irq_vector(int irq)
{
	struct physdev_irq irq_op;

	irq_op.irq = irq;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
		return -ENOSPC;

	return irq_op.vector;
}
static void
xen_free_irq_vector(int vector)
{
	struct physdev_irq irq_op;

	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;

	irq_op.vector = vector;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
		printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n",
		       __func__, vector);
}
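
/*
 * Per-cpu bookkeeping for the Xen bindings: each *_irq variable holds
 * the irq returned by the evtchn binding (-1 while unbound), and each
 * *_name buffer holds the per-cpu "<action name><cpu>" string passed
 * to bind_virq_to_irqhandler()/bind_ipi_to_irqhandler() below.
 */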
static DEFINE_PER_CPU(int, timer_irq) = -1;
static DEFINE_PER_CPU(int, ipi_irq) = -1;
static DEFINE_PER_CPU(int, resched_irq) = -1;
static DEFINE_PER_CPU(int, cmc_irq) = -1;
static DEFINE_PER_CPU(int, cmcp_irq) = -1;
static DEFINE_PER_CPU(int, cpep_irq) = -1;
#define NAME_SIZE	15
static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
#undef NAME_SIZE

struct saved_irq {
	unsigned int irq;
	struct irqaction *action;
};
/* 16 should be a generous value, since only a few percpu irqs
 * are registered early.
 */
#define MAX_LATE_IRQ	16
static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
static unsigned short late_irq_cnt;
static unsigned short saved_irq_cnt;
static int xen_slab_ready;
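
/*
 * saved_irq_cnt counts every registration cached on the BSP, while
 * late_irq_cnt counts the subset made before slab was ready, i.e. the
 * leading entries that still need an evtchn binding from the late
 * time hook.
 */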

#ifdef CONFIG_SMP
/* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
 * doing so would issue several memory accesses on percpu data and thus
 * add unnecessary traffic to other paths.
 */
static irqreturn_t
xen_dummy_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static struct irqaction xen_ipi_irqaction = {
	.handler = handle_IPI,
	.flags = IRQF_DISABLED,
	.name = "IPI"
};

static struct irqaction xen_resched_irqaction = {
	.handler = xen_dummy_handler,
	.flags = IRQF_DISABLED,
	.name = "resched"
};

static struct irqaction xen_tlb_irqaction = {
	.handler = xen_dummy_handler,
	.flags = IRQF_DISABLED,
	.name = "tlb_flush"
};
#endif

/*
 * This is the Xen version of percpu irq registration, which needs to
 * bind to the Xen-specific evtchn subsystem. One trick here is that
 * the evtchn binding interface depends on kmalloc, because the related
 * port needs to be freed at device/cpu-down time. So we cache the
 * registrations made on the BSP before slab is ready and deal with
 * them later. All other registrations happen after slab is ready, so
 * we hook them into the Xen evtchn immediately.
 *
 * FIXME: MCA is not supported so far, and thus the "nomca" boot param
 * is required.
 */
static void
__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
			  struct irqaction *action, int save)
{
	irq_desc_t *desc;
	int irq = 0;
	if (xen_slab_ready) {
		switch (vec) {
		case IA64_TIMER_VECTOR:
			snprintf(per_cpu(timer_name, cpu),
				 sizeof(per_cpu(timer_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
				action->handler, action->flags,
				per_cpu(timer_name, cpu), action->dev_id);
			per_cpu(timer_irq, cpu) = irq;
			break;
		case IA64_IPI_RESCHEDULE:
			snprintf(per_cpu(resched_name, cpu),
				 sizeof(per_cpu(resched_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
				action->handler, action->flags,
				per_cpu(resched_name, cpu), action->dev_id);
			per_cpu(resched_irq, cpu) = irq;
			break;
		case IA64_IPI_VECTOR:
			snprintf(per_cpu(ipi_name, cpu),
				 sizeof(per_cpu(ipi_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
				action->handler, action->flags,
				per_cpu(ipi_name, cpu), action->dev_id);
			per_cpu(ipi_irq, cpu) = irq;
			break;
		case IA64_CMC_VECTOR:
			snprintf(per_cpu(cmc_name, cpu),
				 sizeof(per_cpu(cmc_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
						      action->handler,
						      action->flags,
						      per_cpu(cmc_name, cpu),
						      action->dev_id);
			per_cpu(cmc_irq, cpu) = irq;
			break;
		case IA64_CMCP_VECTOR:
			snprintf(per_cpu(cmcp_name, cpu),
				 sizeof(per_cpu(cmcp_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
						     action->handler,
						     action->flags,
						     per_cpu(cmcp_name, cpu),
						     action->dev_id);
			per_cpu(cmcp_irq, cpu) = irq;
			break;
		case IA64_CPEP_VECTOR:
			snprintf(per_cpu(cpep_name, cpu),
				 sizeof(per_cpu(cpep_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
						     action->handler,
						     action->flags,
						     per_cpu(cpep_name, cpu),
						     action->dev_id);
			per_cpu(cpep_irq, cpu) = irq;
			break;
		case IA64_CPE_VECTOR:
		case IA64_MCA_RENDEZ_VECTOR:
		case IA64_PERFMON_VECTOR:
		case IA64_MCA_WAKEUP_VECTOR:
		case IA64_SPURIOUS_INT_VECTOR:
			/* No need to complain, these aren't supported. */
			break;
		default:
			printk(KERN_WARNING "Percpu irq %d is unsupported "
			       "by xen!\n", vec);
			break;
		}
		BUG_ON(irq < 0);

		if (irq > 0) {
			/*
			 * Mark percpu. Without this, migrate_irqs() will
			 * mark the interrupt for migrations and trigger it
			 * on cpu hotplug.
			 */
			desc = irq_desc + irq;
			desc->status |= IRQ_PER_CPU;
		}
	}

	/* For the BSP, we cache registered percpu irqs and then re-walk
	 * them when initializing APs.
	 */
	if (!cpu && save) {
		BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
		saved_percpu_irqs[saved_irq_cnt].irq = vec;
		saved_percpu_irqs[saved_irq_cnt].action = action;
		saved_irq_cnt++;
		if (!xen_slab_ready)
			late_irq_cnt++;
	}
}
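
/*
 * Illustrative boot flow (a sketch, not code from this file): when the
 * ia64 time code registers its timer action on the BSP, e.g.
 *
 *	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
 *
 * slab is not yet up, so the pv_ops hook below only caches the pair in
 * saved_percpu_irqs[]. Once late_time_init runs
 * xen_bind_early_percpu_irq(), the cached entries are replayed with
 * xen_slab_ready set and actually bound to event channels via
 * bind_virq_to_irqhandler()/bind_ipi_to_irqhandler().
 */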
static void
xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
{
	__xen_register_percpu_irq(smp_processor_id(), vec, action, 1);
}
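
/* late_time_init hook: slab is available by now, so flag it and bind
 * the registrations that were cached before it was ready. */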
static void
xen_bind_early_percpu_irq(void)
{
	int i;

	xen_slab_ready = 1;
	/* There's no race when accessing this cached array, since only
	 * the BSP will take this step, shortly after boot.
	 */
	for (i = 0; i < late_irq_cnt; i++)
		__xen_register_percpu_irq(smp_processor_id(),
					  saved_percpu_irqs[i].irq,
					  saved_percpu_irqs[i].action, 0);
}

/* FIXME: There's no obvious point at which to check whether slab is
 * ready, so a hack is used here by utilizing the late_time_init hook.
 */

#ifdef CONFIG_HOTPLUG_CPU
static int __devinit
unbind_evtchn_callback(struct notifier_block *nfb,
		       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD) {
		/* Unregister evtchn. */
		if (per_cpu(cpep_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
			per_cpu(cpep_irq, cpu) = -1;
		}
		if (per_cpu(cmcp_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
			per_cpu(cmcp_irq, cpu) = -1;
		}
		if (per_cpu(cmc_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
			per_cpu(cmc_irq, cpu) = -1;
		}
		if (per_cpu(ipi_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
			per_cpu(ipi_irq, cpu) = -1;
		}
		if (per_cpu(resched_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(resched_irq, cpu),
					       NULL);
			per_cpu(resched_irq, cpu) = -1;
		}
		if (per_cpu(timer_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
			per_cpu(timer_irq, cpu) = -1;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block unbind_evtchn_notifier = {
	.notifier_call = unbind_evtchn_callback,
	.priority = 0
};
#endif
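
/*
 * Replay the percpu irq registrations that were cached on the BSP for
 * a newly started AP, binding each one to that cpu's event channels.
 */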
void xen_smp_intr_init_early(unsigned int cpu)
{
#ifdef CONFIG_SMP
	unsigned int i;

	for (i = 0; i < saved_irq_cnt; i++)
		__xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
					  saved_percpu_irqs[i].action, 0);
#endif
}
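
/*
 * Register the Xen event-channel callback for the calling vcpu. On the
 * boot cpu the callback was already registered by xen_irq_init(), so
 * only the cpu-hotplug notifier needs to be installed there, and only
 * once.
 */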
void xen_smp_intr_init(void)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = { .ip = (unsigned long)&xen_event_callback },
	};

	if (cpu == 0) {
		/* Initialization was already done for the boot cpu. */
#ifdef CONFIG_HOTPLUG_CPU
		/* Register the notifier only once. */
		register_cpu_notifier(&unbind_evtchn_notifier);
#endif
		return;
	}

	/* This should piggyback on vcpu guest-context setup. */
	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
#endif /* CONFIG_SMP */
}
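
/*
 * Early irq setup for the boot cpu: bring up the core evtchn code,
 * register the event callback with the hypervisor, and hook
 * late_time_init so that cached percpu irq registrations are bound
 * once slab is available (see the FIXME after
 * xen_bind_early_percpu_irq).
 */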
void __init
xen_irq_init(void)
{
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = { .ip = (unsigned long)&xen_event_callback },
	};

	xen_init_IRQ();
	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
	late_time_init = xen_bind_early_percpu_irq;
}
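
/*
 * Translate an ia64 IPI into its Xen equivalent: the well-known percpu
 * vectors map onto dedicated Xen IPI event channels, while the AP
 * wakeup vector doubles as the cpu bring-up path under CONFIG_SMP.
 */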
void
xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
{
#ifdef CONFIG_SMP
	/* TODO: we need to call vcpu_up here */
	if (unlikely(vector == ap_wakeup_vector)) {
		/* XXX
		 * This should be in __cpu_up(cpu) in ia64 smpboot.c
		 * like on x86, but we don't want to modify it,
		 * so keep it untouched.
		 */
		xen_smp_intr_init_early(cpu);

		xen_send_ipi(cpu, vector);
		/* vcpu_prepare_and_up(cpu); */
		return;
	}
#endif

	switch (vector) {
	case IA64_IPI_VECTOR:
		xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
		break;
	case IA64_IPI_RESCHEDULE:
		xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
		break;
	case IA64_CMCP_VECTOR:
		xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
		break;
	case IA64_CPEP_VECTOR:
		xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
		break;
	default:
		printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
		       vector);
		notify_remote_via_irq(0); /* defaults to irq 0 */
		break;
	}
}
static void __init
xen_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction);
	register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction);
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction);
#endif
}
static void
xen_resend_irq(unsigned int vector)
{
	(void)resend_irq_on_evtchn(vector);
}
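
/*
 * The Xen instance of pv_irq_ops. Presumably (not shown in this file)
 * it is copied into the global pv_irq_ops during Xen pv_ops setup;
 * __initdata is safe because the table is consumed only at init time.
 */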
const struct pv_irq_ops xen_irq_ops __initdata = {
	.register_ipi = xen_register_ipi,

	.assign_irq_vector = xen_assign_irq_vector,
	.free_irq_vector = xen_free_irq_vector,
	.register_percpu_irq = xen_register_percpu_irq,

	.resend_irq = xen_resend_irq,
};