/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *			PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *			Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)

/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

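/*
 * Consumers normally go through the isa_irq_to_vector() accessor in
 * <asm/hw_irq.h> rather than indexing this table directly; e.g. legacy
 * IRQ 1 (keyboard) maps to vector 0x20 above.
 */
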
DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = IRQ_UNUSED
};

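/*
 * Bookkeeping overview: irq_cfg[] records the vector and CPU domain
 * assigned to each IRQ; the per-CPU vector_irq[] array is the reverse
 * map from vector to IRQ on each CPU; vector_table[] remembers which
 * CPUs already use a given vector; and irq_status[] tracks whether an
 * IRQ number is unused, in use, or reserved.  All four are kept
 * consistent under vector_lock.
 */
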
static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpumask_and(&mask, &domain, &vector_table[vector]);
		if (!cpumask_empty(&mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}

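/*
 * Bind @irq to @vector on every online CPU in @domain.  Returns 0 if
 * the binding was made (or already exists unchanged), -EINVAL if no
 * CPU of the domain is online, and -EBUSY if the IRQ is already bound
 * differently.  Caller must hold vector_lock.
 */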
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu(cpu, &mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
	return 0;
}

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}

void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}

int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}

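/*
 * Note that the double negation above folds any bind_irq_vector()
 * error code into 1, so callers see only success (0) or failure (1),
 * never the specific errno.
 */
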
/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the inuse vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

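/*
 * This is expected to run on the CPU bring-up path (presumably from
 * the secondary-CPU startup code, which takes vector_lock around it).
 */
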
#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))

static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return *cpumask_of(cpu);
	return CPU_MASK_ALL;
}

static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpumask_test_cpu(cpu, &cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
	for_each_cpu(i, &cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}

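/*
 * Vector migration happens in two phases: irq_prepare_move() binds the
 * IRQ to a fresh vector in the destination domain while saving the old
 * domain, and once an interrupt arrives via the new vector,
 * irq_complete_move() above IPIs every online CPU of the old domain
 * with IA64_IRQ_MOVE_VECTOR so that each retires its stale vector_irq
 * entry in smp_irq_move_cleanup_interrupt() below.
 */
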
static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;

		irq = __this_cpu_read(vector_irq[vector]);
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpumask_test_cpu(me, &cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__this_cpu_write(vector_irq[vector], -1);
		cpumask_clear_cpu(me, &vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}

static struct irqaction irq_move_irqaction = {
	.handler =	smp_irq_move_cleanup_interrupt,
	.name =		"irq_move"
};

static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);

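/*
 * Booting with "vector=percpu" thus gives every CPU its own allocation
 * domain, so a device vector occupies a slot on one CPU only instead
 * of on all of them.
 */
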
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif

void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	irq_init_desc(irq);
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic IRQ allocation and deallocation for MSI
 */
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		irq_init_desc(irq);
	return irq;
}

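/*
 * Typical MSI-path usage of the pair above, e.g.:
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;
 *	...program the MSI message for irq...
 *	destroy_irq(irq);
 */
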
void destroy_irq(unsigned int irq)
{
	irq_init_desc(irq);
	clear_irq_vector(irq);
}

#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#	define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#	define IS_RESCHEDULE(vec)	(0)
#	define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif

/*
 * This is where the IVT branches when we get an external
 * interrupt. This branches to the correct hardware IRQ handler via
 * function pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here. This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irq_this_cpu(irq);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a CPU is about to
 * be brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irq_this_cpu(irq);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Call the normal handler path as if this had come
			 * from a real interrupt, passing NULL for pt_regs;
			 * this could probably share code with
			 * ia64_handle_irq().
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif

#ifdef CONFIG_SMP

static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_NONE;
}

static struct irqaction ipi_irqaction = {
	.handler =	handle_IPI,
	.name =		"IPI"
};

/*
 * KVM uses this interrupt to force a CPU out of guest mode
 */
static struct irqaction resched_irqaction = {
	.handler =	dummy_handler,
	.name =		"resched"
};

static struct irqaction tlb_irqaction = {
	.handler =	dummy_handler,
	.name =		"tlb_flush"
};

#endif

void
ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	irq_set_status_flags(irq, IRQ_PER_CPU);
	irq_set_chip(irq, &irq_type_ia64_lsapic);
	if (action)
		setup_irq(irq, action);
	irq_set_handler(irq, handle_percpu_irq);
}

void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
#endif
}

void __init
init_IRQ (void)
{
	acpi_boot_init();
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
	if (vector_domain_type != VECTOR_DOMAIN_NONE)
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR,
				    &irq_move_irqaction);
#endif
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	platform_irq_init();
}

void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * CPU number is encoded as an 8-bit ID and an 8-bit EID
	 */

	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

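	/*
	 * Example: assuming cpu_physical_id() packs the 8-bit ID in
	 * the upper and the 8-bit EID in the lower byte, a target with
	 * phys_cpu_id 0x0102 and redirect == 0 is written at
	 * ipi_base_addr + 0x1020; the vector sits in bits 0-7 of
	 * ipi_data and the delivery mode in bits 8 and up.
	 */
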
	writeq(ipi_data, ipi_addr);
}