OMAP: Add new function to check whether there is irq pending
linux-ginger.git: arch/x86/include/asm/hardirq.h
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>

typedef struct {
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* arch dependent */
	unsigned int irq0_irqs;
#ifdef CONFIG_X86_LOCAL_APIC
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int irq_spurious_count;
#endif
	unsigned int generic_irqs;	/* arch dependent */
#ifdef CONFIG_SMP
	unsigned int irq_resched_count;
	unsigned int irq_call_count;
	unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
	unsigned int irq_thermal_count;
# ifdef CONFIG_X86_64
	unsigned int irq_threshold_count;
# endif
#endif
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

#define __ARCH_IRQ_STAT

#define inc_irq_stat(member)	percpu_add(irq_stat.member, 1)

#define local_softirq_pending()	percpu_read(irq_stat.__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING

#define set_softirq_pending(x)	percpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x)	percpu_or(irq_stat.__softirq_pending, (x))

extern void ack_bad_irq(unsigned int irq);

extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat		arch_irq_stat

#endif /* _ASM_X86_HARDIRQ_H */
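
For context on how these hooks fit together, a minimal sketch is shown below. It is not part of this blob, and the example function names are illustrative: inc_irq_stat() is the macro x86 interrupt handlers use to bump a per-CPU counter without locking, while arch_irq_stat_cpu() and arch_irq_stat() are the hooks the generic /proc/stat code calls to fold these counters back into the system-wide interrupt totals.

/* Sketch only: illustrative callers of the hooks declared above. */

static void example_resched_handler(void)
{
	/* percpu_add on this CPU's irq_stat slot; no lock needed. */
	inc_irq_stat(irq_resched_count);
}

static u64 example_total_irqs(void)
{
	u64 sum = arch_irq_stat();		/* arch-wide extras */
	int cpu;

	for_each_possible_cpu(cpu)
		sum += arch_irq_stat_cpu(cpu);	/* per-CPU counters */
	return sum;
}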