1 #ifndef _ASM_X86_HARDIRQ_H
2 #define _ASM_X86_HARDIRQ_H
4 #include <linux/threads.h>
8 unsigned int __softirq_pending
;
9 unsigned int __nmi_count
; /* arch dependent */
10 #ifdef CONFIG_X86_LOCAL_APIC
11 unsigned int apic_timer_irqs
; /* arch dependent */
12 unsigned int irq_spurious_count
;
13 unsigned int icr_read_retry_count
;
15 #ifdef CONFIG_HAVE_KVM
16 unsigned int kvm_posted_intr_ipis
;
18 unsigned int x86_platform_ipis
; /* arch dependent */
19 unsigned int apic_perf_irqs
;
20 unsigned int apic_irq_work_irqs
;
22 unsigned int irq_resched_count
;
23 unsigned int irq_call_count
;
25 * irq_tlb_count is double-counted in irq_call_count, so it must be
26 * subtracted from irq_call_count when displaying irq_call_count
28 unsigned int irq_tlb_count
;
30 #ifdef CONFIG_X86_THERMAL_VECTOR
31 unsigned int irq_thermal_count
;
33 #ifdef CONFIG_X86_MCE_THRESHOLD
34 unsigned int irq_threshold_count
;
36 } ____cacheline_aligned irq_cpustat_t
;
38 DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t
, irq_stat
);
/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

/* Tell generic code this arch keeps its own irq_stat. */
#define __ARCH_IRQ_STAT

/* Bump one counter in this CPU's irq_stat slot. */
#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)

#define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)

/* The softirq-pending mask lives in irq_stat, not in the generic code. */
#define __ARCH_SET_SOFTIRQ_PENDING

#define set_softirq_pending(x)	\
		this_cpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x)	this_cpu_or(irq_stat.__softirq_pending, (x))
55 extern void ack_bad_irq(unsigned int irq
);
57 extern u64
arch_irq_stat_cpu(unsigned int cpu
);
58 #define arch_irq_stat_cpu arch_irq_stat_cpu
60 extern u64
arch_irq_stat(void);
61 #define arch_irq_stat arch_irq_stat
63 #endif /* _ASM_X86_HARDIRQ_H */