arch/x86/include/asm/hardirq.h (zen-stable.git)
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>

/* Per-CPU interrupt statistics; one instance of this lives in irq_stat. */
typedef struct {
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* arch dependent */
	unsigned int irq0_irqs;
#ifdef CONFIG_X86_LOCAL_APIC
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int irq_spurious_count;
	unsigned int icr_read_retry_count;
#endif
	unsigned int x86_platform_ipis;	/* arch dependent */
	unsigned int apic_perf_irqs;
	unsigned int apic_irq_work_irqs;
#ifdef CONFIG_SMP
	unsigned int irq_resched_count;
	unsigned int irq_call_count;
	unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	unsigned int irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	unsigned int irq_threshold_count;
#endif
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

#define __ARCH_IRQ_STAT

/* Increment one irq_cpustat_t member of the current CPU's irq_stat. */
#define inc_irq_stat(member)	percpu_inc(irq_stat.member)

#define local_softirq_pending()	percpu_read(irq_stat.__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING

#define set_softirq_pending(x)	percpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x)	percpu_or(irq_stat.__softirq_pending, (x))

extern void ack_bad_irq(unsigned int irq);

extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat		arch_irq_stat

#endif /* _ASM_X86_HARDIRQ_H */
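
For context, a minimal sketch (not part of hardirq.h) of how these per-CPU counters are typically consumed elsewhere in arch/x86: interrupt handlers bump irq_stat via inc_irq_stat(), and arch_irq_stat_cpu()/arch_irq_stat() later fold the counts into /proc/stat. The handler below is modeled on the reschedule IPI handler in arch/x86/kernel/smp.c of the same kernel era; the exact body is illustrative, and ack_APIC_irq()/scheduler_ipi() come from other headers, not from this one.

	#include <linux/sched.h>	/* scheduler_ipi() */
	#include <asm/apic.h>		/* ack_APIC_irq() */
	#include <asm/hardirq.h>	/* inc_irq_stat(), irq_stat */

	/*
	 * Illustrative x86 IPI handler: acknowledge the local APIC, then bump
	 * this CPU's reschedule-IPI counter in irq_stat.  The counter shows up
	 * as the "RES" line in /proc/interrupts and in the per-CPU total
	 * returned by arch_irq_stat_cpu().
	 */
	void smp_reschedule_interrupt(struct pt_regs *regs)
	{
		ack_APIC_irq();
		inc_irq_stat(irq_resched_count);  /* percpu_inc(irq_stat.irq_resched_count) */
		scheduler_ipi();
	}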