/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
5 #ifndef __ASM_HARDIRQ_H
6 #define __ASM_HARDIRQ_H
8 #include <linux/cache.h>
9 #include <linux/percpu.h>
10 #include <linux/threads.h>
11 #include <asm/barrier.h>
13 #include <asm/kvm_arm.h>
14 #include <asm/sysreg.h>
19 unsigned int __softirq_pending
;
20 unsigned int ipi_irqs
[NR_IPI
];
21 } ____cacheline_aligned irq_cpustat_t
;
23 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/* Bump/read one member of @cpu's irq_cpustat_t via __IRQ_STAT(). */
#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)
28 u64
smp_irq_stat_cpu(unsigned int cpu
);
29 #define arch_irq_stat_cpu smp_irq_stat_cpu
/* Tell the core that irq_exit() runs with interrupts disabled here. */
#define __ARCH_IRQ_EXIT_IRQS_DISABLED	1
37 DECLARE_PER_CPU(struct nmi_ctx
, nmi_contexts
);
/*
 * On NMI entry while running at EL2 (VHE): save the current HCR_EL2
 * and force HCR_TGE on if it was clear, with an isb() to make the
 * sysreg write take effect before the NMI body runs.
 * NOTE(review): TGE redirects EL1 exceptions to EL2 per the ARM ARM —
 * confirm against the architecture manual.
 */
#define arch_nmi_enter()						\
	do {								\
		if (is_kernel_in_hyp_mode()) {				\
			struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
			nmi_ctx->hcr = read_sysreg(hcr_el2);		\
			if (!(nmi_ctx->hcr & HCR_TGE)) {		\
				write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2); \
				isb();					\
			}						\
		}							\
	} while (0)
/*
 * On NMI exit: restore the HCR_EL2 value saved by arch_nmi_enter(),
 * but only if TGE was clear on entry (i.e. we were the ones who set
 * it); otherwise the register is already in the saved state.
 */
#define arch_nmi_exit()							\
	do {								\
		if (is_kernel_in_hyp_mode()) {				\
			struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \
			if (!(nmi_ctx->hcr & HCR_TGE))			\
				write_sysreg(nmi_ctx->hcr, hcr_el2);	\
		}							\
	} while (0)
60 static inline void ack_bad_irq(unsigned int irq
)
62 extern unsigned long irq_err_count
;
66 #endif /* __ASM_HARDIRQ_H */