/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H

#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/threads.h>
#include <asm/barrier.h>
#include <asm/kvm_arm.h>
#include <asm/sysreg.h>

#define ack_bad_irq ack_bad_irq
#include <asm-generic/hardirq.h>

#define __ARCH_IRQ_EXIT_IRQS_DISABLED	1
26 DECLARE_PER_CPU(struct nmi_ctx
, nmi_contexts
);
/*
 * NMI entry, VHE only: make sure HCR_EL2.TGE is set for the duration of
 * the NMI so interrupts are taken by the host, saving the previous
 * HCR_EL2 value in the per-CPU nmi_ctx so arch_nmi_exit() can restore
 * it. Nested NMIs only bump the count; the outermost level (cnt 0 -> 1)
 * does the sysreg work. NOTE(review): the do/while(0) wrapper, the
 * non-VHE early break, the nesting fast path and the barrier()/isb()
 * lines were missing from the garbled source and have been restored to
 * match what the surviving comments describe — confirm against upstream.
 */
#define arch_nmi_enter()						\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	if (___ctx->cnt) {						\
		___ctx->cnt++;						\
		break;							\
	}								\
									\
	___hcr = read_sysreg(hcr_el2);					\
	if (!(___hcr & HCR_TGE)) {					\
		write_sysreg(___hcr | HCR_TGE, hcr_el2);		\
		isb();							\
	}								\
	/*								\
	 * Make sure the sysreg write is performed before ___ctx->cnt	\
	 * is set to 1. NMIs that see cnt == 1 will rely on us.		\
	 */								\
	barrier();							\
	___ctx->cnt = 1;						\
	/*								\
	 * Make sure ___ctx->cnt is set before we save ___hcr. We	\
	 * don't want ___ctx->hcr to be overwritten.			\
	 */								\
	barrier();							\
	___ctx->hcr = ___hcr;						\
} while (0)
/*
 * NMI exit, VHE only: drop one level of NMI nesting and, once the
 * outermost NMI returns (cnt reaches 0), restore the HCR_EL2 value that
 * arch_nmi_enter() saved — but only if TGE was not already set before
 * entry. NOTE(review): the do/while(0) wrapper, the non-VHE early break,
 * the ___ctx->cnt-- and the barrier() lines were missing from the
 * garbled source and have been restored to match what the surviving
 * comments describe — confirm against upstream.
 */
#define arch_nmi_exit()							\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	___hcr = ___ctx->hcr;						\
	/*								\
	 * Make sure we read ___ctx->hcr before we release		\
	 * ___ctx->cnt as it makes ___ctx->hcr updatable again.		\
	 */								\
	barrier();							\
	___ctx->cnt--;							\
	/*								\
	 * Make sure ___ctx->cnt release is visible before we		\
	 * restore the sysreg. Otherwise a new NMI occurring		\
	 * right after write_sysreg() can be fooled and think		\
	 * we secured things for it.					\
	 */								\
	barrier();							\
	if (!___ctx->cnt && !(___hcr & HCR_TGE))			\
		write_sysreg(___hcr, hcr_el2);				\
} while (0)
/*
 * Hooked into asm-generic/hardirq.h via the ack_bad_irq #define above:
 * account a spurious/unexpected interrupt in the global error counter.
 * The parameter is unused; only the count is kept.
 */
static inline void ack_bad_irq(unsigned int irq)
{
	extern unsigned long irq_err_count;

	irq_err_count++;
}
#endif /* __ASM_HARDIRQ_H */