0001
0002
0003
0004
0005 #ifndef __ASM_HARDIRQ_H
0006 #define __ASM_HARDIRQ_H
0007
0008 #include <linux/cache.h>
0009 #include <linux/percpu.h>
0010 #include <linux/threads.h>
0011 #include <asm/barrier.h>
0012 #include <asm/irq.h>
0013 #include <asm/kvm_arm.h>
0014 #include <asm/sysreg.h>
0015
0016 #define ack_bad_irq ack_bad_irq
0017 #include <asm-generic/hardirq.h>
0018
0019 #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
0020
/*
 * Per-CPU bookkeeping for NMI nesting while the kernel runs in hyp mode
 * (see is_kernel_in_hyp_mode() checks in arch_nmi_enter()/arch_nmi_exit()):
 * lets only the outermost NMI save/restore HCR_EL2.
 */
struct nmi_ctx {
	u64 hcr;		/* HCR_EL2 value saved by the outermost arch_nmi_enter() */
	unsigned int cnt;	/* NMI nesting depth; sysreg only touched on the 0 <-> 1 edge */
};

/* One context per CPU; NMIs on a CPU nest, they never migrate. */
DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
0027
/*
 * On entry to an NMI while the kernel runs at EL2 (VHE), make sure
 * HCR_EL2.TGE is set so the handler sees host-style behaviour, saving the
 * previous HCR_EL2 value for arch_nmi_exit(). Nested NMIs only bump the
 * per-CPU count and leave the sysreg alone.
 *
 * Note: every line of this multi-line macro, including the comment lines,
 * must end in a backslash continuation or the definition is cut short.
 */
#define arch_nmi_enter() \
do { \
	struct nmi_ctx *___ctx; \
	u64 ___hcr; \
 \
	if (!is_kernel_in_hyp_mode()) \
		break; \
 \
	___ctx = this_cpu_ptr(&nmi_contexts); \
	if (___ctx->cnt) { \
		___ctx->cnt++; \
		break; \
	} \
 \
	___hcr = read_sysreg(hcr_el2); \
	if (!(___hcr & HCR_TGE)) { \
		write_sysreg(___hcr | HCR_TGE, hcr_el2); \
		isb(); \
	} \
	/* \
	 * Make sure the sysreg write is performed before ___ctx->cnt \
	 * is set to 1. NMIs that occur after cnt is set to 1 take the \
	 * early-increment path above and so never touch the sysreg. \
	 */ \
	barrier(); \
	___ctx->cnt = 1; \
	/* \
	 * Make sure ___ctx->cnt is set before we save ___hcr: an NMI \
	 * nesting in after this point must observe a fully-initialised \
	 * context before it skips the save. \
	 */ \
	barrier(); \
	___ctx->hcr = ___hcr; \
} while (0)
0060
/*
 * Undo arch_nmi_enter(): drop the per-CPU nesting count and, for the
 * outermost NMI only, restore the HCR_EL2 value saved at entry — and only
 * when TGE was clear then, i.e. only when arch_nmi_enter() actually
 * modified the register.
 *
 * Note: every line of this multi-line macro, including the comment lines,
 * must end in a backslash continuation or the definition is cut short.
 */
#define arch_nmi_exit() \
do { \
	struct nmi_ctx *___ctx; \
	u64 ___hcr; \
 \
	if (!is_kernel_in_hyp_mode()) \
		break; \
 \
	___ctx = this_cpu_ptr(&nmi_contexts); \
	___hcr = ___ctx->hcr; \
	/* \
	 * Make sure we read ___ctx->hcr before we release \
	 * ___ctx->cnt, as dropping cnt makes ___ctx->hcr \
	 * writable by a new outermost NMI. \
	 */ \
	barrier(); \
	___ctx->cnt--; \
	/* \
	 * Make sure ___ctx->cnt release is ordered before the \
	 * sysreg restore below, so an NMI arriving right after \
	 * write_sysreg() does not see stale nesting state. \
	 */ \
	barrier(); \
	if (!___ctx->cnt && !(___hcr & HCR_TGE)) \
		write_sysreg(___hcr, hcr_el2); \
} while (0)
0087
/*
 * Called when an interrupt fires that nobody claims: account the spurious
 * event in the global error counter (defined in the core IRQ code).
 */
static inline void ack_bad_irq(unsigned int irq)
{
	extern unsigned long irq_err_count;

	++irq_err_count;
}
0093
0094 #endif