Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  * Copyright (C) 2012 ARM Ltd.
0004  */
0005 #ifndef __ASM_IRQFLAGS_H
0006 #define __ASM_IRQFLAGS_H
0007 
0008 #include <asm/alternative.h>
0009 #include <asm/barrier.h>
0010 #include <asm/ptrace.h>
0011 #include <asm/sysreg.h>
0012 
0013 /*
0014  * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
0015  * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif'
0016  * order:
0017  * Masking debug exceptions causes all other exceptions to be masked too.
0018  * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are
0019  * always masked and unmasked together, and have no side effects for other
0020  * flags. Keeping to this order makes it easier for entry.S to know which
0021  * exceptions should be unmasked.
0022  */
0023 
0024 /*
0025  * CPU interrupt mask handling.
0026  */
/*
 * Unmask IRQ and FIQ. On CPUs with ARM64_HAS_IRQ_PRIO_MASKING the
 * alternative instead writes GIC_PRIO_IRQON to ICC_PMR_EL1 (the GIC
 * priority mask) rather than touching PSTATE.DAIF.
 */
static inline void arch_local_irq_enable(void)
{
    if (system_has_prio_mask_debugging()) {
        u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

        /*
         * Sanity check: the PMR is expected to only ever hold one of
         * the two canonical values used by this header.
         */
        WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
    }

    /* daifclr #3 clears the I and F bits (IRQ+FIQ, see header comment). */
    asm volatile(ALTERNATIVE(
        "msr    daifclr, #3     // arch_local_irq_enable",
        __msr_s(SYS_ICC_PMR_EL1, "%0"),
        ARM64_HAS_IRQ_PRIO_MASKING)
        :
        : "r" ((unsigned long) GIC_PRIO_IRQON)
        : "memory");

    /* Synchronize the PMR write; presumably a no-op on the DAIF path. */
    pmr_sync();
}
0045 
/*
 * Mask IRQ and FIQ. On CPUs with ARM64_HAS_IRQ_PRIO_MASKING the
 * alternative writes GIC_PRIO_IRQOFF to ICC_PMR_EL1 instead of setting
 * the PSTATE.DAIF bits.
 */
static inline void arch_local_irq_disable(void)
{
    if (system_has_prio_mask_debugging()) {
        u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

        /* PMR must hold one of the two canonical values (see enable). */
        WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
    }

    /* daifset #3 sets the I and F bits (IRQ+FIQ, see header comment). */
    asm volatile(ALTERNATIVE(
        "msr    daifset, #3     // arch_local_irq_disable",
        __msr_s(SYS_ICC_PMR_EL1, "%0"),
        ARM64_HAS_IRQ_PRIO_MASKING)
        :
        : "r" ((unsigned long) GIC_PRIO_IRQOFF)
        : "memory");
}
0062 
0063 /*
0064  * Save the current interrupt enable state.
0065  */
static inline unsigned long arch_local_save_flags(void)
{
    unsigned long flags;

    /*
     * The returned value is either the raw DAIF field or the current
     * ICC_PMR_EL1 value, depending on which alternative is patched in;
     * it is only meaningful to the matching restore/test helpers below.
     */
    asm volatile(ALTERNATIVE(
        "mrs    %0, daif",
        __mrs_s("%0", SYS_ICC_PMR_EL1),
        ARM64_HAS_IRQ_PRIO_MASKING)
        : "=&r" (flags)
        :
        : "memory");

    return flags;
}
0080 
/*
 * Return non-zero if @flags (as produced by arch_local_save_flags())
 * represents an interrupts-masked state.
 */
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
    int res;

    /*
     * DAIF path: isolate PSR_I_BIT (non-zero when IRQs are masked).
     * PMR path: XOR against GIC_PRIO_IRQON, so any value other than
     * the "on" priority yields a non-zero (masked) result.
     */
    asm volatile(ALTERNATIVE(
        "and    %w0, %w1, #" __stringify(PSR_I_BIT),
        "eor    %w0, %w1, #" __stringify(GIC_PRIO_IRQON),
        ARM64_HAS_IRQ_PRIO_MASKING)
        : "=&r" (res)
        : "r" ((int) flags)
        : "memory");

    return res;
}
0095 
/*
 * Report whether interrupts are currently masked on this CPU.
 */
static inline int arch_irqs_disabled(void)
{
    unsigned long flags = arch_local_save_flags();

    return arch_irqs_disabled_flags(flags);
}
0100 
/*
 * Save the current interrupt state and mask interrupts.
 * The returned value is suitable for arch_local_irq_restore().
 */
static inline unsigned long arch_local_irq_save(void)
{
    unsigned long flags = arch_local_save_flags();

    /*
     * There are too many states with IRQs disabled, just keep the current
     * state if interrupts are already disabled/masked.
     */
    if (arch_irqs_disabled_flags(flags))
        return flags;

    arch_local_irq_disable();

    return flags;
}
0116 
0117 /*
0118  * restore saved IRQ state
0119  */
/*
 * Restore an interrupt state previously saved by arch_local_save_flags()
 * or arch_local_irq_save(). @flags is written back verbatim to either
 * DAIF or ICC_PMR_EL1, matching whichever representation was saved.
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
    asm volatile(ALTERNATIVE(
        "msr    daif, %0",
        __msr_s(SYS_ICC_PMR_EL1, "%0"),
        ARM64_HAS_IRQ_PRIO_MASKING)
        :
        : "r" (flags)
        : "memory");

    /* Synchronize the PMR write; presumably a no-op on the DAIF path. */
    pmr_sync();
}
0132 
0133 #endif /* __ASM_IRQFLAGS_H */