/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

#include <asm/nospec-branch.h>

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __section(".cpuidle.text")
/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
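
/*
 * Illustrative sketch (not part of this header): the value returned by
 * native_save_fl() is the EFLAGS register, so a caller can test the
 * interrupt flag against X86_EFLAGS_IF from <asm/processor-flags.h>:
 *
 *	unsigned long flags = native_save_fl();
 *
 *	if (flags & X86_EFLAGS_IF)
 *		;	// interrupts were enabled at the time of the read
 */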

static __always_inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

static __always_inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

static inline __cpuidle void native_safe_halt(void)
{
	mds_idle_clear_cpu_buffers();
	/*
	 * "sti; hlt" is effectively atomic: interrupts are only
	 * recognized after the instruction following "sti", so no
	 * interrupt can slip in before the CPU halts.
	 */
	asm volatile("sti; hlt": : :"memory");
}

static inline __cpuidle void native_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static __always_inline unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}

static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}

static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; "sti" enables interrupts only after the
 * following instruction completes, so the subsequent "hlt" cannot
 * be interrupted:
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}
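
/*
 * Illustrative sketch (not part of this header): a minimal idle loop
 * would disable interrupts, re-check for pending work, and then use
 * arch_safe_halt() to re-enable interrupts and halt atomically; the
 * need_more_work() helper below is hypothetical:
 *
 *	for (;;) {
 *		arch_local_irq_disable();
 *		if (need_more_work()) {
 *			arch_local_irq_enable();
 *			continue;
 *		}
 *		arch_safe_halt();	// sti; hlt -- wakes on interrupt
 *	}
 */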

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc.:
 */
static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}
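
/*
 * Illustrative sketch (not part of this header): arch_local_irq_save()
 * pairs with arch_local_irq_restore() below, so a critical section
 * restores whatever interrupt state it found on entry; the per-CPU
 * counter here is purely hypothetical:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	this_cpu_counter++;		// hypothetical per-CPU data
 *	arch_local_irq_restore(flags);
 */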
#else /* __ASSEMBLY__ */

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
/* Assembly-side helper: read RFLAGS into %rax */
#define SAVE_FLAGS		pushfq; popq %rax
#endif /* CONFIG_DEBUG_ENTRY */

#endif /* CONFIG_X86_64 */

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

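/*
 * Illustrative sketch (not part of this header): in assembly code,
 * SAVE_FLAGS leaves RFLAGS in %rax, so a debug assertion can test
 * the interrupt flag directly, along the lines of:
 *
 *	SAVE_FLAGS
 *	testl $X86_EFLAGS_IF, %eax
 *	jz .Lirqs_off
 */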
#ifndef __ASSEMBLY__
static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

static __always_inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}
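
/*
 * Illustrative sketch (not part of this header): arch_irqs_disabled()
 * is handy for sanity checks in code that must run with interrupts
 * off; the function name below is hypothetical:
 *
 *	static void touch_percpu_state(void)
 *	{
 *		WARN_ON_ONCE(!arch_irqs_disabled());
 *		// ... manipulate state an interrupt could corrupt ...
 *	}
 */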

/*
 * Note the asymmetry: restoring a "disabled" state is a no-op, since
 * interrupts are expected to already be off at the restore point;
 * only an "enabled" state needs to be re-established.
 */
static __always_inline void arch_local_irq_restore(unsigned long flags)
{
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_enable();
}
#endif /* !__ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */