/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

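/*
 * Idle state of a CPU as seen by the scheduler's load balancer:
 * CPU_NEWLY_IDLE marks a balance attempt triggered because the CPU has
 * just gone idle, while CPU_IDLE / CPU_NOT_IDLE describe the CPU during
 * periodic balancing.  CPU_MAX_IDLE_TYPES is only used to size
 * per-idle-type statistics arrays.
 */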
enum cpu_idle_type {
    CPU_IDLE,
    CPU_NOT_IDLE,
    CPU_NEWLY_IDLE,
    CPU_MAX_IDLE_TYPES
};

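/*
 * wake_up_if_idle(): kick @cpu out of the idle loop if it is currently
 * running its idle task (implemented in kernel/sched/core.c).  On !SMP
 * builds there is nobody else to kick, so it is a no-op.
 */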
#ifdef CONFIG_SMP
extern void wake_up_if_idle(int cpu);
#else
static inline void wake_up_if_idle(int cpu) { }
#endif

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG

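/*
 * The *_and_test() helpers below pair with resched_curr(): the idle
 * task orders "set/clear polling bit" before "read TIF_NEED_RESCHED",
 * while the remote CPU orders "set TIF_NEED_RESCHED" before "read
 * polling bit" when deciding whether a reschedule IPI is required.
 * The barriers guarantee that at least one side observes the other,
 * so a wakeup cannot be lost.
 */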
static inline void __current_set_polling(void)
{
    set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
    __current_set_polling();

    /*
     * Polling state must be visible before we test NEED_RESCHED,
     * paired with resched_curr().
     */
    smp_mb__after_atomic();

    return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
    clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
    __current_clr_polling();

    /*
     * Polling state must be visible before we test NEED_RESCHED,
     * paired with resched_curr().
     */
    smp_mb__after_atomic();

    return unlikely(tif_need_resched());
}

#else
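/*
 * Without TIF_POLLING_NRFLAG the idle loop cannot advertise that it is
 * polling on TIF_NEED_RESCHED, so resched_curr() always sends a
 * reschedule IPI and no polling bookkeeping or barriers are needed.
 */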
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
    return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
    return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
    __current_clr_polling();

    /*
     * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
     * Once the bit is cleared, we'll get IPIs with every new
     * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
     * fold.
     */
    smp_mb(); /* paired with resched_curr() */

    preempt_fold_need_resched();
}

#endif /* _LINUX_SCHED_IDLE_H */
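
For context, here is a minimal sketch of how an architecture idle path typically uses these helpers, loosely modeled on the pattern of the generic idle loop in kernel/sched/idle.c. It is illustrative only; example_idle_enter() and my_arch_wait_for_interrupt() are hypothetical names standing in for the architecture's real idle entry function and low-power wait primitive.

/* Illustrative sketch only -- not actual kernel code. */
static void example_idle_enter(void)
{
    /* Interrupts are assumed to be disabled by the caller. */
    if (current_clr_polling_and_test()) {
        /*
         * Clearing the polling bit raced with a remote
         * resched_curr(): a reschedule is already pending, so skip
         * the low-power wait and go handle it.
         */
        local_irq_enable();
    } else {
        /* Hypothetical primitive: enable interrupts and wait for one. */
        my_arch_wait_for_interrupt();
    }

    /*
     * Advertise polling again so that remote CPUs setting
     * TIF_NEED_RESCHED can skip the reschedule IPI while this CPU
     * spins in the idle loop.
     */
    __current_set_polling();
}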