/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED (0)

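/*
 * Generic flavour of the preempt_count accessors: the count lives in each
 * task's thread_info rather than in a per-cpu variable.  Architectures
 * with cheap per-cpu operations (x86, for instance) override this header
 * with their own asm/preempt.h.
 */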
static __always_inline int preempt_count(void)
{
    return READ_ONCE(current_thread_info()->preempt_count);
}

static __always_inline volatile int *preempt_count_ptr(void)
{
    return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
    *preempt_count_ptr() = pc;
}

/*
 * These must be macros to avoid header recursion hell: implementing them
 * as inline functions would need task_thread_info() from <linux/sched.h>,
 * which itself pulls this header back in.
 */
#define init_task_preempt_count(p) do { \
    task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
    task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

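/*
 * This generic variant does not fold the need-resched flag into
 * preempt_count (there is no PREEMPT_NEED_RESCHED bit here, unlike the
 * x86 implementation), so the three helpers below are deliberate no-ops
 * and callers test tif_need_resched() explicitly instead.
 */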
static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
    return false;
}

/*
 * The various preempt_count add/sub methods
 */

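/*
 * Note that plain, non-atomic read-modify-write is sufficient: the count
 * is strictly per-task, so only the owning CPU ever modifies it, and
 * contexts that interrupt (IRQs, softirqs) nest symmetrically, restoring
 * the count before returning.
 */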
static __always_inline void __preempt_count_add(int val)
{
    *preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
    *preempt_count_ptr() -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
    /*
     * Load-store architectures cannot do per-cpu atomic operations, so we
     * cannot fold PREEMPT_NEED_RESCHED into the count here: the flag
     * might get lost.  Test the TIF flag explicitly instead.
     */
    return !--*preempt_count_ptr() && tif_need_resched();
}

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
    return unlikely(preempt_count() == preempt_offset &&
            tif_need_resched());
}
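/*
 * preempt_offset is the count the caller expects to hold at this point.
 * For example, a bare cond_resched() path passes 0 (fully preemptible),
 * while cond_resched_lock() passes PREEMPT_LOCK_OFFSET to account for
 * the one spinlock it still holds.
 */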

#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */
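
/*
 * Illustrative sketch (not part of this header): under CONFIG_PREEMPTION
 * the core's preempt_enable() composes the primitives above roughly as
 *
 *     barrier();
 *     if (unlikely(__preempt_count_dec_and_test()))
 *         __preempt_schedule();
 *
 * so the scheduler slow path is entered only when the count drops to
 * zero while TIF_NEED_RESCHED is set.
 */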

#endif /* __ASM_PREEMPT_H */