/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

/*
 * PREEMPT_NEED_RESCHED is folded into the preempt counter as its most
 * significant bit.  The bit is stored inverted: it is set while *no*
 * reschedule is needed, so a raw counter value of zero means "nesting
 * level dropped to zero and a reschedule is pending", letting both
 * conditions be checked with a single comparison.
 */
#define PREEMPT_NEED_RESCHED	0x80000000
#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
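
/*
 * Return the preempt counter with the NEED_RESCHED bit masked out,
 * i.e. the plain preemption nesting level.
 */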
static inline int preempt_count(void)
{
	return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
}
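
/*
 * Replace the nesting count while preserving the current state of the
 * NEED_RESCHED bit; the compare-and-swap loop handles the bit being
 * changed concurrently.
 */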
static inline void preempt_count_set(int pc)
{
	int old, new;

	do {
		old = READ_ONCE(S390_lowcore.preempt_count);
		new = (old & PREEMPT_NEED_RESCHED) |
			(pc & ~PREEMPT_NEED_RESCHED);
	} while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
				  old, new) != old);
}
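
/*
 * Because the bit is stored inverted, "setting" need_resched clears
 * it with an atomic AND and "clearing" sets it with an atomic OR;
 * testing checks that the bit is off.
 */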
static inline void set_preempt_need_resched(void)
{
	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}

static inline void clear_preempt_need_resched(void)
{
	__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}

static inline bool test_preempt_need_resched(void)
{
	return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
}
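
/*
 * Constant deltas that fit the signed-byte immediate range can use
 * the cheaper __atomic_add_const(); everything else takes the generic
 * atomic add path.
 */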
static inline void __preempt_count_add(int val)
{
	/*
	 * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
	 * enabled, gcc 12 fails to handle __builtin_constant_p().
	 */
	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
		if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
			__atomic_add_const(val, &S390_lowcore.preempt_count);
			return;
		}
	}
	__atomic_add(val, &S390_lowcore.preempt_count);
}

static inline void __preempt_count_sub(int val)
{
	__preempt_count_add(-val);
}
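
/*
 * __atomic_add() returns the old value.  An old value of 1 means the
 * counter is now 0: the count dropped to zero and the inverted
 * NEED_RESCHED bit is clear, so a reschedule is due.
 */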
static inline bool __preempt_count_dec_and_test(void)
{
	return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
}
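
/*
 * A plain equality test suffices: the counter can only equal
 * @preempt_offset when the nesting level matches and the inverted
 * NEED_RESCHED bit is clear, i.e. a reschedule is pending.
 */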
static inline bool should_resched(int preempt_offset)
{
	return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
			preempt_offset);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define PREEMPT_ENABLED	(0)
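
/*
 * Machines without the z196 instruction set support cannot fold
 * NEED_RESCHED into the counter atomically.  The counter is updated
 * with plain loads and stores instead, and the reschedule condition
 * is tracked separately through the TIF_NEED_RESCHED thread flag.
 */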
static inline int preempt_count(void)
{
	return READ_ONCE(S390_lowcore.preempt_count);
}

static inline void preempt_count_set(int pc)
{
	S390_lowcore.preempt_count = pc;
}
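
/*
 * With no NEED_RESCHED bit in the counter these become no-ops and
 * test_preempt_need_resched() is always false; the generic code
 * checks the thread flag instead.
 */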
static inline void set_preempt_need_resched(void)
{
}

static inline void clear_preempt_need_resched(void)
{
}

static inline bool test_preempt_need_resched(void)
{
	return false;
}
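
/*
 * Non-atomic read-modify-write updates are sufficient here: only the
 * local CPU modifies its own preempt count, and interrupt-nested
 * updates are always balanced by the time the interrupt returns.
 */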
static inline void __preempt_count_add(int val)
{
	S390_lowcore.preempt_count += val;
}

static inline void __preempt_count_sub(int val)
{
	S390_lowcore.preempt_count -= val;
}
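
/*
 * Without the folded bit the reschedule condition has to be checked
 * explicitly via tif_need_resched() once the count reaches zero.
 */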
static inline bool __preempt_count_dec_and_test(void)
{
	return !--S390_lowcore.preempt_count && tif_need_resched();
}

static inline bool should_resched(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
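
/*
 * The preempt count lives in the lowcore rather than in the task's
 * thread_info, so there is nothing to initialize per task or per
 * idle thread.
 */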
#define init_task_preempt_count(p)	do { } while (0)

#define init_idle_preempt_count(p, cpu)	do { } while (0)
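
/*
 * Out-of-line preemption entry points, invoked from preempt_enable()
 * and its notrace variant when the preempt count drops to zero and a
 * reschedule is pending.
 */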
#ifdef CONFIG_PREEMPTION
extern void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif

#endif /* __ASM_PREEMPT_H */