// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */
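/*
 * A minimal usage sketch (foo_sem and the values used here are made-up
 * illustrations, not part of this file): initialise the semaphore with
 * the number of callers that may hold it at once, then bracket each use
 * with a down and an up:
 *
 *	static struct semaphore foo_sem;
 *
 *	sema_init(&foo_sem, 1);
 *
 *	if (down_interruptible(&foo_sem))
 *		return -EINTR;
 *	... use the resource guarded by foo_sem ...
 *	up(&foo_sem);
 */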
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>
#include <trace/events/lock.h>

static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem);

/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore.  If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * Use of this function is deprecated, please use down_interruptible() or
 * down_killable() instead.
 */
void down(struct semaphore *sem)
{
	unsigned long flags;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		__down(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_interruptible(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_interruptible);

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function returns
 * 0.
 */
int down_killable(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_killable(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_killable);

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully or 1 if it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task.
 */
int down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	raw_spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}
EXPORT_SYMBOL(down_trylock);

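/*
 * Since the return convention of down_trylock() is inverted relative to
 * mutex_trylock(), a short sketch of the intended pattern (irq_sem is a
 * made-up name used only for illustration):
 *
 *	if (down_trylock(&irq_sem)) {
 *		... could not get it, bail out without sleeping ...
 *		return;
 *	}
 *	... do the work ...
 *	up(&irq_sem);
 */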
/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @timeout: how long to wait before failing, in jiffies
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
 */
int down_timeout(struct semaphore *sem, long timeout)
{
	unsigned long flags;
	int result = 0;

	might_sleep();
	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_timeout(sem, timeout);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_timeout);

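/*
 * The timeout is expressed in jiffies, so callers normally convert from
 * wall-clock units first; a sketch (io_sem is a made-up name used only
 * for illustration):
 *
 *	if (down_timeout(&io_sem, msecs_to_jiffies(100)))
 *		return -ETIME;
 *	... do the work ...
 *	up(&io_sem);
 */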
/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore.  Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void up(struct semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);

/* Functions for the contended case */
struct semaphore_waiter {
	struct list_head list;
	struct task_struct *task;
	bool up;
};

/*
 * Because this function is inlined, the 'state' parameter will be constant,
 * and thus optimised away by the compiler.  Likewise the 'timeout' parameter
 * for the cases without timeouts.
 */
static inline int __sched ___down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = current;
	waiter.up = false;

	/* Sleep until woken by __up(), interrupted by a signal or timed out */
	for (;;) {
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		/* __up() hands the semaphore over and sets waiter.up for us */
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}

static inline int __sched __down_common(struct semaphore *sem, long state,
					long timeout)
{
	int ret;

	/* Bracket the actual wait with the lock-contention tracepoints */
	trace_contention_begin(sem, 0);
	ret = ___down_common(sem, state, timeout);
	trace_contention_end(sem, ret);

	return ret;
}

static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}

static noinline void __sched __up(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	/* Hand the semaphore straight to the first waiter and wake it */
	list_del(&waiter->list);
	waiter->up = true;
	wake_up_process(waiter->task);
}