// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>
#include <trace/events/lock.h>

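/*
 * Illustrative sketch, not part of the original file: as the notes above
 * describe, ->count is the number of tasks that may still acquire the
 * semaphore.  A semaphore initialised with a count of 2 lets two tasks
 * hold it at once; the third caller sleeps on the wait_list until up()
 * is called.  The names below are made up purely for illustration.
 */
static struct semaphore example_sem __maybe_unused;

static void __maybe_unused example_sem_setup(void)
{
    /* Two tasks may pass down() before anyone has to sleep. */
    sema_init(&example_sem, 2);
}
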
static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem);

/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore.  If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * Use of this function is deprecated, please use down_interruptible() or
 * down_killable() instead.
 */
void down(struct semaphore *sem)
{
    unsigned long flags;

    might_sleep();
    raw_spin_lock_irqsave(&sem->lock, flags);
    if (likely(sem->count > 0))
        sem->count--;
    else
        __down(sem);
    raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int down_interruptible(struct semaphore *sem)
{
    unsigned long flags;
    int result = 0;

    might_sleep();
    raw_spin_lock_irqsave(&sem->lock, flags);
    if (likely(sem->count > 0))
        sem->count--;
    else
        result = __down_interruptible(sem);
    raw_spin_unlock_irqrestore(&sem->lock, flags);

    return result;
}
EXPORT_SYMBOL(down_interruptible);
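
/*
 * Illustrative sketch, not part of the original file: the usual calling
 * pattern for down_interruptible() in process context.  Callers often
 * translate the error into -ERESTARTSYS before returning to user space.
 * The function name is made up for illustration.
 */
static int __maybe_unused example_acquire_interruptible(struct semaphore *sem)
{
    int ret;

    ret = down_interruptible(sem);    /* 0 on success, -EINTR on a signal */
    if (ret)
        return ret;

    /* ... critical section protected by the semaphore ... */

    up(sem);
    return 0;
}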

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function returns
 * 0.
 */
int down_killable(struct semaphore *sem)
{
    unsigned long flags;
    int result = 0;

    might_sleep();
    raw_spin_lock_irqsave(&sem->lock, flags);
    if (likely(sem->count > 0))
        sem->count--;
    else
        result = __down_killable(sem);
    raw_spin_unlock_irqrestore(&sem->lock, flags);

    return result;
}
EXPORT_SYMBOL(down_killable);
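
/*
 * Illustrative sketch, not part of the original file: down_killable()
 * follows the same calling pattern as down_interruptible(), but only a
 * fatal signal breaks the wait.  That suits work which should not be
 * abandoned because of an ordinary signal, yet must not block a task
 * that is being killed.  The name below is made up for illustration.
 */
static int __maybe_unused example_acquire_killable(struct semaphore *sem)
{
    int ret;

    ret = down_killable(sem);    /* -EINTR only on a fatal signal */
    if (ret)
        return ret;

    /* ... work that should survive ordinary signals ... */

    up(sem);
    return 0;
}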

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully or 1 if it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int down_trylock(struct semaphore *sem)
{
    unsigned long flags;
    int count;

    raw_spin_lock_irqsave(&sem->lock, flags);
    count = sem->count - 1;
    if (likely(count >= 0))
        sem->count = count;
    raw_spin_unlock_irqrestore(&sem->lock, flags);

    return (count < 0);
}
EXPORT_SYMBOL(down_trylock);
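
/*
 * Illustrative sketch, not part of the original file: as the NOTE above
 * warns, down_trylock() returns 1 on failure, the opposite of
 * spin_trylock() and mutex_trylock(), so the "if" below reads inverted
 * compared with those APIs.  The name is made up for illustration.
 */
static bool __maybe_unused example_try_acquire(struct semaphore *sem)
{
    if (down_trylock(sem))
        return false;    /* contended: the semaphore was NOT acquired */

    /* ... semaphore held; do the non-blocking work ... */

    up(sem);
    return true;
}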

/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @timeout: how long to wait before failing
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
 */
int down_timeout(struct semaphore *sem, long timeout)
{
    unsigned long flags;
    int result = 0;

    might_sleep();
    raw_spin_lock_irqsave(&sem->lock, flags);
    if (likely(sem->count > 0))
        sem->count--;
    else
        result = __down_timeout(sem, timeout);
    raw_spin_unlock_irqrestore(&sem->lock, flags);

    return result;
}
EXPORT_SYMBOL(down_timeout);
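
/*
 * Illustrative sketch, not part of the original file: @timeout is given
 * in jiffies, so callers typically convert from a human-scale unit with
 * msecs_to_jiffies() (from <linux/jiffies.h>).  The 100ms bound and the
 * function name are arbitrary choices made for illustration.
 */
static int __maybe_unused example_acquire_with_timeout(struct semaphore *sem)
{
    int ret;

    /* Give up after roughly 100ms; -ETIME means the wait timed out. */
    ret = down_timeout(sem, msecs_to_jiffies(100));
    if (ret)
        return ret;

    /* ... critical section ... */

    up(sem);
    return 0;
}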

/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore.  Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void up(struct semaphore *sem)
{
    unsigned long flags;

    raw_spin_lock_irqsave(&sem->lock, flags);
    if (likely(list_empty(&sem->wait_list)))
        sem->count++;
    else
        __up(sem);
    raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);
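
/*
 * Illustrative sketch, not part of the original file: because up() may be
 * called from any context and by a task that never called down(), a
 * semaphore initialised to 0 can act as a simple signal -- one task sleeps
 * in down() while an interrupt handler releases it with up().  (New code
 * usually prefers a struct completion for this.)  The names below assume a
 * semaphore set up elsewhere with sema_init(&example_event, 0) and are made
 * up for illustration.
 */
static struct semaphore example_event __maybe_unused;

static void __maybe_unused example_irq_signal(void)
{
    /* Interrupt context: wake whichever task is sleeping in down(). */
    up(&example_event);
}

static void __maybe_unused example_wait_for_event(void)
{
    /* Process context: sleep until the interrupt handler calls up(). */
    down(&example_event);
}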

/* Functions for the contended case */

struct semaphore_waiter {
    struct list_head list;
    struct task_struct *task;
    bool up;
};

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched ___down_common(struct semaphore *sem, long state,
                                long timeout)
{
    struct semaphore_waiter waiter;

    list_add_tail(&waiter.list, &sem->wait_list);
    waiter.task = current;
    waiter.up = false;

    for (;;) {
        if (signal_pending_state(state, current))
            goto interrupted;
        if (unlikely(timeout <= 0))
            goto timed_out;
        __set_current_state(state);
        raw_spin_unlock_irq(&sem->lock);
        timeout = schedule_timeout(timeout);
        raw_spin_lock_irq(&sem->lock);
        if (waiter.up)
            return 0;
    }

 timed_out:
    list_del(&waiter.list);
    return -ETIME;

 interrupted:
    list_del(&waiter.list);
    return -EINTR;
}

static inline int __sched __down_common(struct semaphore *sem, long state,
                    long timeout)
{
    int ret;

    trace_contention_begin(sem, 0);
    ret = ___down_common(sem, state, timeout);
    trace_contention_end(sem, ret);

    return ret;
}

static noinline void __sched __down(struct semaphore *sem)
{
    __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
    return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
    return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
    return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}

static noinline void __sched __up(struct semaphore *sem)
{
    struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
                        struct semaphore_waiter, list);
    list_del(&waiter->list);
    waiter->up = true;
    wake_up_process(waiter->task);
}