// SPDX-License-Identifier: GPL-2.0
/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore but which enforces
 * an alternate policy, namely:
 *   1) Supports lock wait timeouts
 *   2) Write waiter has priority
 *   3) Downgrading is not supported
 *
 * Implementation notes:
 *   1) Upper half of semaphore count is a wait count (differs from rwsem
 *      in that rwsem normalizes the upper half to the wait bias)
 *   2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>


#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK  0xffffffffL
#else
# define LDSEM_ACTIVE_MASK  0x0000ffffL
#endif

#define LDSEM_UNLOCKED      0L
#define LDSEM_ACTIVE_BIAS   1L
#define LDSEM_WAIT_BIAS     (-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS     LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS    (LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
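
/*
 * Example count values (64-bit, so LDSEM_ACTIVE_MASK == 0xffffffff):
 *
 *   unlocked:                        count == 0
 *   two readers hold the lock:       count == 2 * LDSEM_READ_BIAS
 *   one writer holds the lock:       count == LDSEM_WRITE_BIAS
 *                                    (active part 1, wait part -1)
 *   writer holds, one reader waits:  count == LDSEM_WRITE_BIAS + LDSEM_WAIT_BIAS
 *                                    (active part 1, wait part -2)
 *
 * A negative count therefore means a writer holds the lock and/or waiters
 * are queued.
 */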

/*
 * One waiter per blocked task, allocated on that task's stack in
 * down_read_failed()/down_write_failed(). A waiting reader is granted the
 * lock by the waker clearing ->task; a waiting writer is only woken and
 * must acquire the lock itself.
 */
struct ldsem_waiter {
    struct list_head list;
    struct task_struct *task;
};

/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
          struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
    /*
     * Make sure we are not reinitializing a held semaphore:
     */
    debug_check_no_locks_freed((void *)sem, sizeof(*sem));
    lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
    atomic_long_set(&sem->count, LDSEM_UNLOCKED);
    sem->wait_readers = 0;
    raw_spin_lock_init(&sem->wait_lock);
    INIT_LIST_HEAD(&sem->read_wait);
    INIT_LIST_HEAD(&sem->write_wait);
}

static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
    struct ldsem_waiter *waiter, *next;
    struct task_struct *tsk;
    long adjust, count;

    /*
     * Try to grant read locks to all readers on the read wait list.
     * Note the 'active part' of the count is incremented by
     * the number of readers before waking any processes up.
     */
    adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
    count = atomic_long_add_return(adjust, &sem->count);
    do {
        if (count > 0)
            break;
        if (atomic_long_try_cmpxchg(&sem->count, &count, count - adjust))
            return;
    } while (1);

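    /*
     * The grant succeeded: hand the lock to each waiting reader by
     * clearing waiter->task (with release semantics, paired with the
     * acquire load in down_read_failed()) before waking it, then drop
     * the task reference taken when the waiter was queued.
     */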
    list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
        tsk = waiter->task;
        smp_store_release(&waiter->task, NULL);
        wake_up_process(tsk);
        put_task_struct(tsk);
    }
    INIT_LIST_HEAD(&sem->read_wait);
    sem->wait_readers = 0;
}

static inline int writer_trylock(struct ld_semaphore *sem)
{
    /*
     * Only wake this writer if the active part of the count can be
     * transitioned from 0 -> 1
     */
    long count = atomic_long_add_return(LDSEM_ACTIVE_BIAS, &sem->count);
    do {
        if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
            return 1;
        if (atomic_long_try_cmpxchg(&sem->count, &count, count - LDSEM_ACTIVE_BIAS))
            return 0;
    } while (1);
}

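/*
 * Unlike readers, a waiting writer is only woken here; it does not own the
 * lock yet and must win writer_trylock() itself in down_write_failed(),
 * possibly racing with write lock stealers on the fast path.
 */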
static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
    struct ldsem_waiter *waiter;

    waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
    wake_up_process(waiter->task);
}

/*
 * Handle the lock release when there are processes blocked on it that can
 * now run:
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (count & LDSEM_ACTIVE_MASK) reached 0
 *     (but may have changed since)
 *   - the 'waiting part' of count (the upper half) is negative (and will
 *     still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
    if (!list_empty(&sem->write_wait))
        __ldsem_wake_writer(sem);
    else if (!list_empty(&sem->read_wait))
        __ldsem_wake_readers(sem);
}

static void ldsem_wake(struct ld_semaphore *sem)
{
    unsigned long flags;

    raw_spin_lock_irqsave(&sem->wait_lock, flags);
    __ldsem_wake(sem);
    raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
    struct ldsem_waiter waiter;
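    /* replaces this reader's fast-path LDSEM_READ_BIAS with a wait bias */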
    long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

    /* set up my own style of waitqueue */
    raw_spin_lock_irq(&sem->wait_lock);

    /*
     * Try to reverse the lock attempt, but if the count has changed so
     * that reversing fails, check whether the fast-path bias already
     * constitutes a granted read lock (count > 0, i.e. no writer and no
     * waiters) and early-out if so.
     */
    do {
        if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust)) {
            count += adjust;
            break;
        }
        if (count > 0) {
            raw_spin_unlock_irq(&sem->wait_lock);
            return sem;
        }
    } while (1);

    list_add_tail(&waiter.list, &sem->read_wait);
    sem->wait_readers++;

    waiter.task = current;
    get_task_struct(current);

    /* if there are no active locks, wake the new lock owner(s) */
    if ((count & LDSEM_ACTIVE_MASK) == 0)
        __ldsem_wake(sem);

    raw_spin_unlock_irq(&sem->wait_lock);

    /*
     * Wait to be given the lock: __ldsem_wake_readers() grants it by
     * clearing waiter.task before waking this task, so a NULL task
     * (read with acquire semantics below) means the lock is held.
     */
    for (;;) {
        set_current_state(TASK_UNINTERRUPTIBLE);

        if (!smp_load_acquire(&waiter.task))
            break;
        if (!timeout)
            break;
        timeout = schedule_timeout(timeout);
    }

    __set_current_state(TASK_RUNNING);

    if (!timeout) {
        /*
         * Lock timed out but check if this task was just
         * granted lock ownership - if so, pretend there
         * was no timeout; otherwise, cleanup lock wait.
         */
        raw_spin_lock_irq(&sem->wait_lock);
        if (waiter.task) {
            atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
            sem->wait_readers--;
            list_del(&waiter.list);
            raw_spin_unlock_irq(&sem->wait_lock);
            put_task_struct(waiter.task);
            return NULL;
        }
        raw_spin_unlock_irq(&sem->wait_lock);
    }

    return sem;
}

/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
    struct ldsem_waiter waiter;
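    /* back out only the active bias; the fast-path LDSEM_WAIT_BIAS stays while we wait */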
    long adjust = -LDSEM_ACTIVE_BIAS;
    int locked = 0;

    /* set up my own style of waitqueue */
    raw_spin_lock_irq(&sem->wait_lock);

    /*
     * Try to reverse the lock attempt but if the count has changed
     * so that reversing fails, check if the lock is now owned,
     * and early-out if so.
     */
    do {
        if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust))
            break;
        if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
            raw_spin_unlock_irq(&sem->wait_lock);
            return sem;
        }
    } while (1);

    list_add_tail(&waiter.list, &sem->write_wait);

    waiter.task = current;

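    /*
     * Wait loop: a woken writer is not handed the lock by the waker; it
     * must transition the active count 0 -> 1 itself via writer_trylock(),
     * so keep retrying until that succeeds or the timeout expires.
     */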
    set_current_state(TASK_UNINTERRUPTIBLE);
    for (;;) {
        if (!timeout)
            break;
        raw_spin_unlock_irq(&sem->wait_lock);
        timeout = schedule_timeout(timeout);
        raw_spin_lock_irq(&sem->wait_lock);
        set_current_state(TASK_UNINTERRUPTIBLE);
        locked = writer_trylock(sem);
        if (locked)
            break;
    }

    if (!locked)
        atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
    list_del(&waiter.list);

    /*
     * If the lock attempt timed out, wake every reader that gave way to
     * this writer; otherwise (with no other writer contending) the
     * readers would be split into two groups: one holding the semaphore
     * and another still asleep.
     */
    if (!locked && list_empty(&sem->write_wait))
        __ldsem_wake_readers(sem);

    raw_spin_unlock_irq(&sem->wait_lock);

    __set_current_state(TASK_RUNNING);

    /* lock wait may have timed out */
    if (!locked)
        return NULL;
    return sem;
}



static int __ldsem_down_read_nested(struct ld_semaphore *sem,
                       int subclass, long timeout)
{
    long count;

    rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

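    /*
     * Fast path: speculatively add LDSEM_READ_BIAS. A non-positive result
     * means a writer holds the lock or waiters are queued, so fall back
     * to the slow path (which may also back the bias out on timeout).
     */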
    count = atomic_long_add_return(LDSEM_READ_BIAS, &sem->count);
    if (count <= 0) {
        lock_contended(&sem->dep_map, _RET_IP_);
        if (!down_read_failed(sem, count, timeout)) {
            rwsem_release(&sem->dep_map, _RET_IP_);
            return 0;
        }
    }
    lock_acquired(&sem->dep_map, _RET_IP_);
    return 1;
}

static int __ldsem_down_write_nested(struct ld_semaphore *sem,
                        int subclass, long timeout)
{
    long count;

    rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

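    /*
     * Fast path: succeeds only if the active part of the count went from
     * 0 to 1, i.e. no reader or writer currently holds the lock. This is
     * also where a write lock can be stolen ahead of queued waiters.
     */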
    count = atomic_long_add_return(LDSEM_WRITE_BIAS, &sem->count);
    if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
        lock_contended(&sem->dep_map, _RET_IP_);
        if (!down_write_failed(sem, count, timeout)) {
            rwsem_release(&sem->dep_map, _RET_IP_);
            return 0;
        }
    }
    lock_acquired(&sem->dep_map, _RET_IP_);
    return 1;
}


/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
    might_sleep();
    return __ldsem_down_read_nested(sem, 0, timeout);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
    long count = atomic_long_read(&sem->count);

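    /* on failure, atomic_long_try_cmpxchg() refreshes count, so just retry */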
    while (count >= 0) {
        if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_READ_BIAS)) {
            rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
            lock_acquired(&sem->dep_map, _RET_IP_);
            return 1;
        }
    }
    return 0;
}

/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
    might_sleep();
    return __ldsem_down_write_nested(sem, 0, timeout);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
    long count = atomic_long_read(&sem->count);

    while ((count & LDSEM_ACTIVE_MASK) == 0) {
        if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_WRITE_BIAS)) {
            rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
            lock_acquired(&sem->dep_map, _RET_IP_);
            return 1;
        }
    }
    return 0;
}

/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
    long count;

    rwsem_release(&sem->dep_map, _RET_IP_);

    count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
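    /* wake waiters only if this was the last active holder and any are queued */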
    if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
        ldsem_wake(sem);
}

/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
    long count;

    rwsem_release(&sem->dep_map, _RET_IP_);

    count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
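    /* a negative count here means waiters are still queued; wake them */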
    if (count < 0)
        ldsem_wake(sem);
}


#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
    might_sleep();
    return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
                long timeout)
{
    might_sleep();
    return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif