Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-or-later */
0002 /*
0003  * Queue read/write lock
0004  *
0005  * These use generic atomic and locking routines, but depend on a fair spinlock
0006  * implementation in order to be fair themselves.  The implementation in
0007  * asm-generic/spinlock.h meets these requirements.
0008  *
0009  * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
0010  *
0011  * Authors: Waiman Long <waiman.long@hp.com>
0012  */
0013 #ifndef __ASM_GENERIC_QRWLOCK_H
0014 #define __ASM_GENERIC_QRWLOCK_H
0015 
0016 #include <linux/atomic.h>
0017 #include <asm/barrier.h>
0018 #include <asm/processor.h>
0019 
0020 #include <asm-generic/qrwlock_types.h>
0021 
0022 /* Must be included from asm/spinlock.h after defining arch_spin_is_locked.  */
0023 
0024 /*
0025  * Writer states & reader shift and bias.
0026  */
0027 #define _QW_WAITING 0x100       /* A writer is waiting     */
0028 #define _QW_LOCKED  0x0ff       /* A writer holds the lock */
0029 #define _QW_WMASK   0x1ff       /* Writer mask         */
0030 #define _QR_SHIFT   9       /* Reader count shift      */
0031 #define _QR_BIAS    (1U << _QR_SHIFT)
0032 
0033 /*
0034  * External function declarations
0035  */
0036 extern void queued_read_lock_slowpath(struct qrwlock *lock);
0037 extern void queued_write_lock_slowpath(struct qrwlock *lock);
0038 
0039 /**
0040  * queued_read_trylock - try to acquire read lock of a queued rwlock
0041  * @lock : Pointer to queued rwlock structure
0042  * Return: 1 if lock acquired, 0 if failed
0043  */
0044 static inline int queued_read_trylock(struct qrwlock *lock)
0045 {
0046     int cnts;
0047 
0048     cnts = atomic_read(&lock->cnts);
0049     if (likely(!(cnts & _QW_WMASK))) {
0050         cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
0051         if (likely(!(cnts & _QW_WMASK)))
0052             return 1;
0053         atomic_sub(_QR_BIAS, &lock->cnts);
0054     }
0055     return 0;
0056 }
0057 
0058 /**
0059  * queued_write_trylock - try to acquire write lock of a queued rwlock
0060  * @lock : Pointer to queued rwlock structure
0061  * Return: 1 if lock acquired, 0 if failed
0062  */
0063 static inline int queued_write_trylock(struct qrwlock *lock)
0064 {
0065     int cnts;
0066 
0067     cnts = atomic_read(&lock->cnts);
0068     if (unlikely(cnts))
0069         return 0;
0070 
0071     return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
0072                 _QW_LOCKED));
0073 }
0074 /**
0075  * queued_read_lock - acquire read lock of a queued rwlock
0076  * @lock: Pointer to queued rwlock structure
0077  */
0078 static inline void queued_read_lock(struct qrwlock *lock)
0079 {
0080     int cnts;
0081 
0082     cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
0083     if (likely(!(cnts & _QW_WMASK)))
0084         return;
0085 
0086     /* The slowpath will decrement the reader count, if necessary. */
0087     queued_read_lock_slowpath(lock);
0088 }
0089 
0090 /**
0091  * queued_write_lock - acquire write lock of a queued rwlock
0092  * @lock : Pointer to queued rwlock structure
0093  */
0094 static inline void queued_write_lock(struct qrwlock *lock)
0095 {
0096     int cnts = 0;
0097     /* Optimize for the unfair lock case where the fair flag is 0. */
0098     if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
0099         return;
0100 
0101     queued_write_lock_slowpath(lock);
0102 }
0103 
/**
 * queued_read_unlock - release read lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count.  The release ordering
	 * of the sub keeps the reader critical section from leaking past
	 * the unlock; the return value is deliberately discarded.
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}
0115 
/**
 * queued_write_unlock - release write lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
	/*
	 * Clear only the writer-locked byte (wlocked overlays the low
	 * _QW_LOCKED bits of cnts — see qrwlock_types.h); the release
	 * store orders the writer's critical section before the unlock.
	 */
	smp_store_release(&lock->wlocked, 0);
}
0124 
/**
 * queued_rwlock_is_contended - check if the lock is contended
 * @lock : Pointer to queued rwlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static inline int queued_rwlock_is_contended(struct qrwlock *lock)
{
	/*
	 * Waiters (both readers and writers) queue on wait_lock in the
	 * slowpaths, so a held wait_lock indicates contention.
	 */
	return arch_spin_is_locked(&lock->wait_lock);
}
0134 
0135 /*
0136  * Remapping rwlock architecture specific functions to the corresponding
0137  * queued rwlock functions.
0138  */
0139 #define arch_read_lock(l)       queued_read_lock(l)
0140 #define arch_write_lock(l)      queued_write_lock(l)
0141 #define arch_read_trylock(l)        queued_read_trylock(l)
0142 #define arch_write_trylock(l)       queued_write_trylock(l)
0143 #define arch_read_unlock(l)     queued_read_unlock(l)
0144 #define arch_write_unlock(l)        queued_write_unlock(l)
0145 #define arch_rwlock_is_contended(l) queued_rwlock_is_contended(l)
0146 
0147 #endif /* __ASM_GENERIC_QRWLOCK_H */