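/*
 * Queued read/write lock
 *
 * A reader/writer lock built from an atomic reader/writer count word
 * (->cnts) plus a queueing spinlock (->wait_lock) used to serialize
 * contending lockers.
 */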
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

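/*
 * Writer states & reader shift and bias.
 */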
#define _QW_WAITING	0x100	/* A writer is waiting		*/
#define _QW_LOCKED	0x0ff	/* A writer holds the lock	*/
#define _QW_WMASK	0x1ff	/* Writer mask			*/
#define _QR_SHIFT	9	/* Reader count shift		*/
#define _QR_BIAS	(1U << _QR_SHIFT)

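/*
 * External function declarations: the contended slow paths are
 * implemented out of line in kernel/locking/qrwlock.c.
 */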
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);

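/**
 * queued_read_trylock - try to acquire read lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */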
static inline int queued_read_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;

		/* A writer slipped in: undo the speculative reader increment. */
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}

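/**
 * queued_write_trylock - try to acquire write lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */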
static inline int queued_write_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;

	/* Lock is free: try to claim it for write in a single cmpxchg. */
	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
				_QW_LOCKED));
}

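/**
 * queued_read_lock - acquire read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */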
static inline void queued_read_lock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}

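/**
 * queued_write_lock - acquire write lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 */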
static inline void queued_write_lock(struct qrwlock *lock)
{
	int cnts = 0;

	/* Fast path: acquire only if the lock is completely free. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
		return;

	queued_write_lock_slowpath(lock);
}

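/**
 * queued_read_unlock - release read lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 */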
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count.
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}

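/**
 * queued_write_unlock - release write lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 */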
static inline void queued_write_unlock(struct qrwlock *lock)
{
	/* Clearing the writer-locked byte releases the lock. */
	smp_store_release(&lock->wlocked, 0);
}

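/**
 * queued_rwlock_is_contended - check if the lock is contended
 * @lock : Pointer to queued rwlock structure
 * Return: 1 if lock contended, 0 otherwise
 */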
static inline int queued_rwlock_is_contended(struct qrwlock *lock)
{
	return arch_spin_is_locked(&lock->wait_lock);
}

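/*
 * Remapping rwlock arch specific functions to the corresponding
 * queued rwlock functions.
 */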
#define arch_read_lock(l)		queued_read_lock(l)
#define arch_write_lock(l)		queued_write_lock(l)
#define arch_read_trylock(l)		queued_read_trylock(l)
#define arch_write_trylock(l)		queued_write_trylock(l)
#define arch_read_unlock(l)		queued_read_unlock(l)
#define arch_write_unlock(l)		queued_write_unlock(l)
#define arch_rwlock_is_contended(l)	queued_rwlock_is_contended(l)

#endif /* __ASM_GENERIC_QRWLOCK_H */