0001
0002
0003
0004
0005
0006
0007
0008 #ifndef _LINUX_RWSEM_H
0009 #define _LINUX_RWSEM_H
0010
0011 #include <linux/linkage.h>
0012
0013 #include <linux/types.h>
0014 #include <linux/list.h>
0015 #include <linux/spinlock.h>
0016 #include <linux/atomic.h>
0017 #include <linux/err.h>
0018
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Static lockdep map initializer for a named rwsem. rwsems are sleeping
 * locks, hence the LD_WAIT_SLEEP inner wait type. Expands to nothing when
 * lockdep is not configured.
 */
# define __RWSEM_DEP_MAP_INIT(lockname) \
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_SLEEP, \
},
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
0028
0029 #ifndef CONFIG_PREEMPT_RT
0030
0031 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
0032 #include <linux/osq_lock.h>
0033 #endif
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
/*
 * The non-PREEMPT_RT rw-semaphore definition.
 *
 * NOTE(review): the low bits of ->count and ->owner encode state flags
 * managed by the rwsem implementation (kernel/locking/rwsem.c) -- confirm
 * the exact bit layout there before relying on raw values.
 */
struct rw_semaphore {
	/* lock count; 0 means unlocked (see RWSEM_UNLOCKED_VALUE below) */
	atomic_long_t count;
	/*
	 * Owner task pointer plus state flags; presumably used for
	 * optimistic-spinning checks on the write owner -- verify against
	 * the implementation.
	 */
	atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	/* MCS-style queue for optimistic spinners */
	struct optimistic_spin_queue osq;
#endif
	/* protects ->wait_list */
	raw_spinlock_t wait_lock;
	/* list of tasks blocked on this rwsem */
	struct list_head wait_list;
#ifdef CONFIG_DEBUG_RWSEMS
	/* self-pointer, checked to catch use of uninitialized rwsems */
	void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
};
0067
0068
0069 static inline int rwsem_is_locked(struct rw_semaphore *sem)
0070 {
0071 return atomic_long_read(&sem->count) != 0;
0072 }
0073
/* ->count value of an uncontended, unlocked rwsem */
#define RWSEM_UNLOCKED_VALUE 0L
#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)

/* Debug-only ->magic initializer: points the rwsem at itself. */
#ifdef CONFIG_DEBUG_RWSEMS
# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
#else
# define __RWSEM_DEBUG_INIT(lockname)
#endif

/* Optimistic-spinning queue initializer, only when spinning is configured. */
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

/*
 * Static initializer for a struct rw_semaphore. The conditional helper
 * macros above expand to nothing when their config option is off, so the
 * initializer list stays valid in every configuration.
 */
#define __RWSEM_INITIALIZER(name) \
{ __RWSEM_COUNT_INIT(name), \
.owner = ATOMIC_LONG_INIT(0), \
__RWSEM_OPT_INIT(name) \
.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
.wait_list = LIST_HEAD_INIT((name).wait_list), \
__RWSEM_DEBUG_INIT(name) \
__RWSEM_DEP_MAP_INIT(name) }
0099
/* Define and statically initialize an rwsem named 'name'. */
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
struct lock_class_key *key);

/*
 * Runtime initializer. The static __key gives every init_rwsem() call
 * site its own lockdep class, so locks initialized at different places
 * are tracked separately.
 */
#define init_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
__init_rwsem((sem), #sem, &__key); \
} while (0)
0112
0113
0114
0115
0116
0117
0118
0119 static inline int rwsem_is_contended(struct rw_semaphore *sem)
0120 {
0121 return !list_empty(&sem->wait_list);
0122 }
0123
0124 #else
0125
#include <linux/rwbase_rt.h>

/*
 * PREEMPT_RT variant: the rwsem is built on the generic rt rw-base
 * primitive; only the lockdep map is kept alongside it.
 */
struct rw_semaphore {
	struct rwbase_rt rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
};
0134
/* Static initializer for the PREEMPT_RT rwsem. */
#define __RWSEM_INITIALIZER(name) \
{ \
.rwbase = __RWBASE_INITIALIZER(name), \
__RWSEM_DEP_MAP_INIT(name) \
}

/* Define and statically initialize an rwsem named 'lockname'. */
#define DECLARE_RWSEM(lockname) \
struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)

extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
struct lock_class_key *key);

/*
 * Runtime initializer; as in the !RT case, the static __key gives each
 * call site its own lockdep class.
 */
#define init_rwsem(sem) \
do { \
static struct lock_class_key __key; \
\
__init_rwsem((sem), #sem, &__key); \
} while (0)
0153
0154 static __always_inline int rwsem_is_locked(struct rw_semaphore *sem)
0155 {
0156 return rw_base_is_locked(&sem->rwbase);
0157 }
0158
0159 static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
0160 {
0161 return rw_base_is_contended(&sem->rwbase);
0162 }
0163
0164 #endif
0165
0166
0167
0168
0169
0170
0171
0172
0173
/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);
extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);
extern int __must_check down_write_killable(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);
0208
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Nested locking variants. The "subclass" argument tells lockdep that
 * taking two rwsems of the same class in order is intentional, so it
 * does not report a false deadlock. Without CONFIG_DEBUG_LOCK_ALLOC
 * these map straight onto the plain primitives above.
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);

/*
 * Acquire 'sem' for writing while annotating that it nests inside
 * 'nest_lock'; the typecheck rejects arguments without a dep_map.
 */
# define down_write_nest_lock(sem, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
_down_write_nest_lock(sem, &(nest_lock)->dep_map); \
} while (0)

/*
 * Non-owner variants: the task releasing the lock may differ from the
 * one that acquired it, so lockdep ownership tracking is suppressed.
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass) down_read(sem)
# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock) down_write(sem)
# define down_write_nested(sem, subclass) down_write(sem)
# define down_write_killable_nested(sem, subclass) down_write_killable(sem)
# define down_read_non_owner(sem) down_read(sem)
# define up_read_non_owner(sem) up_read(sem)
#endif
0252
0253 #endif