/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * Here's the role of the various spinlock/rwlock related include files:
 *
 * On SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * On UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        spinlock. (which is an empty structure on non-debug
 *                        builds)
 *
 *   (included on UP-non debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra) \
        ".subsection 1\n\t" \
        extra \
        ".ifndef " LOCK_SECTION_NAME "\n\t" \
        LOCK_SECTION_NAME ":\n\t" \
        ".endif\n"

#define LOCK_SECTION_END \
        ".previous\n\t"

#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                 struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock) \
do { \
        static struct lock_class_key __key; \
 \
        __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
} while (0)

#else
# define raw_spin_lock_init(lock) \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
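
/*
 * Example (illustrative sketch, not part of the API above): a raw
 * spinlock can be defined statically with DEFINE_RAW_SPINLOCK() from
 * <linux/spinlock_types.h>, or initialized at runtime with
 * raw_spin_lock_init() before first use.  The "demo_*" names below are
 * hypothetical.
 *
 *	static DEFINE_RAW_SPINLOCK(demo_lock);
 *	static int demo_count;
 *
 *	static void demo_inc(void)
 *	{
 *		raw_spin_lock(&demo_lock);
 *		demo_count++;
 *		raw_spin_unlock(&demo_lock);
 *	}
 */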

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0				CPU1
 *
 *	  WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	  spin_lock(S);			smp_mb();
 *	  smp_mb__after_spinlock();	r1 = READ_ONCE(X);
 *	  r0 = READ_ONCE(Y);
 *	  spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the calls to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Between two critical sections for the same lock, the barrier in the
 *      second critical section orders the first section's accesses before
 *      the second section's accesses as observed by all CPUs, i.e. it
 *      upgrades the lock to an RCsc lock.
 *
 * Architectures that do not override smp_mb__after_spinlock() get the
 * default below, which assumes the lock acquisition already implies a full
 * barrier and only annotates it for KCSAN (kcsan_mb()); architectures with
 * weaker ACQUIRE semantics must define it to a real smp_mb().
 */
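
/*
 * Example (illustrative sketch): the canonical user is a wakeup-style
 * handshake, where the lock acquisition must also order the acquirer's
 * program-order earlier stores against its later loads; the "demo_*"
 * names, including demo_wake_target(), are hypothetical.
 *
 *	static DEFINE_RAW_SPINLOCK(demo_lock);
 *	static int demo_cond;
 *	static int demo_flag;
 *
 *	static void demo_waker(void)
 *	{
 *		WRITE_ONCE(demo_cond, 1);
 *		raw_spin_lock(&demo_lock);
 *		smp_mb__after_spinlock();
 *		if (READ_ONCE(demo_flag))
 *			demo_wake_target();
 *		raw_spin_unlock(&demo_lock);
 *	}
 */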

#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
        mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        int ret = arch_spin_trylock(&(lock)->raw_lock);

        if (ret)
                mmiowb_spin_lock();

        return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        mmiowb_spin_unlock();
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif
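
/*
 * Illustrative sketch (not part of this header): the do_raw_spin_*()
 * helpers are the layer the _raw_spin_*() API functions are built on.
 * The SMP API implementation, roughly along the lines of
 * linux/spinlock_api_smp.h, does:
 *
 *	static inline void __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		preempt_disable();
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 *	}
 *
 * Ordinary code should use the raw_spin_lock()/spin_lock() wrappers
 * below rather than calling do_raw_spin_lock() directly.
 */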

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock) \
        do { \
                typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
                _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
        } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
#endif
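
/*
 * Example (illustrative sketch): raw_spin_lock_nested() tells lockdep
 * that taking two locks of the same class is intentional.  A common
 * pattern imposes address order and annotates the inner acquisition
 * with SINGLE_DEPTH_NESTING from <linux/lockdep.h>; "demo_double_lock"
 * is a hypothetical helper.
 *
 *	static void demo_double_lock(raw_spinlock_t *a, raw_spinlock_t *b)
 *	{
 *		if (a > b)
 *			swap(a, b);
 *		raw_spin_lock(a);
 *		raw_spin_lock_nested(b, SINGLE_DEPTH_NESTING);
 *	}
 */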

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags) \
        do { \
                typecheck(unsigned long, flags); \
                flags = _raw_spin_lock_irqsave(lock); \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
        do { \
                typecheck(unsigned long, flags); \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
        do { \
                typecheck(unsigned long, flags); \
                flags = _raw_spin_lock_irqsave(lock); \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags) \
        do { \
                typecheck(unsigned long, flags); \
                _raw_spin_lock_irqsave(lock, flags); \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
        raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags) \
        do { \
                typecheck(unsigned long, flags); \
                _raw_spin_unlock_irqrestore(lock, flags); \
        } while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0; }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})
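
/*
 * Example (illustrative sketch): the trylock variants return 1 on
 * success and 0 on failure, and the *_irq/*_irqsave forms re-enable or
 * restore interrupts themselves on the failure path; the "demo_*"
 * names are hypothetical.
 *
 *	static bool demo_try_update(raw_spinlock_t *lock, int *val)
 *	{
 *		unsigned long flags;
 *
 *		if (!raw_spin_trylock_irqsave(lock, flags))
 *			return false;
 *		(*val)++;
 *		raw_spin_unlock_irqrestore(lock, flags);
 *		return true;
 *	}
 */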

#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT */
#include <linux/rwlock.h>
#endif

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/* Non PREEMPT_RT kernel, map to raw spinlocks: */
#ifndef CONFIG_PREEMPT_RT

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock) \
do { \
        static struct lock_class_key __key; \
 \
        __raw_spin_lock_init(spinlock_check(lock), \
                             #lock, &__key, LD_WAIT_CONFIG); \
} while (0)

#else

# define spin_lock_init(_lock) \
do { \
        spinlock_check(_lock); \
        *(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \
} while (0)

#endif
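
/*
 * Example (illustrative sketch): the common pattern is a spinlock_t
 * embedded in a structure, initialized once before the object becomes
 * visible to any other context; the "demo_*" names are hypothetical.
 *
 *	struct demo_obj {
 *		spinlock_t lock;
 *		int count;
 *	};
 *
 *	static void demo_obj_setup(struct demo_obj *obj)
 *	{
 *		spin_lock_init(&obj->lock);
 *		obj->count = 0;
 *	}
 */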

static __always_inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass) \
do { \
        raw_spin_lock_nested(spinlock_check(lock), subclass); \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock) \
do { \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
} while (0)
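
/*
 * Example (illustrative sketch): spin_lock_nest_lock() tells lockdep
 * that taking many same-class "child" locks cannot deadlock because a
 * common outer lock is already held; the demo_parent/demo_child types
 * are hypothetical.
 *
 *	struct demo_parent {
 *		spinlock_t lock;
 *	};
 *
 *	struct demo_child {
 *		spinlock_t lock;
 *	};
 *
 *	static void demo_lock_child(struct demo_parent *p, struct demo_child *c)
 *	{
 *		lockdep_assert_held(&p->lock);
 *		spin_lock_nest_lock(&c->lock, &p->lock);
 *	}
 */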

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags) \
do { \
        raw_spin_lock_irqsave(spinlock_check(lock), flags); \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
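
/*
 * Example (illustrative sketch): spin_lock_irqsave() is the safe
 * default when a lock is shared with an interrupt handler, since it
 * works whether or not interrupts were already disabled.  This reuses
 * the hypothetical demo_obj from the sketch above.
 *
 *	static void demo_add(struct demo_obj *obj, int delta)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&obj->lock, flags);
 *		obj->count += delta;
 *		spin_unlock_irqrestore(&obj->lock, flags);
 *	}
 */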

static __always_inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags) \
({ \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Unfortunately, this function is only required to provide a single
 * bit of information.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}
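
/*
 * Example (illustrative sketch): spin_is_contended() lets a long
 * running critical section voluntarily drop the lock when another CPU
 * is spinning on it, similar in spirit to cond_resched_lock(); the
 * "demo_scan_step" name is hypothetical.
 *
 *	static void demo_scan_step(spinlock_t *lock)
 *	{
 *		if (spin_is_contended(lock) || need_resched()) {
 *			spin_unlock(lock);
 *			cond_resched();
 *			spin_lock(lock);
 *		}
 *	}
 */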

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

#else  /* !CONFIG_PREEMPT_RT */
# include <linux/spinlock_rt.h>
#endif /* CONFIG_PREEMPT_RT */

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>

/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                        unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
                __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
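
/*
 * Example (illustrative sketch): the classic use is dropping the last
 * reference to an object that lives on a locked list, where the lock
 * must be taken atomically with the count reaching zero; the "demo_*"
 * names are hypothetical.
 *
 *	static void demo_put(struct demo_node *node)
 *	{
 *		if (atomic_dec_and_lock(&node->refcnt, &demo_list_lock)) {
 *			list_del(&node->entry);
 *			spin_unlock(&demo_list_lock);
 *			kfree(node);
 *		}
 *	}
 */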

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                             size_t max_size, unsigned int cpu_mult,
                             gfp_t gfp, const char *name,
                             struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp) \
({ \
        static struct lock_class_key key; \
        int ret; \
 \
        ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size, \
                                       cpu_mult, gfp, #locks, &key); \
        ret; \
})

void free_bucket_spinlocks(spinlock_t *locks);
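
/*
 * Example (illustrative sketch): a hash table can use a shared array of
 * bucket locks instead of one lock per bucket; the returned mask is
 * applied to the hash value to pick a lock.  The "demo_*" names are
 * hypothetical.
 *
 *	static spinlock_t *demo_locks;
 *	static unsigned int demo_lock_mask;
 *
 *	static int demo_init(void)
 *	{
 *		return alloc_bucket_spinlocks(&demo_locks, &demo_lock_mask,
 *					      1024, 0, GFP_KERNEL);
 *	}
 *
 *	static spinlock_t *demo_bucket_lock(u32 hash)
 *	{
 *		return &demo_locks[hash & demo_lock_mask];
 *	}
 *
 * A non-zero cpu_mult sizes the array relative to the number of
 * possible CPUs (capped at max_size); free_bucket_spinlocks() releases
 * the array.
 */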

#endif /* __LINUX_SPINLOCK_H */