// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/export.h>
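
/*
 * Per-CPU state for the generic mmiowb() tracking: it is used to make sure
 * that MMIO writes issued inside a critical section have reached the device
 * before the spinlock protecting it is released.  Architectures that provide
 * their own arch_mmiowb_state do not need this definition.
 */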
#ifdef CONFIG_MMIOWB
#ifndef arch_mmiowb_state
DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
#endif
#endif

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * spinlock : include/linux/spinlock_api_smp.h
 * rwlock   : include/linux/rwlock_api_smp.h
 */
#else

/*
 * Some architectures can relax in favour of the CPU owning the lock.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)	cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)	cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)	cpu_relax()
#endif

/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here is only one user per function
 * which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptible if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype) \
void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
{ \
        for (;;) { \
                preempt_disable(); \
                if (likely(do_raw_##op##_trylock(lock))) \
                        break; \
                preempt_enable(); \
 \
                arch_##op##_relax(&lock->raw_lock); \
        } \
} \
 \
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
{ \
        unsigned long flags; \
 \
        for (;;) { \
                preempt_disable(); \
                local_irq_save(flags); \
                if (likely(do_raw_##op##_trylock(lock))) \
                        break; \
                local_irq_restore(flags); \
                preempt_enable(); \
 \
                arch_##op##_relax(&lock->raw_lock); \
        } \
 \
        return flags; \
} \
 \
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
{ \
        _raw_##op##_lock_irqsave(lock); \
} \
 \
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
{ \
        unsigned long flags; \
 \
        /* Careful: we must exclude softirqs too, hence the */ \
        /* irq-disabling. We use the generic preemption-aware */ \
        /* function: */ \
        /* (each comment line is closed so the macro continuation */ \
        /* backslashes keep working) */ \
        flags = _raw_##op##_lock_irqsave(lock); \
        local_bh_disable(); \
        local_irq_restore(flags); \
}

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);

#ifndef CONFIG_PREEMPT_RT
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
#endif

#endif
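
/*
 * Out-of-line versions of the spinlock API.  Each function below is only
 * built when the corresponding CONFIG_INLINE_* option is not set (or, for
 * _raw_spin_unlock, when CONFIG_UNINLINE_SPIN_UNLOCK is set); otherwise the
 * inline definition from include/linux/spinlock_api_smp.h is used directly.
 *
 * Typical caller-side usage (illustrative only):
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&lock, flags);
 *	... critical section, local interrupts disabled ...
 *	raw_spin_unlock_irqrestore(&lock, flags);
 */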
#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
        return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
        return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
        __raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
        return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
        __raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
        __raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif

#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
        __raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
        __raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
        __raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
        __raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
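
/*
 * Out-of-line rwlock API.  On PREEMPT_RT kernels rwlock_t is a sleeping
 * lock implemented separately, so none of these functions are built there.
 *
 * Caller-side usage (illustrative only):
 *
 *	read_lock(&tasklist_lock);
 *	... readers may run concurrently ...
 *	read_unlock(&tasklist_lock);
 */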
#ifndef CONFIG_PREEMPT_RT

#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
        return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
        __raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
        return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
        __raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
        __raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
        __raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
        __raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
        __raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
        return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
        __raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
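
/*
 * Without lockdep there is no subclass bookkeeping to do, so the nested
 * write-lock variant below simply discards the subclass and takes the
 * plain write lock.
 */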
#ifndef CONFIG_DEBUG_LOCK_ALLOC
#define __raw_write_lock_nested(lock, subclass) __raw_write_lock(((void)(subclass), (lock)))
#endif

void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass)
{
        __raw_write_lock_nested(lock, subclass);
}
EXPORT_SYMBOL(_raw_write_lock_nested);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
        return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
        __raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
        __raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
        __raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
        __raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
        __raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif

#endif /* !CONFIG_PREEMPT_RT */
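
/*
 * Lockdep variants: with CONFIG_DEBUG_LOCK_ALLOC the _nested and _nest_lock
 * forms must be real out-of-line functions so that the subclass (or the
 * enclosing lock's lockdep map) is recorded by the spin_acquire()
 * annotations below.
 *
 * Typical use when taking two locks of the same class (illustrative only):
 *
 *	raw_spin_lock(&parent->lock);
 *	raw_spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	raw_spin_unlock(&child->lock);
 *	raw_spin_unlock(&parent->lock);
 */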
#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);

unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
                                                       int subclass)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
        return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
                                         struct lockdep_map *nest_lock)
{
        preempt_disable();
        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);

#endif
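
/*
 * Report whether an address lies within the out-of-line lock functions.
 * __lockfunc places them in the .spinlock.text section, which the linker
 * script brackets with __lock_text_start/__lock_text_end; this is used by
 * e.g. profile_pc() to skip over lock functions when sampling.
 */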
notrace int in_lock_functions(unsigned long addr)
{
        /* Linker adds these: start and end of __lockfunc functions */
        extern char __lock_text_start[], __lock_text_end[];

        return addr >= (unsigned long)__lock_text_start
        && addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);