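/*
 * rtmutex API functions
 */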
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"
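/*
 * Max number of times we'll walk the boosting chain:
 */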
int max_lock_depth = 1024;
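/*
 * Common helper for the sleeping lock variants: annotate the acquisition
 * for lockdep, take the underlying rtmutex in the requested task state and
 * drop the lockdep annotation again if the lock operation fails (e.g. when
 * interrupted by a signal).
 */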
static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
						  unsigned int state,
						  struct lockdep_map *nest_lock,
						  unsigned int subclass)
{
	int ret;

	might_sleep();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, _RET_IP_);
	return ret;
}

void rt_mutex_base_init(struct rt_mutex_base *rtb)
{
	__rt_mutex_base_init(rtb);
}
EXPORT_SYMBOL(rt_mutex_base_init);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
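/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 * @subclass:	the lockdep subclass
 */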
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);

void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
}
EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock);

#else
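/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */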
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif
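/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */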
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
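/**
 * rt_mutex_lock_killable - lock a rt_mutex killable
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a fatal signal
 */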
int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
{
	return __rt_mutex_lock_common(lock, TASK_KILLABLE, NULL, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
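/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context.
 *
 * Returns:
 *  1 on success
 *  0 on contention
 */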
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
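/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */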
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
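/*
 * Futex variants, must not use fastpath.
 */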
int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
{
	return __rt_mutex_slowtrylock(lock);
}
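/**
 * __rt_mutex_futex_unlock - Futex variant of unlock. Futex variants do not
 *			     use the fast-path, so this can be simple and
 *			     will not need to retry.
 *
 * @lock:	The rt_mutex to be unlocked
 * @wqh:	The wake queue head from which to get the next lock waiter
 *
 * Returns: true if the top waiter must be woken up via rt_mutex_postunlock()
 * after the wait_lock has been dropped, false otherwise.
 */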
bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				     struct rt_wake_q_head *wqh)
{
	lockdep_assert_held(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		return false;
	}

	/*
	 * We've already deboosted, mark_wakeup_next_waiter() will
	 * retain preempt_disabled when we drop the wait_lock, to
	 * avoid inversion prior to the wakeup.  preempt_disable()
	 * therein pairs with rt_mutex_postunlock().
	 */
	mark_wakeup_next_waiter(wqh, lock);

	return true;
}

void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
{
	DEFINE_RT_WAKE_Q(wqh);
	unsigned long flags;
	bool postunlock;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	postunlock = __rt_mutex_futex_unlock(lock, &wqh);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	if (postunlock)
		rt_mutex_postunlock(&wqh);
}
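/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock:	The rt_mutex to be initialized
 * @name:	The lock name used for debugging
 * @key:	The lock class key used for debugging
 *
 * Initialize the rt_mutex to unlocked state.
 *
 * Initializing of a locked rt_mutex is not allowed
 */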
void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
			     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	__rt_mutex_base_init(&lock->rtmutex);
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
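/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */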
void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
					struct task_struct *proxy_owner)
{
	static struct lock_class_key pi_futex_key;

	__rt_mutex_base_init(lock);
	/*
	 * On PREEMPT_RT the futex hashbucket spinlock becomes 'sleeping'
	 * and rtmutex based. That causes a lockdep false positive, because
	 * some of the futex functions invoke spin_unlock(&hb->lock) with
	 * the wait_lock of the rtmutex associated to the pi_futex held.
	 * spin_unlock() in turn takes wait_lock of the rtmutex on which
	 * the spinlock is based, which makes lockdep notice a lock
	 * recursion. Give the futex/rtmutex wait_lock a separate key.
	 */
	lockdep_set_class(&lock->wait_lock, &pi_futex_key);
	rt_mutex_set_owner(lock, proxy_owner);
}
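/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 *
 * No locking. Caller has to do serializing itself
 *
 * Special API call for PI-futex support. This just cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and is no longer visible to other tasks.
 */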
void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
}
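/**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:	the rt_mutex to take
 * @waiter:	the pre-initialized rt_mutex_waiter
 * @task:	the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: does _NOT_ remove the @waiter on failure; must either call
 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */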
int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter,
					struct task_struct *task)
{
	int ret;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let the waiter sort it out.
		 */
		ret = 0;
	}

	return ret;
}
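/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:	the rt_mutex to take
 * @waiter:	the pre-initialized rt_mutex_waiter
 * @task:	the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
 * on failure.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */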
int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				      struct rt_mutex_waiter *waiter,
				      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
	if (unlikely(ret))
		remove_waiter(lock, waiter);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}
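/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock:	the rt_mutex we were woken on
 * @to:		the timeout, null if none. hrtimer should already have
 *		been started.
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support
 */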
int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				     struct hrtimer_sleeper *to,
				     struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	/* sleep on the mutex */
	set_current_state(TASK_INTERRUPTIBLE);
	ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}
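/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock:	the rt_mutex we were woken on
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
 * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock; we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - did the cleanup, we are done.
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          caller should disregard its return value.
 *
 * Special API call for PI-futex support
 */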
bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);
	/*
	 * Do an unconditional try-lock, this deals with the lock stealing
	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
	 * sets a NULL owner.
	 *
	 * We're not interested in the return value, because the subsequent
	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
	 * we will own the lock and it will have removed the waiter. If we
	 * failed the trylock, we're still not owner and we need to remove
	 * ourselves.
	 */
	try_to_take_rt_mutex(lock, current, waiter);
	/*
	 * Unless we're the owner; we're still enqueued on the wait_list.
	 * So check if we became owner, if not, take us off the wait_list.
	 */
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}
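/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */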
void __sched rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex_base *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}
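/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */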
void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
{
	rt_mutex_wake_up_q(wqh);
}

#ifdef CONFIG_DEBUG_RT_MUTEXES
void rt_mutex_debug_task_free(struct task_struct *task)
{
	DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif

#ifdef CONFIG_PREEMPT_RT
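/* Mutexes */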
void __mutex_rt_init(struct mutex *mutex, const char *name,
		     struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
	lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
}
EXPORT_SYMBOL(__mutex_rt_init);

static __always_inline int __mutex_lock_common(struct mutex *lock,
					       unsigned int state,
					       unsigned int subclass,
					       struct lockdep_map *nest_lock,
					       unsigned long ip)
{
	int ret;

	might_sleep();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
	ret = __rt_mutex_lock(&lock->rtmutex, state);
	if (ret)
		mutex_release(&lock->dep_map, ip);
	else
		lock_acquired(&lock->dep_map, ip);
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched _mutex_lock_nest_lock(struct mutex *lock,
				   struct lockdep_map *nest_lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched mutex_lock_interruptible_nested(struct mutex *lock,
					    unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

int __sched mutex_lock_killable_nested(struct mutex *lock,
				       unsigned int subclass)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

#else

void __sched mutex_lock(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock);

int __sched mutex_lock_interruptible(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}
EXPORT_SYMBOL(mutex_lock_killable);

void __sched mutex_lock_io(struct mutex *lock)
{
	int token = io_schedule_prepare();

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
	io_schedule_finish(token);
}
EXPORT_SYMBOL(mutex_lock_io);
#endif

int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task()))
		return 0;

	ret = __rt_mutex_trylock(&lock->rtmutex);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);

void __sched mutex_unlock(struct mutex *lock)
{
	mutex_release(&lock->dep_map, _RET_IP_);
	__rt_mutex_unlock(&lock->rtmutex);
}
EXPORT_SYMBOL(mutex_unlock);

#endif