/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
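
/*
 * Illustrative usage sketch (not from this file; "struct my_dev" and
 * my_dev_setup() are hypothetical). Callers normally do not use
 * __mutex_init() directly: a mutex is declared statically with
 * DEFINE_MUTEX() or initialized at runtime with mutex_init(), which
 * expands to __mutex_init() with a per-site lock class key:
 *
 *	static DEFINE_MUTEX(global_lock);
 *
 *	struct my_dev {
 *		struct mutex lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->lock);
 *	}
 */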

/*
 * The low bits of lock->owner hold flag state; the rest is the owning
 * task_struct pointer (NULL means unlocked). task_struct pointers are
 * sufficiently aligned that the bottom three bits are always free for
 * flags:
 *
 *  MUTEX_FLAG_WAITERS: the wait_list is non-empty, unlock must do a wakeup.
 *  MUTEX_FLAG_HANDOFF: unlock should hand the lock to the first waiter.
 *  MUTEX_FLAG_PICKUP:  a handoff was done and awaits pickup by its target.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

/*
 * Strip the flag bits to get at the owning task, if any.
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Try to acquire the mutex, or when @handoff, mark it for handoff to us.
 *
 * Returns NULL if the lock was acquired by the current task, otherwise the
 * current owner (i.e. __mutex_owner(lock)).
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break;
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF.
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations
 * outwards except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task; when @task = NULL this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the matching
 * ACQUIRE for the handoff is provided by __mutex_trylock().
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * The lock/unlock logic is split into a fast path and a slow path for the
 * rare contended case, to keep the fast path small and cheap to inline.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task may not
 * exit without first unlocking the mutex, and the memory where the
 * mutex resides must not be freed while it is still locked. The
 * mutex must be initialized (or statically defined) before it can
 * be locked.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce these restrictions and will also do
 * deadlock debugging.)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
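
/*
 * Illustrative usage sketch (not from this file; counter_lock and
 * my_counter are hypothetical): a plain critical section pairing
 * mutex_lock() with mutex_unlock(), in process context, in the same task:
 *
 *	static DEFINE_MUTEX(counter_lock);
 *	static unsigned long my_counter;
 *
 *	static void counter_add(unsigned long n)
 *	{
 *		mutex_lock(&counter_lock);
 *		my_counter += n;
 *		mutex_unlock(&counter_lock);
 *	}
 */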

#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set its contents are undefined; only by acquiring
	 * wait_lock is there a guarantee that they are not invalid when read.
	 *
	 * As such, when deadlock detection needs to be performed the
	 * optimistic spinning cannot be done. Check this in every inner
	 * iteration because we may be racing against another thread's
	 * ww_mutex_lock().
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin if there are
	 * waiters: we want to avoid stealing the lock from a waiter with an
	 * earlier stamp, since the other thread may already own a lock that
	 * we also need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	lockdep_assert_preemption_disabled();

	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that lock->owner still matches owner. If that
		 * fails, owner might point to freed memory; if it still
		 * matches, the disabled preemption acts as an RCU read-side
		 * critical section and keeps the memory valid.
		 */
		barrier();

		/*
		 * Stop spinning when the owner is no longer running on a CPU
		 * (or its vCPU was preempted), or when we should reschedule.
		 */
		if (!owner_on_cpu(owner) || need_resched()) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop.
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	lockdep_assert_preemption_disabled();

	if (need_resched())
		return 0;

	/*
	 * We already disabled preemption, which is equal to an RCU read-side
	 * critical section: the owner task_struct cannot be freed from under
	 * us while we peek at its on_cpu state.
	 */
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner_on_cpu(owner);

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * so that we trylock in the spin path, which is a faster option than
	 * falling back to the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen anyway, there is no point in going through the
 * lock/unlock overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take the OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of
 * the ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
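
/*
 * Illustrative usage sketch (not from this file; my_ww_class, struct obj and
 * lock_both() are hypothetical, and the -EDEADLK handling is condensed; see
 * Documentation/locking/ww-mutex-design.rst for the full retry pattern).
 * W/W mutexes are taken under an acquire context so that deadlocks surface
 * as -EDEADLK and can be resolved by backing off in stamp order:
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *
 *	struct obj {
 *		struct ww_mutex lock;
 *	};
 *
 *	static void lock_both(struct obj *a, struct obj *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *
 *		ww_acquire_init(&ctx, &my_ww_class);
 *
 *		ww_mutex_lock(&a->lock, &ctx);
 *		if (ww_mutex_lock(&b->lock, &ctx) == -EDEADLK) {
 *			ww_mutex_unlock(&a->lock);
 *			ww_mutex_lock_slow(&b->lock, &ctx);
 *			ww_mutex_lock(&a->lock, &ctx);
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		... use a and b ...
 *
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_unlock(&b->lock);
 *		ww_acquire_fini(&ctx);
 *	}
 */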

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		trace_contention_end(lock, 0);
		preempt_enable();
		return 0;
	}

	raw_spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;
	}

	set_current_state(state);
	trace_contention_begin(lock, LCB_F_MUTEX);
	for (;;) {
		bool first;

		/*
		 * Holding wait_lock serializes us against mutex_unlock()
		 * handing the lock off to us; try the lock again before
		 * deciding to sleep.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock_or_handoff(lock, first))
			break;

		if (first) {
			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
				break;
			trace_contention_begin(lock, LCB_F_MUTEX);
		}

		raw_spin_lock(&lock->wait_lock);
	}
	raw_spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	trace_contention_end(lock, 0);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	trace_contention_end(lock, ret);
	raw_spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}

/**
 * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context
 * @ww: mutex to lock
 * @ww_ctx: optional w/w acquire context
 *
 * Trylocks a mutex with the optional acquire context; no deadlock detection
 * is possible. Returns 1 if the mutex has been acquired successfully, 0
 * otherwise.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx)
		return mutex_trylock(&ww->base);

	MUTEX_WARN_ON(ww->base.magic != &ww->base);

	/*
	 * Reset the wounded flag after a kill. No other process can race and
	 * wound us here, since they can't have a valid owner pointer if we
	 * don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__mutex_trylock(&ww->base)) {
		ww_mutex_set_context_fastpath(ww, ww_ctx);
		mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	raw_spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
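
/*
 * Illustrative usage sketch (not from this file; "struct my_data" and
 * my_update() are hypothetical): interruptible lock attempts must check the
 * return value and bail out on -EINTR instead of touching the protected
 * data:
 *
 *	static int my_update(struct my_data *d, int val)
 *	{
 *		int ret;
 *
 *		ret = mutex_lock_interruptible(&d->lock);
 *		if (ret)
 *			return ret;
 *
 *		d->value = val;
 *		mutex_unlock(&d->lock);
 *		return 0;
 *	}
 */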

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);
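
/*
 * Illustrative usage sketch (not from this file; the device structure and
 * writeback helper are hypothetical): mutex_lock_io() fits paths where
 * blocking on the mutex effectively means waiting for I/O, so the sleep is
 * accounted as iowait:
 *
 *	static void my_flush_dirty(struct my_dev *dev)
 *	{
 *		mutex_lock_io(&dev->io_lock);
 *		my_issue_writeback(dev);
 *		mutex_unlock(&dev->io_lock);
 *	}
 */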

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
					struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
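
/*
 * Illustrative usage sketch (not from this file; the flush helper and its
 * data are hypothetical): note the spin_trylock()-style return convention,
 * 1 on success and 0 on contention, so the result reads directly as a
 * boolean:
 *
 *	static void my_flush_if_idle(struct my_data *d)
 *	{
 *		if (!mutex_trylock(&d->lock))
 *			return;
 *
 *		my_do_flush(d);
 *		mutex_unlock(&d->lock);
 *	}
 */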

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif
#endif /* !CONFIG_PREEMPT_RT */

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
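
/*
 * Illustrative usage sketch (not from this file; the object type, list lock
 * and teardown are hypothetical): the typical caller drops a reference and
 * only performs teardown, under the lock, when that reference was the last
 * one:
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcnt, &obj_list_lock))
 *			return;
 *
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */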