#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/deadline.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/ww_mutex.h>

#include <trace/events/lock.h>

#include "rtmutex_common.h"

#ifndef WW_RT
# define build_ww_mutex()	(false)
# define ww_container_of(rtm)	NULL

static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
					struct rt_mutex *lock,
					struct ww_acquire_ctx *ww_ctx)
{
	return 0;
}

static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
					    struct ww_acquire_ctx *ww_ctx)
{
}

static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
					  struct ww_acquire_ctx *ww_ctx)
{
}

static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
					struct rt_mutex_waiter *waiter,
					struct ww_acquire_ctx *ww_ctx)
{
	return 0;
}

#else
# define build_ww_mutex()	(true)
# define ww_container_of(rtm)	container_of(rtm, struct ww_mutex, base)
# include "ww_mutex.h"
#endif
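/*
 * lock->owner state tracking:
 *
 * The owner field holds the task_struct pointer of the current owner.
 * Bit 0 (RT_MUTEX_HAS_WAITERS) is folded into that pointer and signals
 * that waiters are enqueued, which forces the unlocker into the slow path.
 */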
static __always_inline void
rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	WRITE_ONCE(lock->owner, (struct task_struct *)val);
}

static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

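/*
 * Clear a stale RT_MUTEX_HAS_WAITERS bit once the waiter tree is empty,
 * so that the cmpxchg based fast paths can succeed again.
 */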
static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	if (rt_mutex_has_waiters(lock))
		return;

	owner = READ_ONCE(*p);
	if (owner & RT_MUTEX_HAS_WAITERS)
		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}

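/*
 * With CONFIG_DEBUG_RT_MUTEXES the cmpxchg based fast paths are disabled
 * (the helpers below simply fail), which forces all lock operations into
 * the slow paths.
 */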
#ifndef CONFIG_DEBUG_RT_MUTEXES
static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
						     struct task_struct *old,
						     struct task_struct *new)
{
	return try_cmpxchg_acquire(&lock->owner, &old, new);
}

static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
						     struct task_struct *old,
						     struct task_struct *new)
{
	return try_cmpxchg_release(&lock->owner, &old, new);
}

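/*
 * Set RT_MUTEX_HAS_WAITERS atomically, so that the owner cannot release
 * the lock via the fast path while a waiter is being enqueued.
 */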
static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg_relaxed(p, owner,
				 owner | RT_MUTEX_HAS_WAITERS) != owner);
}

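/*
 * Drop wait_lock and try to release ownership with a cmpxchg against the
 * recorded owner.  Returns false if a new waiter slipped in and set
 * RT_MUTEX_HAS_WAITERS, in which case the caller must re-take wait_lock
 * and retry.
 */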
static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
						 unsigned long flags)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return rt_mutex_cmpxchg_release(lock, owner, NULL);
}

#else
static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
						     struct task_struct *old,
						     struct task_struct *new)
{
	return false;
}

static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
						     struct task_struct *old,
						     struct task_struct *new)
{
	return false;
}

static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
						 unsigned long flags)
	__releases(lock->wait_lock)
{
	lock->owner = NULL;
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	return true;
}
#endif

static __always_inline int __waiter_prio(struct task_struct *task)
{
	int prio = task->prio;

	if (!rt_prio(prio))
		return DEFAULT_PRIO;

	return prio;
}

static __always_inline void
waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{
	waiter->prio = __waiter_prio(task);
	waiter->deadline = task->dl.deadline;
}

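/* Build an on-stack rt_mutex_waiter from @p for priority comparisons only. */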
#define task_to_waiter(p) \
	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }

static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
						struct rt_mutex_waiter *right)
{
	if (left->prio < right->prio)
		return 1;

	if (dl_prio(left->prio))
		return dl_time_before(left->deadline, right->deadline);

	return 0;
}

static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
						 struct rt_mutex_waiter *right)
{
	if (left->prio != right->prio)
		return 0;

	if (dl_prio(left->prio))
		return left->deadline == right->deadline;

	return 1;
}

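/*
 * May @waiter acquire the lock ahead of the current top waiter?  Strictly
 * higher priority always wins; for the spinlock variant, non-RT/DL waiters
 * of equal priority may steal as well.
 */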
static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
				  struct rt_mutex_waiter *top_waiter)
{
	if (rt_mutex_waiter_less(waiter, top_waiter))
		return true;

#ifdef RT_MUTEX_BUILD_SPINLOCKS
	if (rt_prio(waiter->prio) || dl_prio(waiter->prio))
		return false;

	return rt_mutex_waiter_equal(waiter, top_waiter);
#else
	return false;
#endif
}

#define __node_2_waiter(node) \
	rb_entry((node), struct rt_mutex_waiter, tree_entry)

static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
{
	struct rt_mutex_waiter *aw = __node_2_waiter(a);
	struct rt_mutex_waiter *bw = __node_2_waiter(b);

	if (rt_mutex_waiter_less(aw, bw))
		return 1;

	if (!build_ww_mutex())
		return 0;

	if (rt_mutex_waiter_less(bw, aw))
		return 0;

	if (aw->ww_ctx) {
		if (!bw->ww_ctx)
			return 1;

		return (signed long)(aw->ww_ctx->stamp -
				     bw->ww_ctx->stamp) < 0;
	}

	return 0;
}

static __always_inline void
rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
{
	rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less);
}

static __always_inline void
rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->tree_entry))
		return;

	rb_erase_cached(&waiter->tree_entry, &lock->waiters);
	RB_CLEAR_NODE(&waiter->tree_entry);
}

#define __node_2_pi_waiter(node) \
	rb_entry((node), struct rt_mutex_waiter, pi_tree_entry)

static __always_inline bool
__pi_waiter_less(struct rb_node *a, const struct rb_node *b)
{
	return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b));
}

static __always_inline void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less);
}

static __always_inline void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
		return;

	rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

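/*
 * (Re)adjust @p's effective priority to that of its highest priority
 * PI waiter, or back to its normal priority if it has none.
 * Must be called with p->pi_lock held.
 */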
static __always_inline void rt_mutex_adjust_prio(struct task_struct *p)
{
	struct task_struct *pi_task = NULL;

	lockdep_assert_held(&p->pi_lock);

	if (task_has_pi_waiters(p))
		pi_task = task_top_pi_waiter(p)->task;

	rt_mutex_setprio(p, pi_task);
}

static __always_inline void rt_mutex_wake_q_add_task(struct rt_wake_q_head *wqh,
						     struct task_struct *task,
						     unsigned int wake_state)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && wake_state == TASK_RTLOCK_WAIT) {
		if (IS_ENABLED(CONFIG_PROVE_LOCKING))
			WARN_ON_ONCE(wqh->rtlock_task);
		get_task_struct(task);
		wqh->rtlock_task = task;
	} else {
		wake_q_add(&wqh->head, task);
	}
}

static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
						struct rt_mutex_waiter *w)
{
	rt_mutex_wake_q_add_task(wqh, w->task, w->wake_state);
}

static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
		wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
		put_task_struct(wqh->rtlock_task);
		wqh->rtlock_task = NULL;
	}

	if (!wake_q_empty(&wqh->head))
		wake_up_q(&wqh->head);

	preempt_enable();
}

static __always_inline bool
rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
			      enum rtmutex_chainwalk chwalk)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		return waiter != NULL;
	return chwalk == RT_MUTEX_FULL_CHAINWALK;
}

static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_struct *p)
{
	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

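/*
 * Walk the priority inheritance chain: starting at @task, the owner of the
 * lock that @orig_waiter blocked on, propagate the top waiter's priority
 * along the chain of held and blocked-on locks, requeueing waiters and
 * readjusting owner priorities as needed.
 *
 * Returns 0 on success, or -EDEADLK when the maximum lock depth is exceeded
 * or a cycle back to @orig_lock / @top_task is detected.  The caller must
 * hold a reference on @task; it is dropped before returning.
 */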
static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
					      enum rtmutex_chainwalk chwalk,
					      struct rt_mutex_base *orig_lock,
					      struct rt_mutex_base *next_lock,
					      struct rt_mutex_waiter *orig_waiter,
					      struct task_struct *top_task)
{
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	struct rt_mutex_waiter *prerequeue_top_waiter;
	int ret = 0, depth = 0;
	struct rt_mutex_base *lock;
	bool detect_deadlock;
	bool requeue = true;

	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);

again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return -EDEADLK;
	}

retry:
	raw_spin_lock_irq(&task->pi_lock);

	waiter = task->pi_blocked_on;

	if (!waiter)
		goto out_unlock_pi;

	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	if (next_lock != waiter->lock)
		goto out_unlock_pi;

	if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock)
		detect_deadlock = false;

	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;

		if (top_waiter != task_top_pi_waiter(task)) {
			if (!detect_deadlock)
				goto out_unlock_pi;
			else
				requeue = false;
		}
	}

	if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		if (!detect_deadlock)
			goto out_unlock_pi;
		else
			requeue = false;
	}

	lock = waiter->lock;

	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irq(&task->pi_lock);
		cpu_relax();
		goto retry;
	}

	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		ret = -EDEADLK;

		if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter && orig_waiter->ww_ctx)
			ret = 0;

		raw_spin_unlock(&lock->wait_lock);
		goto out_unlock_pi;
	}

	if (!requeue) {
		raw_spin_unlock(&task->pi_lock);
		put_task_struct(task);

		if (!rt_mutex_owner(lock)) {
			raw_spin_unlock_irq(&lock->wait_lock);
			return 0;
		}

		task = get_task_struct(rt_mutex_owner(lock));
		raw_spin_lock(&task->pi_lock);

		next_lock = task_blocked_on_lock(task);

		top_waiter = rt_mutex_top_waiter(lock);

		raw_spin_unlock(&task->pi_lock);
		raw_spin_unlock_irq(&lock->wait_lock);

		if (!next_lock)
			goto out_put_task;
		goto again;
	}

	prerequeue_top_waiter = rt_mutex_top_waiter(lock);

	rt_mutex_dequeue(lock, waiter);

	waiter_update_prio(waiter, task);

	rt_mutex_enqueue(lock, waiter);

	raw_spin_unlock(&task->pi_lock);
	put_task_struct(task);

	if (!rt_mutex_owner(lock)) {
		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
			wake_up_state(waiter->task, waiter->wake_state);
		raw_spin_unlock_irq(&lock->wait_lock);
		return 0;
	}

	task = get_task_struct(rt_mutex_owner(lock));
	raw_spin_lock(&task->pi_lock);

	if (waiter == rt_mutex_top_waiter(lock)) {
		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
		rt_mutex_enqueue_pi(task, waiter);
		rt_mutex_adjust_prio(task);

	} else if (prerequeue_top_waiter == waiter) {
		rt_mutex_dequeue_pi(task, waiter);
		waiter = rt_mutex_top_waiter(lock);
		rt_mutex_enqueue_pi(task, waiter);
		rt_mutex_adjust_prio(task);
	} else {
	}

	next_lock = task_blocked_on_lock(task);

	top_waiter = rt_mutex_top_waiter(lock);

	raw_spin_unlock(&task->pi_lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	if (!next_lock)
		goto out_put_task;

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

out_unlock_pi:
	raw_spin_unlock_irq(&task->pi_lock);
out_put_task:
	put_task_struct(task);

	return ret;
}

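/*
 * Try to take the lock for @task.  Must be called with lock->wait_lock held
 * and interrupts disabled.  @waiter is @task's already enqueued waiter, or
 * NULL for a trylock style attempt.  Returns 1 if the lock was acquired,
 * 0 otherwise.
 */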
static int __sched
try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
		     struct rt_mutex_waiter *waiter)
{
	lockdep_assert_held(&lock->wait_lock);

	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock))
		return 0;

	if (waiter) {
		struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);

		if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) {
			rt_mutex_dequeue(lock, waiter);
		} else {
			return 0;
		}
	} else {
		if (rt_mutex_has_waiters(lock)) {
			if (!rt_mutex_steal(task_to_waiter(task),
					    rt_mutex_top_waiter(lock)))
				return 0;
		} else {
			goto takeit;
		}
	}

	raw_spin_lock(&task->pi_lock);
	task->pi_blocked_on = NULL;

	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
	raw_spin_unlock(&task->pi_lock);

takeit:
	rt_mutex_set_owner(lock, task);

	return 1;
}

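/*
 * Enqueue @waiter for @task on @lock, update the owner's PI state and walk
 * the PI chain if required.  Called with lock->wait_lock held and interrupts
 * disabled; the lock is dropped and reacquired around the chain walk.
 * Returns 0 on success, -EDEADLK on a self deadlock, or a ww_mutex error.
 */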
static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
					   struct rt_mutex_waiter *waiter,
					   struct task_struct *task,
					   struct ww_acquire_ctx *ww_ctx,
					   enum rtmutex_chainwalk chwalk)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	struct rt_mutex_base *next_lock;
	int chain_walk = 0, res;

	lockdep_assert_held(&lock->wait_lock);

	if (owner == task && !(build_ww_mutex() && ww_ctx))
		return -EDEADLK;

	raw_spin_lock(&task->pi_lock);
	waiter->task = task;
	waiter->lock = lock;
	waiter_update_prio(waiter, task);

	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	rt_mutex_enqueue(lock, waiter);

	task->pi_blocked_on = waiter;

	raw_spin_unlock(&task->pi_lock);

	if (build_ww_mutex() && ww_ctx) {
		struct rt_mutex *rtm;

		rtm = container_of(lock, struct rt_mutex, rtmutex);
		res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx);
		if (res) {
			raw_spin_lock(&task->pi_lock);
			rt_mutex_dequeue(lock, waiter);
			task->pi_blocked_on = NULL;
			raw_spin_unlock(&task->pi_lock);
			return res;
		}
	}

	if (!owner)
		return 0;

	raw_spin_lock(&owner->pi_lock);
	if (waiter == rt_mutex_top_waiter(lock)) {
		rt_mutex_dequeue_pi(owner, top_waiter);
		rt_mutex_enqueue_pi(owner, waiter);

		rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
		chain_walk = 1;
	}

	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);

	if (!chain_walk || !next_lock)
		return 0;

	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
					 next_lock, waiter, task);

	raw_spin_lock_irq(&lock->wait_lock);

	return res;
}

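/*
 * Remove the top waiter from the current owner's PI tree and queue it for
 * wakeup.  lock->owner is left set to RT_MUTEX_HAS_WAITERS, so the woken
 * waiter must finish the acquisition under wait_lock.  Preemption stays
 * disabled until rt_mutex_wake_up_q() runs.
 */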
static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
					    struct rt_mutex_base *lock)
{
	struct rt_mutex_waiter *waiter;

	raw_spin_lock(&current->pi_lock);

	waiter = rt_mutex_top_waiter(lock);

	rt_mutex_dequeue_pi(current, waiter);
	rt_mutex_adjust_prio(current);

	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

	preempt_disable();
	rt_mutex_wake_q_add(wqh, waiter);
	raw_spin_unlock(&current->pi_lock);
}

static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{
	int ret = try_to_take_rt_mutex(lock, current, NULL);

	fixup_rt_mutex_waiters(lock);

	return ret;
}

static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{
	unsigned long flags;
	int ret;

	if (rt_mutex_owner(lock))
		return 0;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	ret = __rt_mutex_slowtrylock(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}

static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock)
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 1;

	return rt_mutex_slowtrylock(lock);
}

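/*
 * Slow path unlock, used when the fast path release cmpxchg failed:
 * wake the top waiter, or clear the owner if all waiters vanished in
 * the meantime.
 */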
static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
{
	DEFINE_RT_WAKE_Q(wqh);
	unsigned long flags;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	debug_rt_mutex_unlock(lock);

	while (!rt_mutex_has_waiters(lock)) {
		if (unlock_rt_mutex_safe(lock, flags) == true)
			return;

		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}

	mark_wakeup_next_waiter(&wqh, lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	rt_mutex_wake_up_q(&wqh);
}

static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
{
	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
		return;

	rt_mutex_slowunlock(lock);
}

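/*
 * On SMP, spin instead of sleeping as long as the lock owner is running on
 * another CPU and we remain the top waiter.  Returns false when the caller
 * should give up spinning and schedule().
 */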
#ifdef CONFIG_SMP
static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
				  struct rt_mutex_waiter *waiter,
				  struct task_struct *owner)
{
	bool res = true;

	rcu_read_lock();
	for (;;) {
		if (owner != rt_mutex_owner(lock))
			break;

		barrier();

		if (!owner_on_cpu(owner) || need_resched() ||
		    !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
			res = false;
			break;
		}
		cpu_relax();
	}
	rcu_read_unlock();
	return res;
}
#else
static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
				  struct rt_mutex_waiter *waiter,
				  struct task_struct *owner)
{
	return false;
}
#endif

#ifdef RT_MUTEX_BUILD_MUTEX

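/*
 * Remove @waiter from the wait tree of @lock and fix up the owner's PI
 * state; walks the PI chain if the owner is itself blocked on another lock.
 * Called with lock->wait_lock held and interrupts disabled; the lock is
 * dropped and reacquired around the chain walk.
 */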
static void __sched remove_waiter(struct rt_mutex_base *lock,
				  struct rt_mutex_waiter *waiter)
{
	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_base *next_lock;

	lockdep_assert_held(&lock->wait_lock);

	raw_spin_lock(&current->pi_lock);
	rt_mutex_dequeue(lock, waiter);
	current->pi_blocked_on = NULL;
	raw_spin_unlock(&current->pi_lock);

	if (!owner || !is_top_waiter)
		return;

	raw_spin_lock(&owner->pi_lock);

	rt_mutex_dequeue_pi(owner, waiter);

	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));

	rt_mutex_adjust_prio(owner);

	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);

	if (!next_lock)
		return;

	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
				   next_lock, NULL, current);

	raw_spin_lock_irq(&lock->wait_lock);
}

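/*
 * Wait loop of the slow path: retry taking the lock, otherwise block (or
 * spin on the owner while we are the top waiter) until the lock is acquired,
 * the timeout expires, a signal is pending, or a ww_mutex wound/kill fires.
 * Called and returns with lock->wait_lock held and interrupts disabled;
 * the lock is dropped while sleeping.
 */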
static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
					   struct ww_acquire_ctx *ww_ctx,
					   unsigned int state,
					   struct hrtimer_sleeper *timeout,
					   struct rt_mutex_waiter *waiter)
{
	struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
	struct task_struct *owner;
	int ret = 0;

	for (;;) {
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		if (timeout && !timeout->task) {
			ret = -ETIMEDOUT;
			break;
		}
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			break;
		}

		if (build_ww_mutex() && ww_ctx) {
			ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
			if (ret)
				break;
		}

		if (waiter == rt_mutex_top_waiter(lock))
			owner = rt_mutex_owner(lock);
		else
			owner = NULL;
		raw_spin_unlock_irq(&lock->wait_lock);

		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
			schedule();

		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(state);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
					     struct rt_mutex_waiter *w)
{
	if (res != -EDEADLOCK || detect_deadlock)
		return;

	if (build_ww_mutex() && w->ww_ctx)
		return;

	WARN(1, "rtmutex deadlock detected\n");
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

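/*
 * Slow path lock acquisition.  Must be called with lock->wait_lock held and
 * interrupts disabled.  Tries a direct acquisition first, otherwise enqueues
 * @waiter and blocks; on failure the waiter is removed again and deadlocks
 * are reported.
 */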
static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
				       struct ww_acquire_ctx *ww_ctx,
				       unsigned int state,
				       enum rtmutex_chainwalk chwalk,
				       struct rt_mutex_waiter *waiter)
{
	struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
	struct ww_mutex *ww = ww_container_of(rtm);
	int ret;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, current, NULL)) {
		if (build_ww_mutex() && ww_ctx) {
			__ww_mutex_check_waiters(rtm, ww_ctx);
			ww_mutex_lock_acquired(ww, ww_ctx);
		}
		return 0;
	}

	set_current_state(state);

	trace_contention_begin(lock, LCB_F_RT);

	ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk);
	if (likely(!ret))
		ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);

	if (likely(!ret)) {
		if (build_ww_mutex() && ww_ctx) {
			if (!ww_ctx->is_wait_die)
				__ww_mutex_check_waiters(rtm, ww_ctx);
			ww_mutex_lock_acquired(ww, ww_ctx);
		}
	} else {
		__set_current_state(TASK_RUNNING);
		remove_waiter(lock, waiter);
		rt_mutex_handle_deadlock(ret, chwalk, waiter);
	}

	fixup_rt_mutex_waiters(lock);

	trace_contention_end(lock, ret);

	return ret;
}

static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
					     struct ww_acquire_ctx *ww_ctx,
					     unsigned int state)
{
	struct rt_mutex_waiter waiter;
	int ret;

	rt_mutex_init_waiter(&waiter);
	waiter.ww_ctx = ww_ctx;

	ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK,
				  &waiter);

	debug_rt_mutex_free_waiter(&waiter);
	return ret;
}

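/*
 * rt_mutex_slowlock - locking slow path, invoked when the fast path
 * cmpxchg failed.  wait_lock is held with interrupts disabled for the
 * complete lock operation.
 */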
static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
				     struct ww_acquire_ctx *ww_ctx,
				     unsigned int state)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}

static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
					   unsigned int state)
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 0;

	return rt_mutex_slowlock(lock, NULL, state);
}
#endif

#ifdef RT_MUTEX_BUILD_SPINLOCKS

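/*
 * Slow path for the RT-lock based spinning locks (spinlock_t/rwlock_t
 * substitutions on PREEMPT_RT): the task blocks in TASK_RTLOCK_WAIT while
 * its real wait state is saved and restored, and the acquisition never
 * fails.
 */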
static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
{
	struct rt_mutex_waiter waiter;
	struct task_struct *owner;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, current, NULL))
		return;

	rt_mutex_init_rtlock_waiter(&waiter);

	current_save_and_set_rtlock_wait_state();

	trace_contention_begin(lock, LCB_F_RT);

	task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);

	for (;;) {
		if (try_to_take_rt_mutex(lock, current, &waiter))
			break;

		if (&waiter == rt_mutex_top_waiter(lock))
			owner = rt_mutex_owner(lock);
		else
			owner = NULL;
		raw_spin_unlock_irq(&lock->wait_lock);

		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
			schedule_rtlock();

		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(TASK_RTLOCK_WAIT);
	}

	current_restore_rtlock_saved_state();

	fixup_rt_mutex_waiters(lock);
	debug_rt_mutex_free_waiter(&waiter);

	trace_contention_end(lock, 0);
}

static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	rtlock_slowlock_locked(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
}

#endif