0001
0002
0003 #ifndef WW_RT
0004
0005 #define MUTEX mutex
0006 #define MUTEX_WAITER mutex_waiter
0007
0008 static inline struct mutex_waiter *
0009 __ww_waiter_first(struct mutex *lock)
0010 {
0011 struct mutex_waiter *w;
0012
0013 w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
0014 if (list_entry_is_head(w, &lock->wait_list, list))
0015 return NULL;
0016
0017 return w;
0018 }
0019
0020 static inline struct mutex_waiter *
0021 __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
0022 {
0023 w = list_next_entry(w, list);
0024 if (list_entry_is_head(w, &lock->wait_list, list))
0025 return NULL;
0026
0027 return w;
0028 }
0029
0030 static inline struct mutex_waiter *
0031 __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
0032 {
0033 w = list_prev_entry(w, list);
0034 if (list_entry_is_head(w, &lock->wait_list, list))
0035 return NULL;
0036
0037 return w;
0038 }
0039
0040 static inline struct mutex_waiter *
0041 __ww_waiter_last(struct mutex *lock)
0042 {
0043 struct mutex_waiter *w;
0044
0045 w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
0046 if (list_entry_is_head(w, &lock->wait_list, list))
0047 return NULL;
0048
0049 return w;
0050 }
0051
0052 static inline void
0053 __ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
0054 {
0055 struct list_head *p = &lock->wait_list;
0056 if (pos)
0057 p = &pos->list;
0058 __mutex_add_waiter(lock, waiter, p);
0059 }
0060
/* Current owner task of @lock, as decoded from the mutex owner field. */
static inline struct task_struct *
__ww_mutex_owner(struct mutex *lock)
{
	return __mutex_owner(lock);
}
0066
0067 static inline bool
0068 __ww_mutex_has_waiters(struct mutex *lock)
0069 {
0070 return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
0071 }
0072
/* Take the mutex-internal wait_lock protecting the wait list. */
static inline void lock_wait_lock(struct mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);
}
0077
/* Release the mutex-internal wait_lock. */
static inline void unlock_wait_lock(struct mutex *lock)
{
	raw_spin_unlock(&lock->wait_lock);
}
0082
/* Lockdep check: caller must hold @lock's wait_lock. */
static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
{
	lockdep_assert_held(&lock->wait_lock);
}
0087
0088 #else
0089
0090 #define MUTEX rt_mutex
0091 #define MUTEX_WAITER rt_mutex_waiter
0092
0093 static inline struct rt_mutex_waiter *
0094 __ww_waiter_first(struct rt_mutex *lock)
0095 {
0096 struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
0097 if (!n)
0098 return NULL;
0099 return rb_entry(n, struct rt_mutex_waiter, tree_entry);
0100 }
0101
0102 static inline struct rt_mutex_waiter *
0103 __ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
0104 {
0105 struct rb_node *n = rb_next(&w->tree_entry);
0106 if (!n)
0107 return NULL;
0108 return rb_entry(n, struct rt_mutex_waiter, tree_entry);
0109 }
0110
0111 static inline struct rt_mutex_waiter *
0112 __ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
0113 {
0114 struct rb_node *n = rb_prev(&w->tree_entry);
0115 if (!n)
0116 return NULL;
0117 return rb_entry(n, struct rt_mutex_waiter, tree_entry);
0118 }
0119
0120 static inline struct rt_mutex_waiter *
0121 __ww_waiter_last(struct rt_mutex *lock)
0122 {
0123 struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
0124 if (!n)
0125 return NULL;
0126 return rb_entry(n, struct rt_mutex_waiter, tree_entry);
0127 }
0128
/*
 * RT counterpart of the list-based __ww_waiter_add(): intentionally a
 * no-op. NOTE(review): presumably the rtmutex code inserts waiters into
 * its rbtree in the proper order itself, so the ww layer has no
 * positioning to do here — confirm against the rtmutex enqueue path.
 */
static inline void
__ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
{
	/* No insertion needed for the rbtree-backed rtmutex. */
}
0134
/* Current owner task of the underlying rtmutex. */
static inline struct task_struct *
__ww_mutex_owner(struct rt_mutex *lock)
{
	return rt_mutex_owner(&lock->rtmutex);
}
0140
/* True when the underlying rtmutex has blocked waiters. */
static inline bool
__ww_mutex_has_waiters(struct rt_mutex *lock)
{
	return rt_mutex_has_waiters(&lock->rtmutex);
}
0146
/* Take the rtmutex-internal wait_lock protecting the waiter tree. */
static inline void lock_wait_lock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->rtmutex.wait_lock);
}
0151
/* Release the rtmutex-internal wait_lock. */
static inline void unlock_wait_lock(struct rt_mutex *lock)
{
	raw_spin_unlock(&lock->rtmutex.wait_lock);
}
0156
/* Lockdep check: caller must hold the rtmutex's wait_lock. */
static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
{
	lockdep_assert_held(&lock->rtmutex.wait_lock);
}
0161
0162 #endif
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
/*
 * Associate ww_mutex @ww with acquire context @ww_ctx after a
 * successful acquisition: bump the context's acquired count and record
 * the context on the lock.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef DEBUG_WW_MUTEXES
	/*
	 * A stale ww->ctx here means the lock was previously acquired
	 * through the ww API but released through a plain unlock path,
	 * leaving the old context behind.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Acquiring more locks after ww_acquire_done() is a caller bug.
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK the caller must retry with the lock it
		 * was told it contended on, not a different one.
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * The -EDEADLK retry must start from zero held locks;
		 * everything was supposed to be dropped before blocking
		 * on the contended lock.
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Mixing ww_classes within one acquire context is not allowed.
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}
0220
0221
0222
0223
0224
0225
0226
/*
 * Return true when context @a has lower precedence than @b, i.e. @a is
 * the one that should back off (die / be wounded) when the two
 * conflict.
 */
static inline bool
__ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
/*
 * Under WW_RT, the scheduling priority of the owning tasks takes
 * precedence over the acquisition stamps when either task runs at an
 * RT (or deadline) priority.
 */
#ifdef WW_RT
	/* kernel prio: numerically smaller means higher priority */
	int a_prio = a->task->prio;
	int b_prio = b->task->prio;

	if (rt_prio(a_prio) || rt_prio(b_prio)) {

		if (a_prio > b_prio)
			return true;

		if (a_prio < b_prio)
			return false;

		/* equal prio: for deadline tasks, earlier deadline wins */

		if (dl_prio(a_prio)) {
			if (dl_time_before(b->task->dl.deadline,
					   a->task->dl.deadline))
				return true;

			if (dl_time_before(a->task->dl.deadline,
					   b->task->dl.deadline))
				return false;
		}

		/* equal prio (and deadline): fall back to stamp order */
	}
#endif

	/* Wrap-safe stamp comparison: true when @a is the younger context. */
	return (signed long)(a->stamp - b->stamp) > 0;
}
0267
0268
0269
0270
0271
0272
0273
0274
0275
/*
 * Wait-Die: wake a lesser waiter context (when it holds locks) such
 * that it can die.
 *
 * Returns true whenever @ww_ctx uses the wait-die algorithm (even when
 * no wakeup was issued), so the caller can stop considering wound-wait
 * handling for this pairing.
 */
static bool
__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	/*
	 * Only wake @waiter when it both holds other locks (acquired > 0)
	 * and has lower precedence than @ww_ctx — it must back off and
	 * restart its transaction.
	 */
	if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
#ifndef WW_RT
		debug_mutex_wake_waiter(lock, waiter);
#endif
		wake_up_process(waiter->task);
	}

	return true;
}
0292
0293
0294
0295
0296
0297
0298
0299
/*
 * Wound-Wait: wound the lock holder's context @hold_ctx when the
 * contending context @ww_ctx has higher precedence, and wake the owner
 * so it notices.
 *
 * Returns true when the holder was wounded (caller can stop scanning).
 */
static bool __ww_mutex_wound(struct MUTEX *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __ww_mutex_owner(lock);

	lockdep_assert_wait_lock_held(lock);

	/*
	 * NOTE(review): a NULL @hold_ctx can presumably happen when we
	 * race with the fastpath setting ww->ctx; in that case the check
	 * is retried later via __ww_mutex_check_waiters() — confirm
	 * against ww_mutex_set_context_fastpath().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * NOTE(review): @owner may be NULL transiently during unlock;
	 * nothing to wound then. Presumably a non-NULL owner cannot go
	 * away while we hold the wait_lock — verify.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_less(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * Wake the owner (unless it is us) so it can observe its
		 * wounded state and back off; wake_up_process() provides
		 * the ordering against the owner's state checks.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}
0341
0342
0343
0344
0345
0346
0347
0348
0349
0350
0351
0352
0353
/*
 * We just acquired @lock under @ww_ctx; walk the wait list and check
 * whether any queued context must die (wait-die) or wounds us
 * (wound-wait). Stop at the first context for which either applies.
 */
static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur;

	lockdep_assert_wait_lock_held(lock);

	for (cur = __ww_waiter_first(lock); cur;
	     cur = __ww_waiter_next(lock, cur)) {

		/* Waiters without a ww context don't participate. */
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}
0372
0373
0374
0375
0376
/*
 * After taking the lock through the fastpath, publish the acquire
 * context and, if we raced with waiters queueing up, run the wound/die
 * checks under the wait_lock.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx store above must be visible before the waiters
	 * check below, otherwise a contending waiter could be missed.
	 *
	 * NOTE(review): this barrier presumably pairs with the smp_mb()
	 * in __ww_mutex_add_waiter(), guaranteeing that we observe the
	 * waiter bit and/or the waiter observes ww->ctx — confirm the
	 * pairing before changing either side.
	 */
	smp_mb();

	/*
	 * No waiters: nobody to wound or to tell to die; done.
	 */
	if (likely(!__ww_mutex_has_waiters(&lock->base)))
		return;

	/*
	 * We raced in the fastpath: check under the wait_lock whether
	 * any queued waiter needs to die or wounds us.
	 */
	lock_wait_lock(&lock->base);
	__ww_mutex_check_waiters(&lock->base, ctx);
	unlock_wait_lock(&lock->base);
}
0411
/*
 * Kill the current transaction: return -EDEADLK so the caller backs
 * off and restarts, recording the contended lock for the debug-mode
 * retry check. A context that holds no other locks (acquired == 0)
 * can simply keep waiting, so return 0 then.
 */
static __always_inline int
__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef DEBUG_WW_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
/*
 * Check whether the current waiter must abort its lock attempt.
 *
 * Wound-Wait: if we have been wounded, kill ourself.
 * Wait-Die:   if the lock is held by — or already waited on by — a
 *             higher-precedence context, kill ourself.
 *
 * Returns 0 to keep waiting, or -EDEADLK (via __ww_mutex_kill()) to
 * back off.
 */
static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct MUTEX_WAITER *cur;

	/* Holding no other locks, we can always just keep waiting. */
	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		/* Wound-Wait: back off only when someone wounded us. */
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	/* Wait-Die: die when the current holder outranks us. */
	if (hold_ctx && __ww_ctx_less(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * Any waiter with a context ahead of us in the list outranks us
	 * (the list is kept ordered by __ww_mutex_add_waiter()), so the
	 * first such waiter found means we must die.
	 */
	for (cur = __ww_waiter_prev(lock, waiter); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}
0476
0477
0478
0479
0480
0481
0482
0483
0484
0485
0486
0487
/*
 * Add @waiter to @lock's wait list, keeping contexts ordered so that
 * higher-precedence (older) contexts sit closer to the front and are
 * preferred for acquisition. Waiters without a context stay in FIFO
 * order where they were queued.
 *
 * For Wait-Die, die immediately when a higher-precedence context is
 * already queued (no point waiting just to die later). For Wound-Wait,
 * after queueing, wound the current owner if it is of lower precedence.
 *
 * Returns 0 on success or -EDEADLK when the caller must back off.
 */
static inline int
__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
		      struct MUTEX *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct MUTEX_WAITER *cur, *pos = NULL;
	bool is_wait_die;

	/* Context-less waiters simply go to the back of the list. */
	if (!ww_ctx) {
		__ww_waiter_add(lock, waiter, NULL);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Scan backwards for the insertion point: the first queued
	 * context that outranks us ends the scan; every lower-precedence
	 * context we pass becomes a candidate insertion position and,
	 * under Wait-Die, is woken so it can die.
	 */
	for (cur = __ww_waiter_last(lock); cur;
	     cur = __ww_waiter_prev(lock, cur)) {

		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_less(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: an older context is already waiting;
			 * queueing behind it would only postpone our
			 * inevitable death, so die now (if we hold locks).
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = cur;

		/* Wait-Die: make sure lower-precedence waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__ww_waiter_add(lock, waiter, pos);

	/*
	 * Wound-Wait: if the mutex is held by a lower-precedence
	 * context, wound it so we may proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * NOTE(review): pairs with the smp_mb() in
		 * ww_mutex_set_context_fastpath() — orders our enqueue
		 * against the ww->ctx load so that either we or the
		 * fastpath wound @ww->ctx. Confirm before altering.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}
0558
0559 static inline void __ww_mutex_unlock(struct ww_mutex *lock)
0560 {
0561 if (lock->ctx) {
0562 #ifdef DEBUG_WW_MUTEXES
0563 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
0564 #endif
0565 if (lock->ctx->acquired > 0)
0566 lock->ctx->acquired--;
0567 lock->ctx = NULL;
0568 }
0569 }