0001
0002
0003 #include <linux/sched/task.h>
0004 #include <linux/sched/signal.h>
0005 #include <linux/freezer.h>
0006
0007 #include "futex.h"
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
/*
 * futex_wake_mark - unqueue a waiter and defer its wakeup
 * @wake_q:	wake queue the task is appended to; the caller performs the
 *		actual wakeup later via wake_up_q(), outside the hb lock
 * @q:		the futex_q of the waiter to dequeue
 *
 * Must be called with the hash-bucket lock protecting @q held.
 * Refuses to operate on PI futexes; those have dedicated wake paths.
 */
void futex_wake_mark(struct wake_q_head *wake_q, struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	/* Hold a task reference until wake_up_q() consumes it. */
	get_task_struct(p);
	__futex_unqueue(q);
	/*
	 * The waiting task can free the futex_q as soon as it observes
	 * q->lock_ptr == NULL, without taking any locks. The release
	 * ordering here keeps this store from being hoisted above the
	 * plist removal in __futex_unqueue().
	 */
	smp_store_release(&q->lock_ptr, NULL);

	/*
	 * Queue the task for later wakeup; wake_q_add_safe() consumes
	 * the reference acquired above.
	 */
	wake_q_add_safe(wake_q, p);
}
0139
0140
0141
0142
/*
 * futex_wake - wake up to @nr_wake waiters hashed on @uaddr whose wait
 * bitset intersects @bitset.
 *
 * Returns the number of tasks woken, or a negative error code
 * (-EINVAL for an empty bitset or a PI waiter on the queue, or an
 * error from key resolution).
 */
int futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;
	DEFINE_WAKE_Q(wake_q);

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;

	hb = futex_hash(&key);

	/* Avoid taking the lock when there is nobody to wake (ret == 0). */
	if (!futex_hb_waiters_pending(hb))
		return ret;

	spin_lock(&hb->lock);

	plist_for_each_entry_safe(this, next, &hb->chain, list) {
		if (futex_match (&this->key, &key)) {
			/* PI waiters must not be woken through this path. */
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				break;
			}

			/* Wake only waiters whose bitset overlaps ours. */
			if (!(this->bitset & bitset))
				continue;

			futex_wake_mark(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	/* Perform the deferred wakeups outside the hb lock. */
	wake_up_q(&wake_q);
	return ret;
}
0187
0188 static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
0189 {
0190 unsigned int op = (encoded_op & 0x70000000) >> 28;
0191 unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
0192 int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
0193 int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
0194 int oldval, ret;
0195
0196 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
0197 if (oparg < 0 || oparg > 31) {
0198 char comm[sizeof(current->comm)];
0199
0200
0201
0202
0203 pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
0204 get_task_comm(comm, current), oparg);
0205 oparg &= 31;
0206 }
0207 oparg = 1 << oparg;
0208 }
0209
0210 pagefault_disable();
0211 ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
0212 pagefault_enable();
0213 if (ret)
0214 return ret;
0215
0216 switch (cmp) {
0217 case FUTEX_OP_CMP_EQ:
0218 return oldval == cmparg;
0219 case FUTEX_OP_CMP_NE:
0220 return oldval != cmparg;
0221 case FUTEX_OP_CMP_LT:
0222 return oldval < cmparg;
0223 case FUTEX_OP_CMP_GE:
0224 return oldval >= cmparg;
0225 case FUTEX_OP_CMP_LE:
0226 return oldval <= cmparg;
0227 case FUTEX_OP_CMP_GT:
0228 return oldval > cmparg;
0229 default:
0230 return -ENOSYS;
0231 }
0232 }
0233
0234
0235
0236
0237
/*
 * futex_wake_op - wake waiters on two futexes, with an atomic op on the
 * second word deciding whether the second queue is woken.
 *
 * Wakes up to @nr_wake waiters on @uaddr1; if the encoded @op applied to
 * *@uaddr2 yields a true comparison, additionally wakes up to @nr_wake2
 * waiters on @uaddr2. Returns total tasks woken or a negative error.
 */
int futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
		  int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct futex_q *this, *next;
	int ret, op_ret;
	DEFINE_WAKE_Q(wake_q);

retry:
	ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;
	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
	if (unlikely(ret != 0))
		return ret;

	hb1 = futex_hash(&key1);
	hb2 = futex_hash(&key2);

retry_private:
	/* Lock both buckets in a deadlock-safe order. */
	double_lock_hb(hb1, hb2);
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {
		/* Must drop the locks before faulting the page in. */
		double_unlock_hb(hb1, hb2);

		if (!IS_ENABLED(CONFIG_MMU) ||
		    unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) {
			/*
			 * Without an MMU there are no faults to resolve;
			 * any other error code is not retryable either.
			 */
			ret = op_ret;
			return ret;
		}

		if (op_ret == -EFAULT) {
			/* Fault the word in writable, then retry the op. */
			ret = fault_in_user_writeable(uaddr2);
			if (ret)
				return ret;
		}

		cond_resched();
		/* Private futex keys stay valid; skip re-resolving them. */
		if (!(flags & FLAGS_SHARED))
			goto retry_private;
		goto retry;
	}

	/* First queue: always wake up to nr_wake matching waiters. */
	plist_for_each_entry_safe(this, next, &hb1->chain, list) {
		if (futex_match (&this->key, &key1)) {
			if (this->pi_state || this->rt_waiter) {
				ret = -EINVAL;
				goto out_unlock;
			}
			futex_wake_mark(&wake_q, this);
			if (++ret >= nr_wake)
				break;
		}
	}

	/* Second queue: only if the encoded comparison was true. */
	if (op_ret > 0) {
		op_ret = 0;
		plist_for_each_entry_safe(this, next, &hb2->chain, list) {
			if (futex_match (&this->key, &key2)) {
				if (this->pi_state || this->rt_waiter) {
					ret = -EINVAL;
					goto out_unlock;
				}
				futex_wake_mark(&wake_q, this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

out_unlock:
	double_unlock_hb(hb1, hb2);
	/* Deferred wakeups happen outside the hb locks. */
	wake_up_q(&wake_q);
	return ret;
}
0319
0320 static long futex_wait_restart(struct restart_block *restart);
0321
0322
0323
0324
0325
0326
0327
/*
 * futex_wait_queue - futex_queue() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or NULL for no timeout
 */
void futex_wait_queue(struct futex_hash_bucket *hb, struct futex_q *q,
		      struct hrtimer_sleeper *timeout)
{
	/*
	 * The task state must be set before another task can wake it:
	 * set_current_state() includes a full barrier, and futex_queue()
	 * drops hb->lock on completion, serializing against the waker's
	 * hash-list access.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	futex_queue(q, hb);

	/* Arm the timer (absolute expiry) only after we are queued. */
	if (timeout)
		hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS);

	/*
	 * If we have already been removed from the hash list, a waker got
	 * to us first and schedule() can be skipped entirely.
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired (timeout->task cleared),
		 * current is already flagged for rescheduling; only sleep
		 * when there is no timeout or it has yet to fire.
		 */
		if (!timeout || timeout->task)
			freezable_schedule();
	}
	__set_current_state(TASK_RUNNING);
}
0359
0360
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
0371 static int unqueue_multiple(struct futex_vector *v, int count)
0372 {
0373 int ret = -1, i;
0374
0375 for (i = 0; i < count; i++) {
0376 if (!futex_unqueue(&v[i].q))
0377 ret = i;
0378 }
0379
0380 return ret;
0381 }
0382
0383
0384
0385
0386
0387
0388
0389
0390
0391
0392
0393
0394
0395
0396
0397
0398
/*
 * futex_wait_multiple_setup - prepare to wait on and enqueue several futexes
 * @vs:		the array of futexes to wait on
 * @count:	number of entries in @vs
 * @woken:	set to the index of the last woken futex when a wakeup races
 *		with the setup (see return value 1 below)
 *
 * Resolves the key for every futex, then — with the task already marked
 * TASK_INTERRUPTIBLE — locks each bucket, re-reads the user word and
 * enqueues the waiter when the word still matches the expected value.
 * On any mismatch or fault, everything enqueued so far is rolled back.
 *
 * Return:
 *  1  - one of the previously enqueued futexes was woken during rollback
 *       (*woken holds its index)
 *  0  - all futexes were successfully enqueued
 *  <0 - -EFAULT / -EWOULDBLOCK / key-resolution error
 */
static int futex_wait_multiple_setup(struct futex_vector *vs, int count, int *woken)
{
	struct futex_hash_bucket *hb;
	bool retry = false;
	int ret, i;
	u32 uval;

	/*
	 * On the retry path (after a fault), private futex keys need not
	 * be re-resolved — only shared ones can have changed mappings.
	 */
retry:
	for (i = 0; i < count; i++) {
		if ((vs[i].w.flags & FUTEX_PRIVATE_FLAG) && retry)
			continue;

		ret = get_futex_key(u64_to_user_ptr(vs[i].w.uaddr),
				    !(vs[i].w.flags & FUTEX_PRIVATE_FLAG),
				    &vs[i].q.key, FUTEX_READ);

		if (unlikely(ret))
			return ret;
	}

	/* Must be set before the value checks, so a waker can't be missed. */
	set_current_state(TASK_INTERRUPTIBLE);

	for (i = 0; i < count; i++) {
		u32 __user *uaddr = (u32 __user *)(unsigned long)vs[i].w.uaddr;
		struct futex_q *q = &vs[i].q;
		u32 val = (u32)vs[i].w.val;

		hb = futex_q_lock(q);
		ret = futex_get_value_locked(&uval, uaddr);

		if (!ret && uval == val) {
			/*
			 * The user word still holds the expected value:
			 * enqueue this waiter and move to the next one.
			 */
			futex_queue(q, hb);
			continue;
		}

		/* Fault or value mismatch: abort and roll back. */
		futex_q_unlock(hb);
		__set_current_state(TASK_RUNNING);

		/*
		 * Undo all prior enqueues; if any of them was already
		 * woken, report that wakeup to the caller instead of
		 * the error.
		 */
		*woken = unqueue_multiple(vs, i);
		if (*woken >= 0)
			return 1;

		if (ret) {
			/*
			 * The atomic read faulted with the page locked;
			 * fault the page in with a plain get_user() and
			 * restart the whole setup.
			 */
			if (get_user(uval, uaddr))
				return -EFAULT;

			retry = true;
			goto retry;
		}

		if (uval != val)
			return -EWOULDBLOCK;
	}

	return 0;
}
0486
0487
0488
0489
0490
0491
0492
0493
0494
0495
0496 static void futex_sleep_multiple(struct futex_vector *vs, unsigned int count,
0497 struct hrtimer_sleeper *to)
0498 {
0499 if (to && !to->task)
0500 return;
0501
0502 for (; count; count--, vs++) {
0503 if (!READ_ONCE(vs->q.lock_ptr))
0504 return;
0505 }
0506
0507 freezable_schedule();
0508 }
0509
0510
0511
0512
0513
0514
0515
0516
0517
0518
0519
0520
0521
0522
0523
/*
 * futex_wait_multiple - wait on a set of futexes until one is woken
 * @vs:		the array of futexes to wait on
 * @count:	number of entries in @vs
 * @to:		the prepared hrtimer_sleeper, or NULL for no timeout
 *
 * Return: the index of the woken futex, or -ETIMEDOUT / -ERESTARTSYS /
 * a setup error.
 */
int futex_wait_multiple(struct futex_vector *vs, unsigned int count,
			struct hrtimer_sleeper *to)
{
	int ret, hint = 0;

	if (to)
		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);

	while (1) {
		ret = futex_wait_multiple_setup(vs, count, &hint);
		if (ret) {
			if (ret > 0) {
				/* A futex was woken during setup. */
				ret = hint;
			}
			return ret;
		}

		futex_sleep_multiple(vs, count, to);

		__set_current_state(TASK_RUNNING);

		/* >= 0 means one of them was woken: report its index. */
		ret = unqueue_multiple(vs, count);
		if (ret >= 0)
			return ret;

		if (to && !to->task)
			return -ETIMEDOUT;
		else if (signal_pending(current))
			return -ERESTARTSYS;
		/*
		 * The remaining case is a spurious wakeup: re-setup and
		 * go back to sleep.
		 */
	}
}
0560
0561
0562
0563
0564
0565
0566
0567
0568
0569
0570
0571
0572
0573
0574
0575
0576
/*
 * futex_wait_setup - prepare to wait on a futex
 * @uaddr:	the futex userspace address
 * @val:	the expected value of *@uaddr
 * @flags:	futex flags (FLAGS_SHARED, etc.)
 * @q:		the associated futex_q
 * @hb:		storage for the hash bucket pointer, returned locked
 *
 * Resolves the futex key, locks the hash bucket and verifies that the
 * user word still holds @val — the check happens under the bucket lock
 * via an atomic (non-faulting) read, so a concurrent waker cannot slip
 * between the check and the queueing. If the read faults, the lock is
 * dropped, the page is faulted in with get_user(), and the whole
 * sequence is retried.
 *
 * Return:
 *  0  - success, *@hb is locked and the caller must queue @q
 *  <0 - -EWOULDBLOCK on value mismatch, or a fault/key error;
 *       *@hb is unlocked on error
 */
int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
		     struct futex_q *q, struct futex_hash_bucket **hb)
{
	u32 uval;
	int ret;

retry:
	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ);
	if (unlikely(ret != 0))
		return ret;

retry_private:
	*hb = futex_q_lock(q);

	/* Non-faulting read under the hb lock. */
	ret = futex_get_value_locked(&uval, uaddr);

	if (ret) {
		futex_q_unlock(*hb);

		/* Fault the page in without the lock held, then retry. */
		ret = get_user(uval, uaddr);
		if (ret)
			return ret;

		/* Private keys survive the retry; shared ones must not. */
		if (!(flags & FLAGS_SHARED))
			goto retry_private;

		goto retry;
	}

	if (uval != val) {
		futex_q_unlock(*hb);
		ret = -EWOULDBLOCK;
	}

	return ret;
}
0631
0632 int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, ktime_t *abs_time, u32 bitset)
0633 {
0634 struct hrtimer_sleeper timeout, *to;
0635 struct restart_block *restart;
0636 struct futex_hash_bucket *hb;
0637 struct futex_q q = futex_q_init;
0638 int ret;
0639
0640 if (!bitset)
0641 return -EINVAL;
0642 q.bitset = bitset;
0643
0644 to = futex_setup_timer(abs_time, &timeout, flags,
0645 current->timer_slack_ns);
0646 retry:
0647
0648
0649
0650
0651 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
0652 if (ret)
0653 goto out;
0654
0655
0656 futex_wait_queue(hb, &q, to);
0657
0658
0659 ret = 0;
0660 if (!futex_unqueue(&q))
0661 goto out;
0662 ret = -ETIMEDOUT;
0663 if (to && !to->task)
0664 goto out;
0665
0666
0667
0668
0669
0670 if (!signal_pending(current))
0671 goto retry;
0672
0673 ret = -ERESTARTSYS;
0674 if (!abs_time)
0675 goto out;
0676
0677 restart = ¤t->restart_block;
0678 restart->futex.uaddr = uaddr;
0679 restart->futex.val = val;
0680 restart->futex.time = *abs_time;
0681 restart->futex.bitset = bitset;
0682 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
0683
0684 ret = set_restart_fn(restart, futex_wait_restart);
0685
0686 out:
0687 if (to) {
0688 hrtimer_cancel(&to->timer);
0689 destroy_hrtimer_on_stack(&to->timer);
0690 }
0691 return ret;
0692 }
0693
0694 static long futex_wait_restart(struct restart_block *restart)
0695 {
0696 u32 __user *uaddr = restart->futex.uaddr;
0697 ktime_t t, *tp = NULL;
0698
0699 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
0700 t = restart->futex.time;
0701 tp = &t;
0702 }
0703 restart->fn = do_no_restart_syscall;
0704
0705 return (long)futex_wait(uaddr, restart->futex.flags,
0706 restart->futex.val, tp, restart->futex.bitset);
0707 }
0708