// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core futex infrastructure: the global hash table, futex key resolution,
 * queueing primitives and exit-time cleanup of robust lists and PI state.
 */

#include <linux/compat.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/slab.h>

#include "futex.h"
#include "../locking/rtmutex_common.h"

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in futex_hash()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long		hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues	(__futex_data.queues)
#define futex_hashsize	(__futex_data.hashsize)

/*
 * Fault injection support for futexes. Can be enabled at runtime via the
 * fail_futex debugfs attributes or the "fail_futex=" boot parameter.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	debugfs_create_bool("ignore-private", mode, dir,
			    &fail_futex.ignore_private);
	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#endif /* CONFIG_FAIL_FUTEX */

/**
 * futex_hash - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * hash bucket in the global hash.
 */
struct futex_hash_bucket *futex_hash(union futex_key *key)
{
	u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
			  key->both.offset);

	return &futex_queues[hash & (futex_hashsize - 1)];
}
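
/*
 * Illustrative sketch (not part of this file): two tasks waiting on the same
 * futex produce identical keys and therefore hash to the same bucket, which
 * is what lets wakers find sleepers:
 *
 *	union futex_key k1 = FUTEX_KEY_INIT, k2 = FUTEX_KEY_INIT;
 *
 *	get_futex_key(uaddr, false, &k1, FUTEX_READ);
 *	get_futex_key(uaddr, false, &k2, FUTEX_READ);
 *	WARN_ON(futex_hash(&k1) != futex_hash(&k2));	// same bucket/chain
 */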

/**
 * futex_setup_timer - set up the sleeping hrtimer.
 * @time:	ptr to the given timeout value
 * @timeout:	the hrtimer_sleeper structure to be set up
 * @flags:	futex flags
 * @range_ns:	optional range in ns
 *
 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
 *	   value given
 */
struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns)
{
	if (!time)
		return NULL;

	hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
				      CLOCK_REALTIME : CLOCK_MONOTONIC,
				      HRTIMER_MODE_ABS);
	/*
	 * The timer is allowed to expire anywhere within
	 * [*time, *time + range_ns] so expiries can be batched.
	 */
	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);

	return timeout;
}
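
/*
 * Usage sketch (illustrative, modelled on the wait paths; not part of this
 * file): a waiter arms the optional timeout and tears it down again after
 * waking up:
 *
 *	struct hrtimer_sleeper timeout, *to;
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags,
 *			       current->timer_slack_ns);
 *	if (to)
 *		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
 *
 *	... block until woken, or until !to->task signals expiry ...
 *
 *	if (to) {
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 */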

/*
 * Generate a machine wide unique identifier for this inode.
 *
 * This relies on u64 not wrapping in the life-time of the machine; a 1ns
 * resolution counter would not wrap for ~585 years.
 *
 * It further relies on the fact that a well formed program will not unmap
 * the file while it has a (shared) futex waiting on it; the mapping keeps a
 * file reference which pins the mount and inode.
 *
 * If an inode is evicted and read back in, it gets a new sequence number and
 * thus no longer matches. This is unlikely but possible; userspace must not
 * depend on the identifier surviving inode eviction.
 */
static u64 get_inode_sequence_number(struct inode *inode)
{
	static atomic64_t i_seq;
	u64 old;

	/* Does the inode already have a sequence number? */
	old = atomic64_read(&inode->i_sequence);
	if (likely(old))
		return old;

	for (;;) {
		u64 new = atomic64_add_return(1, &i_seq);
		if (WARN_ON_ONCE(!new))
			continue;

		old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
		if (old)
			return old;
		return new;
	}
}

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	false for a PROCESS_PRIVATE futex, true for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *		FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings (when @fshared), the key is:
 *
 *   ( inode->i_sequence, page->index, offset_within_page )
 *
 * [ also see get_inode_sequence_number() ]
 *
 * For private mappings (or when !@fshared), the key is:
 *
 *   ( current->mm, address, 0 )
 *
 * This allows (cross process, where applicable) identification of the futex
 * without keeping the page pinned for the duration of the FUTEX_WAIT.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
		  enum futex_access rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note: we do have to check 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == FUTEX_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. For
	 * anonymous pages the key is based on the address, so it does not
	 * matter if a compound page splits later. For file-backed pages the
	 * tail page index is part of the key, so look up the head page and
	 * its mapping once here; the mapping is re-verified under RCU (or
	 * the page lock) before it is actually used.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be an anonymous page;
	 * it might be the ZERO_PAGE, a gate-area or special-mapping page, or
	 * a file page that was truncated after get_user_pages_fast() found
	 * it. All of those cases may simply fail. The one case which must be
	 * retried is a shmem page that was moved to or from the swap cache
	 * underneath us.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * The page lock is required to tell the cases apart: it
		 * stabilizes the swap-cache / mapping state of a shmem page.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored in anonymous memory, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if it's a
	 * read-only handle, it's expected that futexes attach to the object
	 * not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(true)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED;
		key->private.mm = mm;
		key->private.address = address;

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode,
		 * reached via page->mapping. The inode is freed under RCU,
		 * so take the RCU read lock and re-check that the mapping is
		 * still the one observed above; if it changed, the page was
		 * truncated or migrated and the lookup must be retried.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		key->both.offset |= FUT_OFF_INODE;
		key->shared.i_seq = get_inode_sequence_number(inode);
		key->shared.pgoff = page_to_pgoff(tail);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}
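
/*
 * Usage sketch (illustrative, modelled on the wait/wake paths; not part of
 * this file): callers resolve the user address into a key once and then use
 * the key for hashing and matching:
 *
 *	union futex_key key = FUTEX_KEY_INIT;
 *	struct futex_hash_bucket *hb;
 *	int ret;
 *
 *	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	hb = futex_hash(&key);
 */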

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling fixup_user_fault() right away.
 */
int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	mmap_read_lock(mm);
	ret = fixup_user_fault(mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	mmap_read_unlock(mm);

	return ret < 0 ? ret : 0;
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (futex_match(&this->key, key))
			return this;
	}
	return NULL;
}

/*
 * The two helpers below access the futex word with page faults disabled
 * (hence the _locked suffix); any fault is reported to the caller, which
 * must drop its locks (if any) and resolve the fault in the slow path.
 */
int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

int futex_get_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}
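
/*
 * Usage sketch (illustrative, modelled on the wait setup path; not part of
 * this file): read the futex word while holding the bucket lock, and on a
 * fault drop the lock, fault the page in with a sleeping get_user(), then
 * retry:
 *
 * retry_private:
 *	hb = futex_q_lock(&q);
 *
 *	ret = futex_get_value_locked(&uval, uaddr);
 *	if (ret) {
 *		futex_q_unlock(hb);
 *		if (get_user(uval, uaddr))
 *			return -EFAULT;
 *		goto retry_private;
 *	}
 *
 *	if (uval != expected_val) {
 *		futex_q_unlock(hb);
 *		return -EWOULDBLOCK;
 *	}
 */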

/**
 * wait_for_owner_exiting - Block until the owner has exited
 * @ret: owner's current futex lock status
 * @exiting:	Pointer to the exiting task
 *
 * Caller must hold a refcount on @exiting.
 */
void wait_for_owner_exiting(int ret, struct task_struct *exiting)
{
	if (ret != -EBUSY) {
		WARN_ON_ONCE(exiting);
		return;
	}

	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
		return;

	mutex_lock(&exiting->futex_exit_mutex);
	/*
	 * No point in checking state here; acquiring futex_exit_mutex is
	 * only meant to block until the exiting task has finished its futex
	 * cleanup, after which the caller retries the lock attempt.
	 */
	mutex_unlock(&exiting->futex_exit_mutex);

	put_task_struct(exiting);
}

/**
 * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
void __futex_unqueue(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
		return;
	lockdep_assert_held(q->lock_ptr);

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	futex_hb_waiters_dec(hb);
}

/* The key must be already stored in q->key. */
struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
	__acquires(&hb->lock)
{
	struct futex_hash_bucket *hb;

	hb = futex_hash(&q->key);

	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all futex_q_lock()
	 * users end up calling futex_queue(). Similarly, for housekeeping,
	 * decrement the counter at futex_q_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	futex_hb_waiters_inc(hb);

	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

void futex_q_unlock(struct futex_hash_bucket *hb)
	__releases(&hb->lock)
{
	spin_unlock(&hb->lock);
	futex_hb_waiters_dec(hb);
}

void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
	plist_add(&q->list, &hb->chain);
	q->task = current;
}
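
/*
 * Lifecycle sketch (illustrative, loosely following the wait path; not part
 * of this file): a waiter is enqueued under the bucket lock, the lock is
 * dropped before sleeping, and futex_unqueue() cleans up if the task was
 * not already unqueued by a waker:
 *
 *	hb = futex_q_lock(&q);		// hb_waiters_inc() + spin_lock()
 *	... re-check the futex value via futex_get_value_locked() ...
 *	__futex_queue(&q, hb);		// add to hb->chain in prio order
 *	spin_unlock(&hb->lock);
 *
 *	... sleep; a waker removes us from hb->chain and wakes the task ...
 *
 *	if (!futex_unqueue(&q))
 *		;			// already unqueued: we were woken
 */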

/**
 * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to futex_unqueue()
 * must be paired with exactly one earlier call to futex_queue().
 *
 * Return:
 *  - 1 - if the futex_q was still queued (and we removed it);
 *  - 0 - if the futex_q was already removed by the waking thread
 */
int futex_unqueue(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

retry:
	/*
	 * q->lock_ptr can change between this read and the following
	 * spin_lock(). Use READ_ONCE() to forbid the compiler from reloading
	 * q->lock_ptr and optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__futex_unqueue(q);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the hash
 * bucket. The hash bucket lock (i.e. lock_ptr) is held.
 */
void futex_unqueue_pi(struct futex_q *q)
{
	__futex_unqueue(q);

	BUG_ON(!q->pi_state);
	put_pi_state(q->pi_state);
	q->pi_state = NULL;
}

/* Constants for the pending_op argument of handle_futex_death */
#define HANDLE_DEATH_PENDING	true
#define HANDLE_DEATH_LIST	false

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
			      bool pi, bool pending_op)
{
	u32 uval, nval, mval;
	int err;

	/* Futex address must be 32bit aligned */
	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
		return -1;

retry:
	if (get_user(uval, uaddr))
		return -1;

	/*
	 * Special case for regular (non PI) futexes. A value of 0 with a
	 * pending entry (list_op_pending) means the task died in the middle
	 * of a user space lock or unlock operation and nobody owns the futex
	 * anymore. There may nevertheless be a waiter that was queued in
	 * that window, so wake one up to let it re-evaluate the futex value.
	 */
	if (pending_op && !pi && !uval) {
		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
		return 0;
	}

	/* Only the dying owner's futexes are of interest here. */
	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
		return 0;

	/*
	 * Ok, this dying thread is truly holding a futex of interest. Set
	 * the OWNER_DIED bit atomically via cmpxchg, and if the value had
	 * FUTEX_WAITERS set, wake up a waiter (if any).
	 */
	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

	/*
	 * We are not holding a lock here, but the pagefault_disable()
	 * protection in futex_cmpxchg_value_locked() lets a fault be handled
	 * gracefully: on -EFAULT fault the page in writable and retry; on
	 * -EAGAIN the atomic operation could not complete, so reschedule
	 * and retry.
	 */
	if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
		switch (err) {
		case -EFAULT:
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;

		case -EAGAIN:
			cond_resched();
			goto retry;

		default:
			WARN_ON_ONCE(1);
			return err;
		}
	}

	/* Somebody else modified the futex value concurrently: start over. */
	if (nval != uval)
		goto retry;

	/*
	 * Wake robust non-PI futexes here. The wakeup of
	 * PI futexes happens in exit_pi_state():
	 */
	if (!pi && (uval & FUTEX_WAITERS))
		futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);

	return 0;
}
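
/*
 * For reference (illustrative, based on the uapi futex word layout; not part
 * of this file): the 32bit futex word of a robust lock encodes the owner TID
 * plus flag bits, and handle_futex_death() performs the marked transition:
 *
 *	0				 - lock is free
 *	TID				 - held, no waiters
 *	TID | FUTEX_WAITERS		 - held, kernel-side waiters queued
 *	FUTEX_WAITERS | FUTEX_OWNER_DIED - owner died, a waiter is woken
 *
 * A woken waiter that observes FUTEX_OWNER_DIED can take the lock over and
 * report it (e.g. pthread_mutex_lock() returning EOWNERDEAD for robust
 * mutexes).
 */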

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int next_pi;
	unsigned long futex_offset;
	int rc;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;

	next_entry = NULL;
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			if (handle_futex_death((void __user *)entry + futex_offset,
					       curr, pi, HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending) {
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip, HANDLE_DEATH_PENDING);
	}
}
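
/*
 * Userspace side sketch (illustrative; not part of this file): the robust
 * list lives entirely in user memory and is only read by the kernel at exit
 * time. The structures come from the futex uapi header:
 *
 *	struct robust_list {
 *		struct robust_list *next;
 *	};
 *	struct robust_list_head {
 *		struct robust_list list;	// circular list of held locks
 *		long futex_offset;		// entry -> futex word offset
 *		struct robust_list *list_op_pending;
 *	};
 *
 * A thread (typically the C library) registers its head once:
 *
 *	static struct robust_list_head head = { .list = { &head.list } };
 *
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * and links/unlinks an entry embedded in each robust mutex while holding it,
 * setting list_op_pending around the update so a crash mid-operation is
 * still recoverable by exit_robust_list().
 */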

#ifdef CONFIG_COMPAT
static void __user *futex_uaddr(struct robust_list __user *entry,
				compat_long_t futex_offset)
{
	compat_uptr_t base = ptr_to_compat(entry);
	void __user *uaddr = compat_ptr(base + futex_offset);

	return uaddr;
}

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
			  compat_uptr_t __user *head, unsigned int *pi)
{
	if (get_user(*uentry, head))
		return -EFAULT;

	*entry = compat_ptr((*uentry) & ~1);
	*pi = (unsigned int)(*uentry) & 1;

	return 0;
}

/*
 * Walk curr->compat_robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void compat_exit_robust_list(struct task_struct *curr)
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int next_pi;
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;

	/*
	 * Fetch the list head (which was registered earlier, via the compat
	 * set_robust_list() syscall):
	 */
	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (compat_fetch_robust_entry(&upending, &pending,
				      &head->list_op_pending, &pip))
		return;

	next_entry = NULL;
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
			(compat_uptr_t __user *)&entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

			if (handle_futex_death(uaddr, curr, pi,
					       HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
	}
}
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
static void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = futex_hash(&key);

		/*
		 * We can race against put_pi_state() removing itself from the
		 * list (a waiter going away). put_pi_state() will first
		 * decrement the reference count and then modify the list, so
		 * it's possible to see the list entry but fail this reference
		 * acquire.
		 *
		 * In that case; drop the locks to let put_pi_state() make
		 * progress and retry the loop.
		 */
		if (!refcount_inc_not_zero(&pi_state->refcount)) {
			raw_spin_unlock_irq(&curr->pi_lock);
			cpu_relax();
			raw_spin_lock_irq(&curr->pi_lock);
			continue;
		}
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		raw_spin_lock(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			/* retain curr->pi_lock for the loop invariant */
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			spin_unlock(&hb->lock);
			put_pi_state(pi_state);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;

		raw_spin_unlock(&curr->pi_lock);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}
#else
static inline void exit_pi_state_list(struct task_struct *curr) { }
#endif /* CONFIG_FUTEX_PI */

static void futex_cleanup(struct task_struct *tsk)
{
	if (unlikely(tsk->robust_list)) {
		exit_robust_list(tsk);
		tsk->robust_list = NULL;
	}

#ifdef CONFIG_COMPAT
	if (unlikely(tsk->compat_robust_list)) {
		compat_exit_robust_list(tsk);
		tsk->compat_robust_list = NULL;
	}
#endif

	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
}

/**
 * futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
 * @tsk:	task to set the state on
 *
 * Set the futex exit state of the task lockless. The futex waiter code
 * observes that state when a task is exiting and loops until the task has
 * actually finished the futex cleanup. The worst case for this is that the
 * waiter runs through the wait loop until the state becomes visible.
 *
 * This is called from the recursive fault handling path in make_task_dead().
 *
 * This is best effort. Either the futex exit code has run already or
 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
 * take it over. If not, the problem is pushed back to user space. If the
 * futex exit code did not run yet, then an already queued waiter might
 * block forever, but there is nothing which can be done about that.
 */
void futex_exit_recursive(struct task_struct *tsk)
{
	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
	if (tsk->futex_state == FUTEX_STATE_EXITING)
		mutex_unlock(&tsk->futex_exit_mutex);
	tsk->futex_state = FUTEX_STATE_DEAD;
}

static void futex_cleanup_begin(struct task_struct *tsk)
{
	/*
	 * Prevent various race issues against a concurrent incoming waiter
	 * including live locks by forcing the waiter to block on
	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
	 * attach_to_pi_owner().
	 */
	mutex_lock(&tsk->futex_exit_mutex);

	/*
	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
	 *
	 * This ensures that all subsequent checks of tsk->futex_state in
	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
	 * tsk->pi_lock held.
	 *
	 * It also guarantees that a pi_state which was queued right before
	 * the state change under tsk->pi_lock by a concurrent waiter must
	 * be observed in exit_pi_state_list().
	 */
	raw_spin_lock_irq(&tsk->pi_lock);
	tsk->futex_state = FUTEX_STATE_EXITING;
	raw_spin_unlock_irq(&tsk->pi_lock);
}

static void futex_cleanup_end(struct task_struct *tsk, int state)
{
	/*
	 * Publish the final state (FUTEX_STATE_OK or FUTEX_STATE_DEAD) now
	 * that the cleanup in futex_cleanup() is complete.
	 */
	tsk->futex_state = state;
	/*
	 * Drop the exit protection. This unblocks waiters which observed
	 * FUTEX_STATE_EXITING to reevaluate the state.
	 */
	mutex_unlock(&tsk->futex_exit_mutex);
}

void futex_exec_release(struct task_struct *tsk)
{
	/*
	 * The state handling is done for consistency, but in the case of
	 * exec() there is no way to prevent further damage as the PID stays
	 * the same. But for the unlikely and arguably buggy case that a
	 * futex is held on exec(), this provides at least as much state
	 * consistency protection as is possible.
	 */
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	/*
	 * Reset the state to FUTEX_STATE_OK. The task is alive and about
	 * to exec a new binary.
	 */
	futex_cleanup_end(tsk, FUTEX_STATE_OK);
}

void futex_exit_release(struct task_struct *tsk)
{
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
}

static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

	/*
	 * Size the hash to roughly 256 buckets per possible CPU, rounded up
	 * to a power of two (e.g. 8 CPUs -> 2048 buckets), unless the kernel
	 * is configured for minimal memory usage. alloc_large_system_hash()
	 * may adjust the size, so recompute it from the returned shift.
	 */
#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0,
					       futex_hashsize < 256 ? HASH_SMALL : 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
core_initcall(futex_init);