/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <linux/bit_spinlock.h>

#include <linux/rhashtable-types.h>

/*
 * Objects in an rhashtable have an embedded struct rhash_head which is
 * linked into a hash chain from the hash table - or one of two or more
 * hash tables when the rhashtable is being resized.  The end of the
 * chain is marked with a special nulls marker, which has the least
 * significant bit set but otherwise stores the address of the hash
 * bucket.  This allows us to be sure we've found the end of the right
 * list.
 *
 * The value stored in the hash bucket has BIT(0) used as a lock bit.
 * This bit must be atomically set before any changes are made to the
 * chain.  To avoid dereferencing this pointer without clearing the bit
 * first, we use an opaque 'struct rhash_lock_head *' for the pointer
 * stored in the bucket.  This struct needs to be defined so that
 * rcu_dereference() works on it, but it has no content so a unique
 * type is needed for it.
 */
struct rhash_lock_head {};

/* Maximum chain length before rehash
 *
 * The maximum (not average) chain length grows with the size of the hash
 * table, at a rate of (log N)/(log log N).
 *
 * The value of 16 is selected so that even if the hash table grew to
 * 2^32 you would not expect the maximum chain length to exceed it
 * unless we are under attack (or extremely unlucky).
 *
 * As this limit is only to detect attacks, we don't need to set it to a
 * lower value as you'd need the chain length to vastly exceed 16 to have
 * any real effect on the system.
 */
#define RHT_ELASTICITY	16u

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @nest: Number of bits of first-level nested table.
 * @hash_rnd: Random seed to fold into hash
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @dep_map: Lockdep map for the per-bucket bit spinlocks
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		nest;
	u32			hash_rnd;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct lockdep_map	dep_map;

	struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};

/*
 * NULLS_MARKER() expects a hash value with the low bits most likely to
 * be significant, and it discards the msb.  We give it an address, in
 * which the bottom bit is always 0 and the msb might be significant,
 * so we shift the address down one bit to align with its expectations
 * and avoid losing a significant bit.
 *
 * We never store the NULLS_MARKER in the hash table itself, as we need
 * the lsb for locking.  Instead we store a NULL.
 */
#define RHT_NULLS_MARKER(ptr)	\
	((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
#define INIT_RHT_NULLS_HEAD(ptr)	\
	((ptr) = NULL)

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return hash & (tbl->size - 1);
}

static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
	const void *key, const struct rhashtable_params params,
	unsigned int hash_rnd)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else
			hash = jhash(key, key_len, hash_rnd);
	}

	return hash;
}
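/*
 * Illustrative note (not part of the API): with a compile-time constant
 * key_len that is a multiple of 4, the jhash2() branch above is chosen
 * at compile time.  A hypothetical parameter block such as
 *
 *	static const struct rhashtable_params example_params = {
 *		.key_len     = sizeof(u32) * 4,	// multiple of 4 -> jhash2()
 *		.key_offset  = offsetof(struct example_obj, key),
 *		.head_offset = offsetof(struct example_obj, node),
 *	};
 *
 * hashes with the word-wise jhash2(), while an odd key_len (e.g. 6)
 * would fall back to the byte-wise jhash().  struct example_obj is a
 * placeholder.
 */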
0154
0155 static inline unsigned int rht_key_hashfn(
0156 struct rhashtable *ht, const struct bucket_table *tbl,
0157 const void *key, const struct rhashtable_params params)
0158 {
0159 unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
0160
0161 return rht_bucket_index(tbl, hash);
0162 }
0163
0164 static inline unsigned int rht_head_hashfn(
0165 struct rhashtable *ht, const struct bucket_table *tbl,
0166 const struct rhash_head *he, const struct rhashtable_params params)
0167 {
0168 const char *ptr = rht_obj(ht, he);
0169
0170 return likely(params.obj_hashfn) ?
0171 rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
0172 ht->p.key_len,
0173 tbl->hash_rnd)) :
0174 rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
0175 }
0176
0177
0178
0179
0180
0181
0182 static inline bool rht_grow_above_75(const struct rhashtable *ht,
0183 const struct bucket_table *tbl)
0184 {
0185
0186 return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
0187 (!ht->p.max_size || tbl->size < ht->p.max_size);
0188 }
0189
0190
0191
0192
0193
0194
0195 static inline bool rht_shrink_below_30(const struct rhashtable *ht,
0196 const struct bucket_table *tbl)
0197 {
0198
0199 return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
0200 tbl->size > ht->p.min_size;
0201 }
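/*
 * Worked example (illustrative): with tbl->size == 1024 the table is
 * scheduled to expand once nelems exceeds 1024 / 4 * 3 == 768 (75%
 * load) and, when automatic_shrinking is set, to shrink once nelems
 * drops below 1024 * 3 / 10 == 307 (30% load), always subject to the
 * configured max_size/min_size bounds.
 */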
/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) >= ht->max_elems;
}

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);

static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
{
	(void)rhashtable_walk_start_check(iter);
}

void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
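/*
 * Typical walker usage (a minimal sketch; "ht" and struct example_obj
 * are placeholders, error handling elided).  rhashtable_walk_next()
 * may return ERR_PTR(-EAGAIN) when a resize invalidates the iterator:
 *
 *	struct rhashtable_iter iter;
 *	struct example_obj *obj;
 *
 *	rhashtable_walk_enter(ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;	// -EAGAIN: table was resized
 *		// use obj
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */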
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

struct rhash_lock_head __rcu **rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash);
struct rhash_lock_head __rcu **__rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash);
struct rhash_lock_head __rcu **rht_bucket_nested_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

static inline struct rhash_lock_head __rcu *const *rht_bucket(
	const struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_lock_head __rcu **rht_bucket_var(
	struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_lock_head __rcu **rht_bucket_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
				     &tbl->buckets[hash];
}

/*
 * We lock a bucket by setting BIT(0) of the bucket pointer - this bit
 * is always zero in real pointers.  The NULLS mark is never stored in
 * the bucket, only in the rhash_head of the last object in the bucket.
 * The lock bit must be set atomically, with BH disabled, before any
 * changes are made to the chain, which is why the helpers below pair
 * bit_spin_lock() with local_bh_disable().
 */
static inline void rht_lock(struct bucket_table *tbl,
			    struct rhash_lock_head __rcu **bkt)
{
	local_bh_disable();
	bit_spin_lock(0, (unsigned long *)bkt);
	lock_map_acquire(&tbl->dep_map);
}

static inline void rht_lock_nested(struct bucket_table *tbl,
				   struct rhash_lock_head __rcu **bucket,
				   unsigned int subclass)
{
	local_bh_disable();
	bit_spin_lock(0, (unsigned long *)bucket);
	lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
}

static inline void rht_unlock(struct bucket_table *tbl,
			      struct rhash_lock_head __rcu **bkt)
{
	lock_map_release(&tbl->dep_map);
	bit_spin_unlock(0, (unsigned long *)bkt);
	local_bh_enable();
}

static inline struct rhash_head *__rht_ptr(
	struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
{
	return (struct rhash_head *)
		((unsigned long)p & ~BIT(0) ?:
		 (unsigned long)RHT_NULLS_MARKER(bkt));
}

/*
 * Where 'bkt' is a bucket and might be locked:
 *   rht_ptr_rcu() dereferences that pointer and clears the lock bit.
 *   rht_ptr() dereferences in a context where the bucket is locked.
 *   rht_ptr_exclusive() dereferences in a context where exclusive
 *            access is guaranteed, such as when destroying the table.
 */
static inline struct rhash_head *rht_ptr_rcu(
	struct rhash_lock_head __rcu *const *bkt)
{
	return __rht_ptr(rcu_dereference(*bkt), bkt);
}

static inline struct rhash_head *rht_ptr(
	struct rhash_lock_head __rcu *const *bkt,
	struct bucket_table *tbl,
	unsigned int hash)
{
	return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
}

static inline struct rhash_head *rht_ptr_exclusive(
	struct rhash_lock_head __rcu *const *bkt)
{
	return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
}

static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
				     struct rhash_head *obj)
{
	if (rht_is_a_nulls(obj))
		obj = NULL;
	rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
}

static inline void rht_assign_unlock(struct bucket_table *tbl,
				     struct rhash_lock_head __rcu **bkt,
				     struct rhash_head *obj)
{
	if (rht_is_a_nulls(obj))
		obj = NULL;
	lock_map_release(&tbl->dep_map);
	rcu_assign_pointer(*bkt, (void *)obj);
	preempt_enable();
	__release(bitlock);
	local_bh_enable();
}

/**
 * rht_for_each_from - iterate over hash chain from given head
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_from(pos, head, tbl, hash) \
	for (pos = head; \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash),  \
			  tbl, hash)

/**
 * rht_for_each_entry_from - iterate over hash chain from given head
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member)	\
	for (pos = head;						\
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	\
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_from(tpos, pos,				\
				rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
				tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
	for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash),		      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
	     pos = next,						      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_from - iterate over rcu hash chain from given head
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_from(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = head;						\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash)			\
	for (({barrier(); }),					\
	     pos = rht_ptr_rcu(rht_bucket(tbl, hash));		\
	     !rht_is_a_nulls(pos);				\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_entry_rcu_from - iterate over rcu hash chain from given head
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						    \
	     pos = head;						    \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		   \
	rht_for_each_entry_rcu_from(tpos, pos,				   \
				    rht_ptr_rcu(rht_bucket(tbl, hash)),	   \
				    tbl, hash, member)
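/*
 * Example traversal (illustrative; struct example_obj embedding a
 * "struct rhash_head node" and the variables are placeholders):
 *
 *	struct example_obj *obj;
 *	struct rhash_head *pos;
 *
 *	rcu_read_lock();
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
 *		pr_info("key %u\n", obj->key);
 *	rcu_read_unlock();
 */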
/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list)					\
	for (pos = list; pos; pos = rcu_dereference_raw(pos->next))

/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 * @member:	name of the &struct rhlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member)			\
	for (pos = list; pos && rht_entry(tpos, pos, member);		\
	     pos = rcu_dereference_raw(pos->next))

static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/* Internal function, do not use. */
static inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_lock_head __rcu *const *bkt;
	struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	bkt = rht_bucket(tbl, hash);
	do {
		rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) {
			if (params.obj_cmpfn ?
			    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
			    rhashtable_compare(&arg, rht_obj(ht, he)))
				continue;
			return he;
		}
		/* An object might have been moved to a different hash chain,
		 * while we walk along it - better check and retry.
		 */
	} while (he != RHT_NULLS_MARKER(bkt));

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}

/**
 * rhashtable_lookup - search hash table
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params);

	return he ? rht_obj(ht, he) : NULL;
}

/**
 * rhashtable_lookup_fast - search hash table, without RCU read lock
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}
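/*
 * Lookup sketch (illustrative only; struct example_obj and
 * example_params are placeholders the user would define):
 *
 *	struct example_obj {
 *		u32 key;
 *		struct rhash_head node;
 *	};
 *
 *	struct example_obj *obj;
 *
 *	obj = rhashtable_lookup_fast(&ht, &key, example_params);
 *
 * The result is only safe to use for as long as the caller's own
 * lifetime rules (reference counting, RCU, locking) keep it alive.
 */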
/**
 * rhltable_lookup - search hash list table
 * @hlt:	hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key.  All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}

/* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in the hash table, if
 * there is a clash, otherwise it returns an error via ERR_PTR().
 */
static inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	elasticity = RHT_ELASTICITY;
	bkt = rht_bucket_insert(ht, tbl, hash);
	data = ERR_PTR(-ENOMEM);
	if (!bkt)
		goto out;
	pprev = NULL;
	rht_lock(tbl, bkt);

	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
		rht_unlock(tbl, bkt);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out_unlock;

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev) {
			rcu_assign_pointer(*pprev, obj);
			rht_unlock(tbl, bkt);
		} else
			rht_assign_unlock(tbl, bkt, obj);
		data = NULL;
		goto out;
	}

	if (elasticity <= 0)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out_unlock;

	if (unlikely(rht_grow_above_100(ht, tbl)))
		goto slow_path;

	/* Inserting at head of list makes unlocking free. */
	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	atomic_inc(&ht->nelems);
	rht_assign_unlock(tbl, bkt, obj);

	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	data = NULL;
out:
	rcu_read_unlock();

	return data;

out_unlock:
	rht_unlock(tbl, bkt);
	goto out;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Will take the per-bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
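/*
 * Insert sketch (illustrative; obj is a placeholder object embedding
 * "struct rhash_head node", example_params as before):
 *
 *	err = rhashtable_insert_fast(&ht, &obj->node, example_params);
 *	if (err == -EEXIST)
 *		;	// an entry with the same key already exists
 *	else if (err)
 *		;	// e.g. -ENOMEM or -E2BIG from the insert path
 */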
/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt:	hash list table
 * @key:	the pointer to the key
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take the per-bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}

/**
 * rhltable_insert - insert object into hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take the per-bucket bitlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}

/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter in the rhashtable_params). Will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);
	void *ret;

	BUG_ON(ht->p.obj_hashfn);

	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
				       false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Just like rhashtable_lookup_insert_fast(), but this function returns the
 * object if it exists, NULL if it did not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
					false);
}

/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Just like rhashtable_lookup_insert_key(), but this function returns the
 * object if it exists, NULL if it does not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast_one(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params,
	bool rhlist)
{
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	bkt = rht_bucket_var(tbl, hash);
	if (!bkt)
		return -ENOENT;
	pprev = NULL;
	rht_lock(tbl, bkt);

	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;

		list = container_of(he, struct rhlist_head, rhead);

		if (he != obj) {
			struct rhlist_head __rcu **lpprev;

			pprev = &he->next;

			if (!rhlist)
				continue;

			do {
				lpprev = &list->next;
				list = rht_dereference_bucket(list->next,
							      tbl, hash);
			} while (list && obj != &list->rhead);

			if (!list)
				continue;

			list = rht_dereference_bucket(list->next, tbl, hash);
			RCU_INIT_POINTER(*lpprev, list);
			err = 0;
			break;
		}

		obj = rht_dereference_bucket(obj->next, tbl, hash);
		err = 1;

		if (rhlist) {
			list = rht_dereference_bucket(list->next, tbl, hash);
			if (list) {
				RCU_INIT_POINTER(list->rhead.next, obj);
				obj = &list->rhead;
				err = 0;
			}
		}

		if (pprev) {
			rcu_assign_pointer(*pprev, obj);
			rht_unlock(tbl, bkt);
		} else {
			rht_assign_unlock(tbl, bkt, obj);
		}
		goto unlocked;
	}

	rht_unlock(tbl, bkt);
unlocked:
	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
			     rht_shrink_below_30(ht, tbl)))
			schedule_work(&ht->run_work);
		err = 0;
	}

	return err;
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(ht, obj, params, false);
}
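/*
 * Removal sketch (illustrative; placeholders as above). The caller must
 * keep the object alive until an RCU grace period has elapsed, e.g. by
 * freeing through kfree_rcu() (assuming an rcu_head member named "rcu"):
 *
 *	if (rhashtable_remove_fast(&ht, &obj->node, example_params) == 0)
 *		kfree_rcu(obj, rcu);
 */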
/**
 * rhltable_remove - remove object from hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhltable_remove(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}
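/*
 * rhltable sketch (illustrative; struct example_dup embedding a
 * "struct rhlist_head list_node" is a placeholder). Unlike an
 * rhashtable, an rhltable accepts duplicate keys, and all matches are
 * chained behind the head returned by rhltable_lookup():
 *
 *	rhltable_insert(&hlt, &obj->list_node, example_params);
 *	...
 *	rcu_read_lock();
 *	list = rhltable_lookup(&hlt, &key, example_params);
 *	rhl_for_each_entry_rcu(obj, pos, list, list_node)
 *		;	// visit each duplicate
 *	rcu_read_unlock();
 *	...
 *	rhltable_remove(&hlt, &obj->list_node, example_params);
 */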
/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the old and new objects must have same hash
	 * (which should mean identical keys).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	bkt = rht_bucket_var(tbl, hash);
	if (!bkt)
		return -ENOENT;

	pprev = NULL;
	rht_lock(tbl, bkt);

	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		if (pprev) {
			rcu_assign_pointer(*pprev, obj_new);
			rht_unlock(tbl, bkt);
		} else {
			rht_assign_unlock(tbl, bkt, obj_new);
		}
		err = 0;
		goto unlocked;
	}

	rht_unlock(tbl, bkt);

unlocked:
	return err;
}

/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht:		hash table
 * @obj_old:	pointer to hash head inside object being replaced
 * @obj_new:	pointer to hash head inside object which is new
 * @params:	hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table here.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhltable_walk_enter - Initialise an iterator
 * @hlt:	Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
static inline void rhltable_walk_enter(struct rhltable *hlt,
				       struct rhashtable_iter *iter)
{
	return rhashtable_walk_enter(&hlt->ht, iter);
}

/**
 * rhltable_free_and_destroy - free elements and destroy hash list table
 * @hlt:	the hash list table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * See documentation for rhashtable_free_and_destroy.
 */
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
					     void (*free_fn)(void *ptr,
							     void *arg),
					     void *arg)
{
	return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}

static inline void rhltable_destroy(struct rhltable *hlt)
{
	return rhltable_free_and_destroy(hlt, NULL, NULL);
}

#endif /* _LINUX_RHASHTABLE_H */