0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  * Resizable, Scalable, Concurrent Hash Table
0004  *
0005  * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
0006  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
0007  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
0008  *
0009  * Code partially derived from nft_hash
0010  * Rewritten with rehash code from br_multicast plus single list
0011  * pointer as suggested by Josh Triplett
0012  *
0013  * This program is free software; you can redistribute it and/or modify
0014  * it under the terms of the GNU General Public License version 2 as
0015  * published by the Free Software Foundation.
0016  */
0017 
0018 #ifndef _LINUX_RHASHTABLE_H
0019 #define _LINUX_RHASHTABLE_H
0020 
0021 #include <linux/err.h>
0022 #include <linux/errno.h>
0023 #include <linux/jhash.h>
0024 #include <linux/list_nulls.h>
0025 #include <linux/workqueue.h>
0026 #include <linux/rculist.h>
0027 #include <linux/bit_spinlock.h>
0028 
0029 #include <linux/rhashtable-types.h>
0030 /*
0031  * Objects in an rhashtable have an embedded struct rhash_head
0032  * which is linked into a hash chain from the hash table - or one
0033  * of two or more hash tables when the rhashtable is being resized.
0034  * The end of the chain is marked with a special nulls marker which has
0035  * the least significant bit set but otherwise stores the address of
0036  * the hash bucket.  This allows us to be sure we've found the end
0037  * of the right list.
0038  * The value stored in the hash bucket has BIT(0) used as a lock bit.
0039  * This bit must be atomically set before any changes are made to
0040  * the chain.  To avoid dereferencing this pointer without clearing
0041  * the bit first, we use an opaque 'struct rhash_lock_head *' for the
0042  * pointer stored in the bucket.  This struct needs to be defined so
0043  * that rcu_dereference() works on it, but it has no content so a
0044  * cast is needed for it to be useful.  This ensures it isn't
0045  * used by mistake without clearing the lock bit first.
0046  */
0047 struct rhash_lock_head {};
0048 
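/*
 * Caller-side sketch (illustrative only, not part of this header): a
 * minimal object type embedding struct rhash_head together with the
 * constant rhashtable_params describing it.  "struct test_obj",
 * "test_params" and "test_ht" are hypothetical names reused by the
 * later sketches in this file.
 */
#include <linux/rhashtable.h>

struct test_obj {
	u32			key;	/* lookup key */
	struct rhash_head	node;	/* linkage used by the hash table */
};

static const struct rhashtable_params test_params = {
	.key_len	= sizeof(u32),			/* fixed-size key */
	.key_offset	= offsetof(struct test_obj, key),
	.head_offset	= offsetof(struct test_obj, node),
	.automatic_shrinking = true,
};

static struct rhashtable test_ht;	/* rhashtable_init(&test_ht, &test_params) once */
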
0049 /* Maximum chain length before rehash
0050  *
0051  * The maximum (not average) chain length grows with the size of the hash
0052  * table, at a rate of (log N)/(log log N).
0053  *
0054  * The value of 16 is selected so that even if the hash table grew to
0055  * 2^32 you would not expect the maximum chain length to exceed it
0056  * unless we are under attack (or extremely unlucky).
0057  *
0058  * As this limit is only to detect attacks, we don't need to set it to a
0059  * lower value as you'd need the chain length to vastly exceed 16 to have
0060  * any real effect on the system.
0061  */
0062 #define RHT_ELASTICITY  16u
0063 
0064 /**
0065  * struct bucket_table - Table of hash buckets
0066  * @size: Number of hash buckets
0067  * @nest: Number of bits of first-level nested table.
0069  * @hash_rnd: Random seed to fold into hash
0070  * @walkers: List of active walkers
0071  * @rcu: RCU structure for freeing the table
0072  * @future_tbl: Table under construction during rehashing
0073  * @dep_map: Lockdep map for the per-bucket bit spin locks
0074  * @buckets: size * hash buckets
0075  */
0076 struct bucket_table {
0077     unsigned int        size;
0078     unsigned int        nest;
0079     u32         hash_rnd;
0080     struct list_head    walkers;
0081     struct rcu_head     rcu;
0082 
0083     struct bucket_table __rcu *future_tbl;
0084 
0085     struct lockdep_map  dep_map;
0086 
0087     struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
0088 };
0089 
0090 /*
0091  * NULLS_MARKER() expects a hash value with the low
0092  * bits most likely to be significant, and it discards
0093  * the msb.
0094  * We give it an address, in which the bottom bit is
0095  * always 0, and the msb might be significant.
0096  * So we shift the address down one bit to align with
0097  * expectations and avoid losing a significant bit.
0098  *
0099  * We never store the NULLS_MARKER in the hash table
0100  * itself as we need the lsb for locking.
0101  * Instead we store a NULL
0102  */
0103 #define RHT_NULLS_MARKER(ptr)   \
0104     ((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
0105 #define INIT_RHT_NULLS_HEAD(ptr)    \
0106     ((ptr) = NULL)
0107 
0108 static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
0109 {
0110     return ((unsigned long) ptr & 1);
0111 }
0112 
0113 static inline void *rht_obj(const struct rhashtable *ht,
0114                 const struct rhash_head *he)
0115 {
0116     return (char *)he - ht->p.head_offset;
0117 }
0118 
0119 static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
0120                         unsigned int hash)
0121 {
0122     return hash & (tbl->size - 1);
0123 }
0124 
0125 static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
0126     const void *key, const struct rhashtable_params params,
0127     unsigned int hash_rnd)
0128 {
0129     unsigned int hash;
0130 
0131     /* params must be equal to ht->p if it isn't constant. */
0132     if (!__builtin_constant_p(params.key_len))
0133         hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
0134     else if (params.key_len) {
0135         unsigned int key_len = params.key_len;
0136 
0137         if (params.hashfn)
0138             hash = params.hashfn(key, key_len, hash_rnd);
0139         else if (key_len & (sizeof(u32) - 1))
0140             hash = jhash(key, key_len, hash_rnd);
0141         else
0142             hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
0143     } else {
0144         unsigned int key_len = ht->p.key_len;
0145 
0146         if (params.hashfn)
0147             hash = params.hashfn(key, key_len, hash_rnd);
0148         else
0149             hash = jhash(key, key_len, hash_rnd);
0150     }
0151 
0152     return hash;
0153 }
0154 
0155 static inline unsigned int rht_key_hashfn(
0156     struct rhashtable *ht, const struct bucket_table *tbl,
0157     const void *key, const struct rhashtable_params params)
0158 {
0159     unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
0160 
0161     return rht_bucket_index(tbl, hash);
0162 }
0163 
0164 static inline unsigned int rht_head_hashfn(
0165     struct rhashtable *ht, const struct bucket_table *tbl,
0166     const struct rhash_head *he, const struct rhashtable_params params)
0167 {
0168     const char *ptr = rht_obj(ht, he);
0169 
0170     return likely(params.obj_hashfn) ?
0171            rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
0172                                 ht->p.key_len,
0173                                tbl->hash_rnd)) :
0174            rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
0175 }
0176 
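/*
 * Hash-path sketch (illustrative only, not part of this header).  For a
 * compile-time constant params without a custom .hashfn, rht_key_get_hash()
 * uses jhash2() when .key_len is a multiple of sizeof(u32) and jhash()
 * otherwise; an .obj_hashfn, if supplied, is preferred by rht_head_hashfn().
 * The helper below is hypothetical and simply mirrors the jhash2() branch
 * for an 8-byte key.
 */
static inline u32 example_hash_8byte_key(const u32 *key, u32 hash_rnd)
{
	/* 8 is a multiple of sizeof(u32), so this is the branch taken */
	return jhash2(key, 8 / sizeof(u32), hash_rnd);
}
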
0177 /**
0178  * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
0179  * @ht:     hash table
0180  * @tbl:    current table
0181  */
0182 static inline bool rht_grow_above_75(const struct rhashtable *ht,
0183                      const struct bucket_table *tbl)
0184 {
0185     /* Expand table when exceeding 75% load */
0186     return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
0187            (!ht->p.max_size || tbl->size < ht->p.max_size);
0188 }
0189 
0190 /**
0191  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
0192  * @ht:     hash table
0193  * @tbl:    current table
0194  */
0195 static inline bool rht_shrink_below_30(const struct rhashtable *ht,
0196                        const struct bucket_table *tbl)
0197 {
0198     /* Shrink table beneath 30% load */
0199     return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
0200            tbl->size > ht->p.min_size;
0201 }
0202 
0203 /**
0204  * rht_grow_above_100 - returns true if nelems > table-size
0205  * @ht:     hash table
0206  * @tbl:    current table
0207  */
0208 static inline bool rht_grow_above_100(const struct rhashtable *ht,
0209                       const struct bucket_table *tbl)
0210 {
0211     return atomic_read(&ht->nelems) > tbl->size &&
0212         (!ht->p.max_size || tbl->size < ht->p.max_size);
0213 }
0214 
0215 /**
0216  * rht_grow_above_max - returns true if table is above maximum
0217  * @ht:     hash table
0218  * @tbl:    current table
0219  */
0220 static inline bool rht_grow_above_max(const struct rhashtable *ht,
0221                       const struct bucket_table *tbl)
0222 {
0223     return atomic_read(&ht->nelems) >= ht->max_elems;
0224 }
0225 
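/*
 * Worked example (illustrative): for a bucket_table of size 1024,
 * rht_grow_above_75() is true once nelems exceeds 1024 / 4 * 3 = 768,
 * rht_shrink_below_30() once nelems falls below 1024 * 3 / 10 = 307, and
 * rht_grow_above_100() once nelems exceeds 1024, in each case subject to
 * the configured min_size/max_size limits checked above.
 */
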
0226 #ifdef CONFIG_PROVE_LOCKING
0227 int lockdep_rht_mutex_is_held(struct rhashtable *ht);
0228 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
0229 #else
0230 static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
0231 {
0232     return 1;
0233 }
0234 
0235 static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
0236                          u32 hash)
0237 {
0238     return 1;
0239 }
0240 #endif /* CONFIG_PROVE_LOCKING */
0241 
0242 void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
0243                  struct rhash_head *obj);
0244 
0245 void rhashtable_walk_enter(struct rhashtable *ht,
0246                struct rhashtable_iter *iter);
0247 void rhashtable_walk_exit(struct rhashtable_iter *iter);
0248 int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
0249 
0250 static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
0251 {
0252     (void)rhashtable_walk_start_check(iter);
0253 }
0254 
0255 void *rhashtable_walk_next(struct rhashtable_iter *iter);
0256 void *rhashtable_walk_peek(struct rhashtable_iter *iter);
0257 void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
0258 
0259 void rhashtable_free_and_destroy(struct rhashtable *ht,
0260                  void (*free_fn)(void *ptr, void *arg),
0261                  void *arg);
0262 void rhashtable_destroy(struct rhashtable *ht);
0263 
0264 struct rhash_lock_head __rcu **rht_bucket_nested(
0265     const struct bucket_table *tbl, unsigned int hash);
0266 struct rhash_lock_head __rcu **__rht_bucket_nested(
0267     const struct bucket_table *tbl, unsigned int hash);
0268 struct rhash_lock_head __rcu **rht_bucket_nested_insert(
0269     struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);
0270 
0271 #define rht_dereference(p, ht) \
0272     rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
0273 
0274 #define rht_dereference_rcu(p, ht) \
0275     rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
0276 
0277 #define rht_dereference_bucket(p, tbl, hash) \
0278     rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
0279 
0280 #define rht_dereference_bucket_rcu(p, tbl, hash) \
0281     rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
0282 
0283 #define rht_entry(tpos, pos, member) \
0284     ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
0285 
0286 static inline struct rhash_lock_head __rcu *const *rht_bucket(
0287     const struct bucket_table *tbl, unsigned int hash)
0288 {
0289     return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
0290                      &tbl->buckets[hash];
0291 }
0292 
0293 static inline struct rhash_lock_head __rcu **rht_bucket_var(
0294     struct bucket_table *tbl, unsigned int hash)
0295 {
0296     return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
0297                      &tbl->buckets[hash];
0298 }
0299 
0300 static inline struct rhash_lock_head __rcu **rht_bucket_insert(
0301     struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
0302 {
0303     return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
0304                      &tbl->buckets[hash];
0305 }
0306 
0307 /*
0308  * We lock a bucket by setting BIT(0) in the pointer - this is always
0309  * zero in real pointers.  The NULLS mark is never stored in the bucket,
0310  * rather we store NULL if the bucket is empty.
0311  * bit_spin_locks do not handle contention well, but the whole point
0312  * of the hashtable design is to achieve minimum per-bucket contention.
0313  * A nested hash table might not have a bucket pointer.  In that case
0314  * we cannot get a lock.  For remove and replace, a missing bucket
0315  * cannot contain the object of interest, so no locking is needed.
0316  * For insert we allocate the bucket if this is the last bucket_table,
0317  * and then take the lock.
0318  * Sometimes we unlock a bucket by writing a new pointer there.  In that
0319  * case we don't need to unlock, but we do need to reset state such as
0320  * local_bh. For that we have rht_assign_unlock().  As rcu_assign_pointer()
0321  * provides the same release semantics that bit_spin_unlock() provides,
0322  * this is safe.
0323  * When we write to a bucket without unlocking, we use rht_assign_locked().
0324  */
0325 
0326 static inline void rht_lock(struct bucket_table *tbl,
0327                 struct rhash_lock_head __rcu **bkt)
0328 {
0329     local_bh_disable();
0330     bit_spin_lock(0, (unsigned long *)bkt);
0331     lock_map_acquire(&tbl->dep_map);
0332 }
0333 
0334 static inline void rht_lock_nested(struct bucket_table *tbl,
0335                    struct rhash_lock_head __rcu **bucket,
0336                    unsigned int subclass)
0337 {
0338     local_bh_disable();
0339     bit_spin_lock(0, (unsigned long *)bucket);
0340     lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
0341 }
0342 
0343 static inline void rht_unlock(struct bucket_table *tbl,
0344                   struct rhash_lock_head __rcu **bkt)
0345 {
0346     lock_map_release(&tbl->dep_map);
0347     bit_spin_unlock(0, (unsigned long *)bkt);
0348     local_bh_enable();
0349 }
0350 
0351 static inline struct rhash_head *__rht_ptr(
0352     struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
0353 {
0354     return (struct rhash_head *)
0355         ((unsigned long)p & ~BIT(0) ?:
0356          (unsigned long)RHT_NULLS_MARKER(bkt));
0357 }
0358 
0359 /*
0360  * Where 'bkt' is a bucket and might be locked:
0361  *   rht_ptr_rcu() dereferences that pointer and clears the lock bit.
0362  *   rht_ptr() dereferences in a context where the bucket is locked.
0363  *   rht_ptr_exclusive() dereferences in a context where exclusive
0364  *            access is guaranteed, such as when destroying the table.
0365  */
0366 static inline struct rhash_head *rht_ptr_rcu(
0367     struct rhash_lock_head __rcu *const *bkt)
0368 {
0369     return __rht_ptr(rcu_dereference(*bkt), bkt);
0370 }
0371 
0372 static inline struct rhash_head *rht_ptr(
0373     struct rhash_lock_head __rcu *const *bkt,
0374     struct bucket_table *tbl,
0375     unsigned int hash)
0376 {
0377     return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
0378 }
0379 
0380 static inline struct rhash_head *rht_ptr_exclusive(
0381     struct rhash_lock_head __rcu *const *bkt)
0382 {
0383     return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
0384 }
0385 
0386 static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
0387                      struct rhash_head *obj)
0388 {
0389     if (rht_is_a_nulls(obj))
0390         obj = NULL;
0391     rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
0392 }
0393 
0394 static inline void rht_assign_unlock(struct bucket_table *tbl,
0395                      struct rhash_lock_head __rcu **bkt,
0396                      struct rhash_head *obj)
0397 {
0398     if (rht_is_a_nulls(obj))
0399         obj = NULL;
0400     lock_map_release(&tbl->dep_map);
0401     rcu_assign_pointer(*bkt, (void *)obj);
0402     preempt_enable();
0403     __release(bitlock);
0404     local_bh_enable();
0405 }
0406 
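/*
 * Update-pattern sketch (illustrative only): the shape shared by the
 * insert/remove/replace helpers below.  "bkt", "tbl", "hash" and
 * "new_first" are placeholders.
 *
 *	rht_lock(tbl, bkt);
 *	head = rht_ptr(bkt, tbl, hash);
 *	... walk/modify the chain via rht_dereference_bucket() ...
 *	if (the first pointer in the bucket is unchanged)
 *		rht_unlock(tbl, bkt);
 *	else
 *		rht_assign_unlock(tbl, bkt, new_first);
 */
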
0407 /**
0408  * rht_for_each_from - iterate over hash chain from given head
0409  * @pos:    the &struct rhash_head to use as a loop cursor.
0410  * @head:   the &struct rhash_head to start from
0411  * @tbl:    the &struct bucket_table
0412  * @hash:   the hash value / bucket index
0413  */
0414 #define rht_for_each_from(pos, head, tbl, hash) \
0415     for (pos = head;            \
0416          !rht_is_a_nulls(pos);      \
0417          pos = rht_dereference_bucket((pos)->next, tbl, hash))
0418 
0419 /**
0420  * rht_for_each - iterate over hash chain
0421  * @pos:    the &struct rhash_head to use as a loop cursor.
0422  * @tbl:    the &struct bucket_table
0423  * @hash:   the hash value / bucket index
0424  */
0425 #define rht_for_each(pos, tbl, hash) \
0426     rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash),  \
0427               tbl, hash)
0428 
0429 /**
0430  * rht_for_each_entry_from - iterate over hash chain from given head
0431  * @tpos:   the type * to use as a loop cursor.
0432  * @pos:    the &struct rhash_head to use as a loop cursor.
0433  * @head:   the &struct rhash_head to start from
0434  * @tbl:    the &struct bucket_table
0435  * @hash:   the hash value / bucket index
0436  * @member: name of the &struct rhash_head within the hashable struct.
0437  */
0438 #define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \
0439     for (pos = head;                        \
0440          (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);    \
0441          pos = rht_dereference_bucket((pos)->next, tbl, hash))
0442 
0443 /**
0444  * rht_for_each_entry - iterate over hash chain of given type
0445  * @tpos:   the type * to use as a loop cursor.
0446  * @pos:    the &struct rhash_head to use as a loop cursor.
0447  * @tbl:    the &struct bucket_table
0448  * @hash:   the hash value / bucket index
0449  * @member: name of the &struct rhash_head within the hashable struct.
0450  */
0451 #define rht_for_each_entry(tpos, pos, tbl, hash, member)        \
0452     rht_for_each_entry_from(tpos, pos,              \
0453                 rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
0454                 tbl, hash, member)
0455 
0456 /**
0457  * rht_for_each_entry_safe - safely iterate over hash chain of given type
0458  * @tpos:   the type * to use as a loop cursor.
0459  * @pos:    the &struct rhash_head to use as a loop cursor.
0460  * @next:   the &struct rhash_head to use as next in loop cursor.
0461  * @tbl:    the &struct bucket_table
0462  * @hash:   the hash value / bucket index
0463  * @member: name of the &struct rhash_head within the hashable struct.
0464  *
0465  * This hash chain list-traversal primitive allows for the looped code to
0466  * remove the loop cursor from the list.
0467  */
0468 #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)       \
0469     for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash),             \
0470          next = !rht_is_a_nulls(pos) ?                    \
0471                rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
0472          (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);          \
0473          pos = next,                              \
0474          next = !rht_is_a_nulls(pos) ?                    \
0475                rht_dereference_bucket(pos->next, tbl, hash) : NULL)
0476 
0477 /**
0478  * rht_for_each_rcu_from - iterate over rcu hash chain from given head
0479  * @pos:    the &struct rhash_head to use as a loop cursor.
0480  * @head:   the &struct rhash_head to start from
0481  * @tbl:    the &struct bucket_table
0482  * @hash:   the hash value / bucket index
0483  *
0484  * This hash chain list-traversal primitive may safely run concurrently with
0485  * the _rcu mutation primitives such as rhashtable_insert() as long as the
0486  * traversal is guarded by rcu_read_lock().
0487  */
0488 #define rht_for_each_rcu_from(pos, head, tbl, hash)         \
0489     for (({barrier(); }),                       \
0490          pos = head;                        \
0491          !rht_is_a_nulls(pos);                  \
0492          pos = rcu_dereference_raw(pos->next))
0493 
0494 /**
0495  * rht_for_each_rcu - iterate over rcu hash chain
0496  * @pos:    the &struct rhash_head to use as a loop cursor.
0497  * @tbl:    the &struct bucket_table
0498  * @hash:   the hash value / bucket index
0499  *
0500  * This hash chain list-traversal primitive may safely run concurrently with
0501  * the _rcu mutation primitives such as rhashtable_insert() as long as the
0502  * traversal is guarded by rcu_read_lock().
0503  */
0504 #define rht_for_each_rcu(pos, tbl, hash)            \
0505     for (({barrier(); }),                   \
0506          pos = rht_ptr_rcu(rht_bucket(tbl, hash));      \
0507          !rht_is_a_nulls(pos);              \
0508          pos = rcu_dereference_raw(pos->next))
0509 
0510 /**
0511  * rht_for_each_entry_rcu_from - iterate over rcu hash chain from given head
0512  * @tpos:   the type * to use as a loop cursor.
0513  * @pos:    the &struct rhash_head to use as a loop cursor.
0514  * @head:   the &struct rhash_head to start from
0515  * @tbl:    the &struct bucket_table
0516  * @hash:   the hash value / bucket index
0517  * @member: name of the &struct rhash_head within the hashable struct.
0518  *
0519  * This hash chain list-traversal primitive may safely run concurrently with
0520  * the _rcu mutation primitives such as rhashtable_insert() as long as the
0521  * traversal is guarded by rcu_read_lock().
0522  */
0523 #define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
0524     for (({barrier(); }),                           \
0525          pos = head;                            \
0526          (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);        \
0527          pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
0528 
0529 /**
0530  * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
0531  * @tpos:   the type * to use as a loop cursor.
0532  * @pos:    the &struct rhash_head to use as a loop cursor.
0533  * @tbl:    the &struct bucket_table
0534  * @hash:   the hash value / bucket index
0535  * @member: name of the &struct rhash_head within the hashable struct.
0536  *
0537  * This hash chain list-traversal primitive may safely run concurrently with
0538  * the _rcu mutation primitives such as rhashtable_insert() as long as the
0539  * traversal is guarded by rcu_read_lock().
0540  */
0541 #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)           \
0542     rht_for_each_entry_rcu_from(tpos, pos,                 \
0543                     rht_ptr_rcu(rht_bucket(tbl, hash)),    \
0544                     tbl, hash, member)
0545 
0546 /**
0547  * rhl_for_each_rcu - iterate over rcu hash table list
0548  * @pos:    the &struct rlist_head to use as a loop cursor.
0549  * @list:   the head of the list
0550  *
0551  * This hash chain list-traversal primitive should be used on the
0552  * list returned by rhltable_lookup.
0553  */
0554 #define rhl_for_each_rcu(pos, list)                 \
0555     for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
0556 
0557 /**
0558  * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
0559  * @tpos:   the type * to use as a loop cursor.
0560  * @pos:    the &struct rlist_head to use as a loop cursor.
0561  * @list:   the head of the list
0562  * @member: name of the &struct rlist_head within the hashable struct.
0563  *
0564  * This hash chain list-traversal primitive should be used on the
0565  * list returned by rhltable_lookup.
0566  */
0567 #define rhl_for_each_entry_rcu(tpos, pos, list, member)         \
0568     for (pos = list; pos && rht_entry(tpos, pos, member);       \
0569          pos = rcu_dereference_raw(pos->next))
0570 
0571 static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
0572                      const void *obj)
0573 {
0574     struct rhashtable *ht = arg->ht;
0575     const char *ptr = obj;
0576 
0577     return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
0578 }
0579 
0580 /* Internal function, do not use. */
0581 static inline struct rhash_head *__rhashtable_lookup(
0582     struct rhashtable *ht, const void *key,
0583     const struct rhashtable_params params)
0584 {
0585     struct rhashtable_compare_arg arg = {
0586         .ht = ht,
0587         .key = key,
0588     };
0589     struct rhash_lock_head __rcu *const *bkt;
0590     struct bucket_table *tbl;
0591     struct rhash_head *he;
0592     unsigned int hash;
0593 
0594     tbl = rht_dereference_rcu(ht->tbl, ht);
0595 restart:
0596     hash = rht_key_hashfn(ht, tbl, key, params);
0597     bkt = rht_bucket(tbl, hash);
0598     do {
0599         rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) {
0600             if (params.obj_cmpfn ?
0601                 params.obj_cmpfn(&arg, rht_obj(ht, he)) :
0602                 rhashtable_compare(&arg, rht_obj(ht, he)))
0603                 continue;
0604             return he;
0605         }
0606         /* An object might have been moved to a different hash chain,
0607          * while we walk along it - better check and retry.
0608          */
0609     } while (he != RHT_NULLS_MARKER(bkt));
0610 
0611     /* Ensure we see any new tables. */
0612     smp_rmb();
0613 
0614     tbl = rht_dereference_rcu(tbl->future_tbl, ht);
0615     if (unlikely(tbl))
0616         goto restart;
0617 
0618     return NULL;
0619 }
0620 
0621 /**
0622  * rhashtable_lookup - search hash table
0623  * @ht:     hash table
0624  * @key:    the pointer to the key
0625  * @params: hash table parameters
0626  *
0627  * Computes the hash value for the key and traverses the bucket chain looking
0628  * for an entry with an identical key. The first matching entry is returned.
0629  *
0630  * This must only be called under the RCU read lock.
0631  *
0632  * Returns the first entry on which the compare function returned true.
0633  */
0634 static inline void *rhashtable_lookup(
0635     struct rhashtable *ht, const void *key,
0636     const struct rhashtable_params params)
0637 {
0638     struct rhash_head *he = __rhashtable_lookup(ht, key, params);
0639 
0640     return he ? rht_obj(ht, he) : NULL;
0641 }
0642 
0643 /**
0644  * rhashtable_lookup_fast - search hash table, without RCU read lock
0645  * @ht:     hash table
0646  * @key:    the pointer to the key
0647  * @params: hash table parameters
0648  *
0649  * Computes the hash value for the key and traverses the bucket chain looking
0650  * for an entry with an identical key. The first matching entry is returned.
0651  *
0652  * Only use this function when you have other mechanisms guaranteeing
0653  * that the object won't go away after the RCU read lock is released.
0654  *
0655  * Returns the first entry on which the compare function returned true.
0656  */
0657 static inline void *rhashtable_lookup_fast(
0658     struct rhashtable *ht, const void *key,
0659     const struct rhashtable_params params)
0660 {
0661     void *obj;
0662 
0663     rcu_read_lock();
0664     obj = rhashtable_lookup(ht, key, params);
0665     rcu_read_unlock();
0666 
0667     return obj;
0668 }
0669 
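/*
 * Lookup sketch (illustrative only), reusing the hypothetical
 * test_obj/test_ht/test_params from the sketch near the top of this file:
 */
static struct test_obj *example_find(u32 key)
{
	/* rhashtable_lookup_fast() takes the RCU read lock internally; the
	 * caller must otherwise guarantee that the object cannot be freed
	 * while it is still being used.
	 */
	return rhashtable_lookup_fast(&test_ht, &key, test_params);
}
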
0670 /**
0671  * rhltable_lookup - search hash list table
0672  * @hlt:    hash table
0673  * @key:    the pointer to the key
0674  * @params: hash table parameters
0675  *
0676  * Computes the hash value for the key and traverses the bucket chain looking
0677  * for an entry with an identical key.  All matching entries are returned
0678  * in a list.
0679  *
0680  * This must only be called under the RCU read lock.
0681  *
0682  * Returns the list of entries that match the given key.
0683  */
0684 static inline struct rhlist_head *rhltable_lookup(
0685     struct rhltable *hlt, const void *key,
0686     const struct rhashtable_params params)
0687 {
0688     struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
0689 
0690     return he ? container_of(he, struct rhlist_head, rhead) : NULL;
0691 }
0692 
0693 /* Internal function, please use rhashtable_insert_fast() instead. This
0694  * function returns the existing element already in the hash table if there
0695  * is a clash, otherwise it returns an error via ERR_PTR().
0696  */
0697 static inline void *__rhashtable_insert_fast(
0698     struct rhashtable *ht, const void *key, struct rhash_head *obj,
0699     const struct rhashtable_params params, bool rhlist)
0700 {
0701     struct rhashtable_compare_arg arg = {
0702         .ht = ht,
0703         .key = key,
0704     };
0705     struct rhash_lock_head __rcu **bkt;
0706     struct rhash_head __rcu **pprev;
0707     struct bucket_table *tbl;
0708     struct rhash_head *head;
0709     unsigned int hash;
0710     int elasticity;
0711     void *data;
0712 
0713     rcu_read_lock();
0714 
0715     tbl = rht_dereference_rcu(ht->tbl, ht);
0716     hash = rht_head_hashfn(ht, tbl, obj, params);
0717     elasticity = RHT_ELASTICITY;
0718     bkt = rht_bucket_insert(ht, tbl, hash);
0719     data = ERR_PTR(-ENOMEM);
0720     if (!bkt)
0721         goto out;
0722     pprev = NULL;
0723     rht_lock(tbl, bkt);
0724 
0725     if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
0726 slow_path:
0727         rht_unlock(tbl, bkt);
0728         rcu_read_unlock();
0729         return rhashtable_insert_slow(ht, key, obj);
0730     }
0731 
0732     rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
0733         struct rhlist_head *plist;
0734         struct rhlist_head *list;
0735 
0736         elasticity--;
0737         if (!key ||
0738             (params.obj_cmpfn ?
0739              params.obj_cmpfn(&arg, rht_obj(ht, head)) :
0740              rhashtable_compare(&arg, rht_obj(ht, head)))) {
0741             pprev = &head->next;
0742             continue;
0743         }
0744 
0745         data = rht_obj(ht, head);
0746 
0747         if (!rhlist)
0748             goto out_unlock;
0749 
0750 
0751         list = container_of(obj, struct rhlist_head, rhead);
0752         plist = container_of(head, struct rhlist_head, rhead);
0753 
0754         RCU_INIT_POINTER(list->next, plist);
0755         head = rht_dereference_bucket(head->next, tbl, hash);
0756         RCU_INIT_POINTER(list->rhead.next, head);
0757         if (pprev) {
0758             rcu_assign_pointer(*pprev, obj);
0759             rht_unlock(tbl, bkt);
0760         } else
0761             rht_assign_unlock(tbl, bkt, obj);
0762         data = NULL;
0763         goto out;
0764     }
0765 
0766     if (elasticity <= 0)
0767         goto slow_path;
0768 
0769     data = ERR_PTR(-E2BIG);
0770     if (unlikely(rht_grow_above_max(ht, tbl)))
0771         goto out_unlock;
0772 
0773     if (unlikely(rht_grow_above_100(ht, tbl)))
0774         goto slow_path;
0775 
0776     /* Inserting at head of list makes unlocking free. */
0777     head = rht_ptr(bkt, tbl, hash);
0778 
0779     RCU_INIT_POINTER(obj->next, head);
0780     if (rhlist) {
0781         struct rhlist_head *list;
0782 
0783         list = container_of(obj, struct rhlist_head, rhead);
0784         RCU_INIT_POINTER(list->next, NULL);
0785     }
0786 
0787     atomic_inc(&ht->nelems);
0788     rht_assign_unlock(tbl, bkt, obj);
0789 
0790     if (rht_grow_above_75(ht, tbl))
0791         schedule_work(&ht->run_work);
0792 
0793     data = NULL;
0794 out:
0795     rcu_read_unlock();
0796 
0797     return data;
0798 
0799 out_unlock:
0800     rht_unlock(tbl, bkt);
0801     goto out;
0802 }
0803 
0804 /**
0805  * rhashtable_insert_fast - insert object into hash table
0806  * @ht:     hash table
0807  * @obj:    pointer to hash head inside object
0808  * @params: hash table parameters
0809  *
0810  * Will take the per-bucket bitlock to protect against concurrent mutations
0811  * on the same bucket. Multiple insertions may occur in parallel unless
0812  * they map to the same bucket.
0813  *
0814  * It is safe to call this function from atomic context.
0815  *
0816  * Will trigger an automatic deferred table resizing if residency in the
0817  * table grows beyond 75%.
0818  */
0819 static inline int rhashtable_insert_fast(
0820     struct rhashtable *ht, struct rhash_head *obj,
0821     const struct rhashtable_params params)
0822 {
0823     void *ret;
0824 
0825     ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
0826     if (IS_ERR(ret))
0827         return PTR_ERR(ret);
0828 
0829     return ret == NULL ? 0 : -EEXIST;
0830 }
0831 
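/*
 * Insert sketch (illustrative only), reusing the hypothetical
 * test_obj/test_ht/test_params:
 */
static int example_insert(struct test_obj *obj)
{
	/* Returns 0 on success or a negative errno (e.g. -ENOMEM, -E2BIG).
	 * Note that this variant passes a NULL key, so it does not itself
	 * reject duplicate keys; see rhashtable_lookup_insert_fast() below.
	 */
	return rhashtable_insert_fast(&test_ht, &obj->node, test_params);
}
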
0832 /**
0833  * rhltable_insert_key - insert object into hash list table
0834  * @hlt:    hash list table
0835  * @key:    the pointer to the key
0836  * @list:   pointer to hash list head inside object
0837  * @params: hash table parameters
0838  *
0839  * Will take the per-bucket bitlock to protect against concurrent mutations
0840  * on the same bucket. Multiple insertions may occur in parallel unless
0841  * they map to the same bucket.
0842  *
0843  * It is safe to call this function from atomic context.
0844  *
0845  * Will trigger an automatic deferred table resizing if residency in the
0846  * table grows beyond 75%.
0847  */
0848 static inline int rhltable_insert_key(
0849     struct rhltable *hlt, const void *key, struct rhlist_head *list,
0850     const struct rhashtable_params params)
0851 {
0852     return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
0853                         params, true));
0854 }
0855 
0856 /**
0857  * rhltable_insert - insert object into hash list table
0858  * @hlt:    hash list table
0859  * @list:   pointer to hash list head inside object
0860  * @params: hash table parameters
0861  *
0862  * Will take the per-bucket bitlock to protect against concurrent mutations
0863  * on the same bucket. Multiple insertions may occur in parallel unless
0864  * they map to the same bucket.
0865  *
0866  * It is safe to call this function from atomic context.
0867  *
0868  * Will trigger an automatic deferred table resizing if residency in the
0869  * table grows beyond 75%.
0870  */
0871 static inline int rhltable_insert(
0872     struct rhltable *hlt, struct rhlist_head *list,
0873     const struct rhashtable_params params)
0874 {
0875     const char *key = rht_obj(&hlt->ht, &list->rhead);
0876 
0877     key += params.key_offset;
0878 
0879     return rhltable_insert_key(hlt, key, list, params);
0880 }
0881 
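/*
 * rhltable sketch (illustrative only): a hash list table keeps every object
 * that shares a key.  "struct multi_obj", "multi_params" and "multi_hlt"
 * are hypothetical, as in the earlier sketches; the table is set up once
 * with rhltable_init(&multi_hlt, &multi_params).
 */
struct multi_obj {
	u32			key;
	struct rhlist_head	list_node;
};

static const struct rhashtable_params multi_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct multi_obj, key),
	.head_offset	= offsetof(struct multi_obj, list_node),
};

static struct rhltable multi_hlt;

static void example_visit_all(u32 key)
{
	struct rhlist_head *list, *pos;
	struct multi_obj *cur;

	rcu_read_lock();
	list = rhltable_lookup(&multi_hlt, &key, multi_params);
	rhl_for_each_entry_rcu(cur, pos, list, list_node) {
		/* cur visits every object inserted under this key with
		 * rhltable_insert(&multi_hlt, &obj->list_node, multi_params)
		 */
	}
	rcu_read_unlock();
}
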
0882 /**
0883  * rhashtable_lookup_insert_fast - lookup and insert object into hash table
0884  * @ht:     hash table
0885  * @obj:    pointer to hash head inside object
0886  * @params: hash table parameters
0887  *
0888  * This lookup function may only be used for a fixed-key hash table (key_len
0889  * parameter set). It will BUG() if used inappropriately.
0890  *
0891  * It is safe to call this function from atomic context.
0892  *
0893  * Will trigger an automatic deferred table resizing if residency in the
0894  * table grows beyond 75%.
0895  */
0896 static inline int rhashtable_lookup_insert_fast(
0897     struct rhashtable *ht, struct rhash_head *obj,
0898     const struct rhashtable_params params)
0899 {
0900     const char *key = rht_obj(ht, obj);
0901     void *ret;
0902 
0903     BUG_ON(ht->p.obj_hashfn);
0904 
0905     ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
0906                        false);
0907     if (IS_ERR(ret))
0908         return PTR_ERR(ret);
0909 
0910     return ret == NULL ? 0 : -EEXIST;
0911 }
0912 
0913 /**
0914  * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
0915  * @ht:     hash table
0916  * @obj:    pointer to hash head inside object
0917  * @params: hash table parameters
0918  *
0919  * Just like rhashtable_lookup_insert_fast(), but this function returns the
0920  * object if it exists, NULL if it did not and the insertion was successful,
0921  * and an ERR_PTR otherwise.
0922  */
0923 static inline void *rhashtable_lookup_get_insert_fast(
0924     struct rhashtable *ht, struct rhash_head *obj,
0925     const struct rhashtable_params params)
0926 {
0927     const char *key = rht_obj(ht, obj);
0928 
0929     BUG_ON(ht->p.obj_hashfn);
0930 
0931     return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
0932                     false);
0933 }
0934 
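/*
 * Get-or-insert sketch (illustrative only), reusing the hypothetical
 * test_obj/test_ht/test_params:
 */
static struct test_obj *example_get_or_insert(struct test_obj *candidate)
{
	struct test_obj *old;

	old = rhashtable_lookup_get_insert_fast(&test_ht, &candidate->node,
						test_params);
	if (IS_ERR(old))
		return old;		/* insertion failed, e.g. ERR_PTR(-ENOMEM) */
	if (old)
		return old;		/* an object with this key already existed */
	return candidate;		/* candidate was inserted */
}
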
0935 /**
0936  * rhashtable_lookup_insert_key - search and insert object to hash table
0937  *                with explicit key
0938  * @ht:     hash table
0939  * @key:    key
0940  * @obj:    pointer to hash head inside object
0941  * @params: hash table parameters
0942  *
0943  * Lookups may occur in parallel with hashtable mutations and resizing.
0944  *
0945  * Will trigger an automatic deferred table resizing if residency in the
0946  * table grows beyond 75%.
0947  *
0948  * Returns zero on success.
0949  */
0950 static inline int rhashtable_lookup_insert_key(
0951     struct rhashtable *ht, const void *key, struct rhash_head *obj,
0952     const struct rhashtable_params params)
0953 {
0954     void *ret;
0955 
0956     BUG_ON(!ht->p.obj_hashfn || !key);
0957 
0958     ret = __rhashtable_insert_fast(ht, key, obj, params, false);
0959     if (IS_ERR(ret))
0960         return PTR_ERR(ret);
0961 
0962     return ret == NULL ? 0 : -EEXIST;
0963 }
0964 
0965 /**
0966  * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
0967  * @ht:     hash table
0968  * @key:    key
0969  * @obj:    pointer to hash head inside object
0970  * @params: hash table parameters
0971  *
0972  * Just like rhashtable_lookup_insert_key(), but this function returns the
0973  * object if it exists, NULL if it does not and the insertion was successful,
0974  * and an ERR_PTR otherwise.
0975  */
0976 static inline void *rhashtable_lookup_get_insert_key(
0977     struct rhashtable *ht, const void *key, struct rhash_head *obj,
0978     const struct rhashtable_params params)
0979 {
0980     BUG_ON(!ht->p.obj_hashfn || !key);
0981 
0982     return __rhashtable_insert_fast(ht, key, obj, params, false);
0983 }
0984 
0985 /* Internal function, please use rhashtable_remove_fast() instead */
0986 static inline int __rhashtable_remove_fast_one(
0987     struct rhashtable *ht, struct bucket_table *tbl,
0988     struct rhash_head *obj, const struct rhashtable_params params,
0989     bool rhlist)
0990 {
0991     struct rhash_lock_head __rcu **bkt;
0992     struct rhash_head __rcu **pprev;
0993     struct rhash_head *he;
0994     unsigned int hash;
0995     int err = -ENOENT;
0996 
0997     hash = rht_head_hashfn(ht, tbl, obj, params);
0998     bkt = rht_bucket_var(tbl, hash);
0999     if (!bkt)
1000         return -ENOENT;
1001     pprev = NULL;
1002     rht_lock(tbl, bkt);
1003 
1004     rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
1005         struct rhlist_head *list;
1006 
1007         list = container_of(he, struct rhlist_head, rhead);
1008 
1009         if (he != obj) {
1010             struct rhlist_head __rcu **lpprev;
1011 
1012             pprev = &he->next;
1013 
1014             if (!rhlist)
1015                 continue;
1016 
1017             do {
1018                 lpprev = &list->next;
1019                 list = rht_dereference_bucket(list->next,
1020                                   tbl, hash);
1021             } while (list && obj != &list->rhead);
1022 
1023             if (!list)
1024                 continue;
1025 
1026             list = rht_dereference_bucket(list->next, tbl, hash);
1027             RCU_INIT_POINTER(*lpprev, list);
1028             err = 0;
1029             break;
1030         }
1031 
1032         obj = rht_dereference_bucket(obj->next, tbl, hash);
1033         err = 1;
1034 
1035         if (rhlist) {
1036             list = rht_dereference_bucket(list->next, tbl, hash);
1037             if (list) {
1038                 RCU_INIT_POINTER(list->rhead.next, obj);
1039                 obj = &list->rhead;
1040                 err = 0;
1041             }
1042         }
1043 
1044         if (pprev) {
1045             rcu_assign_pointer(*pprev, obj);
1046             rht_unlock(tbl, bkt);
1047         } else {
1048             rht_assign_unlock(tbl, bkt, obj);
1049         }
1050         goto unlocked;
1051     }
1052 
1053     rht_unlock(tbl, bkt);
1054 unlocked:
1055     if (err > 0) {
1056         atomic_dec(&ht->nelems);
1057         if (unlikely(ht->p.automatic_shrinking &&
1058                  rht_shrink_below_30(ht, tbl)))
1059             schedule_work(&ht->run_work);
1060         err = 0;
1061     }
1062 
1063     return err;
1064 }
1065 
1066 /* Internal function, please use rhashtable_remove_fast() instead */
1067 static inline int __rhashtable_remove_fast(
1068     struct rhashtable *ht, struct rhash_head *obj,
1069     const struct rhashtable_params params, bool rhlist)
1070 {
1071     struct bucket_table *tbl;
1072     int err;
1073 
1074     rcu_read_lock();
1075 
1076     tbl = rht_dereference_rcu(ht->tbl, ht);
1077 
1078     /* Because we have already taken (and released) the bucket
1079      * lock in old_tbl, if we find that future_tbl is not yet
1080      * visible then that guarantees that the entry is still in
1081      * the old tbl if it exists.
1082      */
1083     while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
1084                            rhlist)) &&
1085            (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
1086         ;
1087 
1088     rcu_read_unlock();
1089 
1090     return err;
1091 }
1092 
1093 /**
1094  * rhashtable_remove_fast - remove object from hash table
1095  * @ht:     hash table
1096  * @obj:    pointer to hash head inside object
1097  * @params: hash table parameters
1098  *
1099  * Since the hash chain is single linked, the removal operation needs to
1100  * walk the bucket chain upon removal. The removal operation is thus
1101  * considerably slow if the hash table is not correctly sized.
1102  *
1103  * Will automatically shrink the table if permitted when residency drops
1104  * below 30%.
1105  *
1106  * Returns zero on success, -ENOENT if the entry could not be found.
1107  */
1108 static inline int rhashtable_remove_fast(
1109     struct rhashtable *ht, struct rhash_head *obj,
1110     const struct rhashtable_params params)
1111 {
1112     return __rhashtable_remove_fast(ht, obj, params, false);
1113 }
1114 
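/*
 * Removal sketch (illustrative only), reusing the hypothetical
 * test_obj/test_ht/test_params:
 */
static int example_remove(struct test_obj *obj)
{
	int err = rhashtable_remove_fast(&test_ht, &obj->node, test_params);

	/* On success the object is unlinked but may still be reached by
	 * concurrent RCU readers; free it only after a grace period, for
	 * example with kfree_rcu() on an rcu_head embedded in test_obj.
	 */
	return err;
}
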
1115 /**
1116  * rhltable_remove - remove object from hash list table
1117  * @hlt:    hash list table
1118  * @list:   pointer to hash list head inside object
1119  * @params: hash table parameters
1120  *
1121  * Since the hash chain is single linked, the removal operation needs to
1122  * walk the bucket chain upon removal. The removal operation is thus
1123  * considerably slow if the hash table is not correctly sized.
1124  *
1125  * Will automatically shrink the table if permitted when residency drops
1126  * below 30%.
1127  *
1128  * Returns zero on success, -ENOENT if the entry could not be found.
1129  */
1130 static inline int rhltable_remove(
1131     struct rhltable *hlt, struct rhlist_head *list,
1132     const struct rhashtable_params params)
1133 {
1134     return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
1135 }
1136 
1137 /* Internal function, please use rhashtable_replace_fast() instead */
1138 static inline int __rhashtable_replace_fast(
1139     struct rhashtable *ht, struct bucket_table *tbl,
1140     struct rhash_head *obj_old, struct rhash_head *obj_new,
1141     const struct rhashtable_params params)
1142 {
1143     struct rhash_lock_head __rcu **bkt;
1144     struct rhash_head __rcu **pprev;
1145     struct rhash_head *he;
1146     unsigned int hash;
1147     int err = -ENOENT;
1148 
1149     /* Minimally, the old and new objects must have the same hash
1150      * (which should mean identifiers are the same).
1151      */
1152     hash = rht_head_hashfn(ht, tbl, obj_old, params);
1153     if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
1154         return -EINVAL;
1155 
1156     bkt = rht_bucket_var(tbl, hash);
1157     if (!bkt)
1158         return -ENOENT;
1159 
1160     pprev = NULL;
1161     rht_lock(tbl, bkt);
1162 
1163     rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
1164         if (he != obj_old) {
1165             pprev = &he->next;
1166             continue;
1167         }
1168 
1169         rcu_assign_pointer(obj_new->next, obj_old->next);
1170         if (pprev) {
1171             rcu_assign_pointer(*pprev, obj_new);
1172             rht_unlock(tbl, bkt);
1173         } else {
1174             rht_assign_unlock(tbl, bkt, obj_new);
1175         }
1176         err = 0;
1177         goto unlocked;
1178     }
1179 
1180     rht_unlock(tbl, bkt);
1181 
1182 unlocked:
1183     return err;
1184 }
1185 
1186 /**
1187  * rhashtable_replace_fast - replace an object in hash table
1188  * @ht:     hash table
1189  * @obj_old:    pointer to hash head inside object being replaced
1190  * @obj_new:    pointer to hash head inside object which is new
1191  * @params: hash table parameters
1192  *
1193  * Replacing an object doesn't affect the number of elements in the hash table
1194  * or bucket, so we don't need to worry about shrinking or expanding the
1195  * table here.
1196  *
1197  * Returns zero on success, -ENOENT if the entry could not be found,
1198  * -EINVAL if hash is not the same for the old and new objects.
1199  */
1200 static inline int rhashtable_replace_fast(
1201     struct rhashtable *ht, struct rhash_head *obj_old,
1202     struct rhash_head *obj_new,
1203     const struct rhashtable_params params)
1204 {
1205     struct bucket_table *tbl;
1206     int err;
1207 
1208     rcu_read_lock();
1209 
1210     tbl = rht_dereference_rcu(ht->tbl, ht);
1211 
1212     /* Because we have already taken (and released) the bucket
1213      * lock in old_tbl, if we find that future_tbl is not yet
1214      * visible then that guarantees that the entry is still in
1215      * the old tbl if it exists.
1216      */
1217     while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
1218                         obj_new, params)) &&
1219            (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
1220         ;
1221 
1222     rcu_read_unlock();
1223 
1224     return err;
1225 }
1226 
1227 /**
1228  * rhltable_walk_enter - Initialise an iterator
1229  * @hlt:    Table to walk over
1230  * @iter:   Hash table Iterator
1231  *
1232  * This function prepares a hash table walk.
1233  *
1234  * Note that if you restart a walk after rhashtable_walk_stop you
1235  * may see the same object twice.  Also, you may miss objects if
1236  * there are removals in between rhashtable_walk_stop and the next
1237  * call to rhashtable_walk_start.
1238  *
1239  * For a completely stable walk you should construct your own data
1240  * structure outside the hash table.
1241  *
1242  * This function may be called from any process context, including
1243  * non-preemptable context, but cannot be called from softirq or
1244  * hardirq context.
1245  *
1246  * You must call rhashtable_walk_exit after this function returns.
1247  */
1248 static inline void rhltable_walk_enter(struct rhltable *hlt,
1249                        struct rhashtable_iter *iter)
1250 {
1251     return rhashtable_walk_enter(&hlt->ht, iter);
1252 }
1253 
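/*
 * Walk sketch (illustrative only), reusing the hypothetical test_ht and
 * test_obj: visiting every object with the rhashtable_walk_*() API
 * declared earlier in this file.
 */
static void example_walk(void)
{
	struct rhashtable_iter iter;
	struct test_obj *obj;

	rhashtable_walk_enter(&test_ht, &iter);
	rhashtable_walk_start(&iter);

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* table resized; entries may repeat */
			break;
		}
		/* process obj; sleeping is not allowed between start/stop */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}
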
1254 /**
1255  * rhltable_free_and_destroy - free elements and destroy hash list table
1256  * @hlt:    the hash list table to destroy
1257  * @free_fn:    callback to release resources of element
1258  * @arg:    pointer passed to free_fn
1259  *
1260  * See documentation for rhashtable_free_and_destroy.
1261  */
1262 static inline void rhltable_free_and_destroy(struct rhltable *hlt,
1263                          void (*free_fn)(void *ptr,
1264                                  void *arg),
1265                          void *arg)
1266 {
1267     return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
1268 }
1269 
1270 static inline void rhltable_destroy(struct rhltable *hlt)
1271 {
1272     return rhltable_free_and_destroy(hlt, NULL, NULL);
1273 }
1274 
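/*
 * Teardown sketch (illustrative only), reusing the hypothetical test_ht and
 * test_obj; kfree() (from <linux/slab.h>) assumes the objects were
 * kmalloc()ed and that no readers or writers can still reach the table.
 */
static void example_free_obj(void *ptr, void *arg)
{
	kfree(ptr);			/* ptr points at the enclosing test_obj */
}

static void example_teardown(void)
{
	rhashtable_free_and_destroy(&test_ht, example_free_obj, NULL);
}
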
1275 #endif /* _LINUX_RHASHTABLE_H */