/* Extracted from a web (LXR) view of the Linux kernel source; navigation
 * residue removed. */
0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _LINUX_RCULIST_H
0003 #define _LINUX_RCULIST_H
0004 
0005 #ifdef __KERNEL__
0006 
0007 /*
0008  * RCU-protected list version
0009  */
0010 #include <linux/list.h>
0011 #include <linux/rcupdate.h>
0012 
/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
 * cleanup tasks, when readers have no access to the list being initialized.
 * However, if the list being initialized is visible to readers, you
 * need to keep the compiler from being too mischievous.
 */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
    /* WRITE_ONCE() forbids the compiler from tearing or fusing these
     * stores, which concurrent lockless readers might otherwise observe
     * in a half-updated state. */
    WRITE_ONCE(list->next, list);
    WRITE_ONCE(list->prev, list);
}
0027 
/*
 * return the ->next pointer of a list_head in an rcu safe
 * way, we must not access it directly
 *
 * Expands to an __rcu-qualified lvalue, suitable as the argument of
 * rcu_dereference() (readers) or rcu_assign_pointer() (updaters).
 */
#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
0033 
/**
 * list_tail_rcu - returns the prev pointer of the head of the list
 * @head: the head of the list
 *
 * Expands to an __rcu-qualified lvalue for @head->prev.
 *
 * Note: This should only be used with the list header, and even then
 * only if list_del() and similar primitives are not also used on the
 * list header.
 */
#define list_tail_rcu(head) (*((struct list_head __rcu **)(&(head)->prev)))
0043 
/*
 * Check during list traversal that we are within an RCU reader
 */

/* Takes exactly one argument: expanding it with more than one argument
 * (i.e. when a caller passed more than one optional "cond" to an _rcu
 * iterator) fails at compile time. */
#define check_arg_count_one(dummy)

#ifdef CONFIG_PROVE_RCU_LIST
/* Warn (via lockdep) if the list is traversed with neither the caller's
 * optional lockdep condition nor any flavor of RCU read lock held. */
#define __list_check_rcu(dummy, cond, extra...)             \
    ({                              \
    check_arg_count_one(extra);                 \
    RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(),      \
             "RCU-list traversed in non-reader section!");  \
    })

/* SRCU variant: the caller-supplied lockdep condition is mandatory. */
#define __list_check_srcu(cond)                  \
    ({                               \
    RCU_LOCKDEP_WARN(!(cond),                    \
        "RCU-list traversed without holding the required lock!");\
    })
#else
/* Checking disabled: still validate the argument count, emit no code. */
#define __list_check_rcu(dummy, cond, extra...)             \
    ({ check_arg_count_one(extra); })

#define __list_check_srcu(cond) ({ })
#endif
0069 
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 *
 * Ordering matters: @new is fully initialized *before* it is published
 * via rcu_assign_pointer(), so a concurrent reader that sees @new also
 * sees valid ->next/->prev pointers in it.
 */
static inline void __list_add_rcu(struct list_head *new,
        struct list_head *prev, struct list_head *next)
{
    if (!__list_add_valid(new, prev, next))
        return;

    new->next = next;
    new->prev = prev;
    rcu_assign_pointer(list_next_rcu(prev), new);
    next->prev = new;
}
0087 
/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
    __list_add_rcu(new, head, head->next);
}
0108 
/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
                    struct list_head *head)
{
    __list_add_rcu(new, head->prev, head);
}
0130 
/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
    __list_del_entry(entry);
    /* Only ->prev is poisoned: RCU readers never follow ->prev, but may
     * still be walking through this entry's ->next. */
    entry->prev = LIST_POISON2;
}
0160 
/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on the node return true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
    /* Already-unhashed nodes are left alone, making this idempotent. */
    if (!hlist_unhashed(n)) {
        __hlist_del(n);
        WRITE_ONCE(n->pprev, NULL);
    }
}
0188 
/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
                struct list_head *new)
{
    /* Initialize @new completely before publishing it in place of @old;
     * readers then see either the old or the new entry, never a
     * partially-initialized one. */
    new->next = old->next;
    new->prev = old->prev;
    rcu_assign_pointer(list_next_rcu(new->prev), new);
    new->next->prev = new;
    /* ->prev of @old may be poisoned: readers never traverse it. */
    old->prev = LIST_POISON2;
}
0206 
/**
 * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 * @list:   the RCU-protected list to splice
 * @prev:   points to the last element of the existing list
 * @next:   points to the first element of the existing list
 * @sync:   synchronize_rcu, synchronize_rcu_expedited, ...
 *
 * The list pointed to by @prev and @next can be RCU-read traversed
 * concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to prevent
 * any other updates to the existing list.  In principle, it is possible to
 * modify the list as soon as sync() begins execution. If this sort of thing
 * becomes necessary, an alternative version based on call_rcu() could be
 * created.  But only if -really- needed -- there is no shortage of RCU API
 * members.
 */
static inline void __list_splice_init_rcu(struct list_head *list,
                      struct list_head *prev,
                      struct list_head *next,
                      void (*sync)(void))
{
    struct list_head *first = list->next;
    struct list_head *last = list->prev;

    /*
     * "first" and "last" tracking list, so initialize it.  RCU readers
     * have access to this list, so we must use INIT_LIST_HEAD_RCU()
     * instead of INIT_LIST_HEAD().
     */

    INIT_LIST_HEAD_RCU(list);

    /*
     * At this point, the list body still points to the source list.
     * Wait for any readers to finish using the list before splicing
     * the list body into the new list.  Any new readers will see
     * an empty list.
     */

    sync();
    /* After the grace period, no reader can still hold references into
     * the spliced body via the source head; assert that for KCSAN. */
    ASSERT_EXCLUSIVE_ACCESS(*first);
    ASSERT_EXCLUSIVE_ACCESS(*last);

    /*
     * Readers are finished with the source list, so perform splice.
     * The order is important if the new list is global and accessible
     * to concurrent RCU readers.  Note that RCU readers are not
     * permitted to traverse the prev pointers without excluding
     * this function.
     */

    last->next = next;
    rcu_assign_pointer(list_next_rcu(prev), first);
    first->prev = prev;
    next->prev = last;
}
0266 
0267 /**
0268  * list_splice_init_rcu - splice an RCU-protected list into an existing list,
0269  *                        designed for stacks.
0270  * @list:   the RCU-protected list to splice
0271  * @head:   the place in the existing list to splice the first list into
0272  * @sync:   synchronize_rcu, synchronize_rcu_expedited, ...
0273  */
0274 static inline void list_splice_init_rcu(struct list_head *list,
0275                     struct list_head *head,
0276                     void (*sync)(void))
0277 {
0278     if (!list_empty(list))
0279         __list_splice_init_rcu(list, head, head->next, sync);
0280 }
0281 
0282 /**
0283  * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
0284  *                             list, designed for queues.
0285  * @list:   the RCU-protected list to splice
0286  * @head:   the place in the existing list to splice the first list into
0287  * @sync:   synchronize_rcu, synchronize_rcu_expedited, ...
0288  */
0289 static inline void list_splice_tail_init_rcu(struct list_head *list,
0290                          struct list_head *head,
0291                          void (*sync)(void))
0292 {
0293     if (!list_empty(list))
0294         __list_splice_init_rcu(list, head->prev, head, sync);
0295 }
0296 
/**
 * list_entry_rcu - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 *
 * READ_ONCE() prevents the compiler from refetching or tearing the load
 * of @ptr before the container_of() address arithmetic.
 */
#define list_entry_rcu(ptr, type, member) \
    container_of(READ_ONCE(ptr), type, member)
0308 
0309 /*
0310  * Where are list_empty_rcu() and list_first_entry_rcu()?
0311  *
0312  * They do not exist because they would lead to subtle race conditions:
0313  *
0314  * if (!list_empty_rcu(mylist)) {
0315  *  struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
0316  *  do_something(bar);
0317  * }
0318  *
0319  * The list might be non-empty when list_empty_rcu() checks it, but it
0320  * might have become empty by the time that list_first_entry_rcu() rereads
0321  * the ->next pointer, which would result in a SEGV.
0322  *
0323  * When not using RCU, it is OK for list_first_entry() to re-read that
0324  * pointer because both functions should be protected by some lock that
0325  * blocks writers.
0326  *
0327  * When using RCU, list_empty() uses READ_ONCE() to fetch the
0328  * RCU-protected ->next pointer and then compares it to the address of the
0329  * list head.  However, it neither dereferences this pointer nor provides
0330  * this pointer to its caller.  Thus, READ_ONCE() suffices (that is,
0331  * rcu_dereference() is not needed), which means that list_empty() can be
0332  * used anywhere you would want to use list_empty_rcu().  Just don't
0333  * expect anything useful to happen if you do a subsequent lockless
0334  * call to list_first_entry_rcu()!!!
0335  *
0336  * See list_first_or_null_rcu for an alternative.
0337  */
0338 
/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr:        the list head to take the element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
    struct list_head *__ptr = (ptr); \
    struct list_head *__next = READ_ONCE(__ptr->next); \
    likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
0356 
/**
 * list_next_or_null_rcu - get the next element from a list
 * @head:   the head for the list.
 * @ptr:        the list head to take the next element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * Note that if the ptr is at the end of the list, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
    struct list_head *__head = (head); \
    struct list_head *__ptr = (ptr); \
    struct list_head *__next = READ_ONCE(__ptr->next); \
    likely(__next != __head) ? list_entry_rcu(__next, type, \
                          member) : NULL; \
})
0377 
/**
 * list_for_each_entry_rcu  -   iterate over rcu list of given type
 * @pos:    the type * to use as a loop cursor.
 * @head:   the head for your list.
 * @member: the name of the list_head within the struct.
 * @cond:   optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * The trailing ", 0" supplies a default @cond when the caller passes
 * none; __list_check_rcu() rejects more than one @cond at compile time.
 */
#define list_for_each_entry_rcu(pos, head, member, cond...)     \
    for (__list_check_rcu(dummy, ## cond, 0),           \
         pos = list_entry_rcu((head)->next, typeof(*pos), member);  \
        &pos->member != (head);                 \
        pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
0394 
/**
 * list_for_each_entry_srcu -   iterate over rcu list of given type
 * @pos:    the type * to use as a loop cursor.
 * @head:   the head for your list.
 * @member: the name of the list_head within the struct.
 * @cond:   lockdep expression for the lock required to traverse the list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by srcu_read_lock().
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from read side.
 *
 * Unlike list_for_each_entry_rcu(), @cond is mandatory here.
 */
#define list_for_each_entry_srcu(pos, head, member, cond)       \
    for (__list_check_srcu(cond),                   \
         pos = list_entry_rcu((head)->next, typeof(*pos), member);  \
        &pos->member != (head);                 \
        pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
0413 
/**
 * list_entry_lockless - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked.  Another example is when items are added to the list,
 * but never deleted.
 *
 * The typeof() cast strips the __rcu address-space annotation without
 * the lockdep checking that rcu_dereference() would perform.
 */
#define list_entry_lockless(ptr, type, member) \
    container_of((typeof(ptr))READ_ONCE(ptr), type, member)
0429 
/**
 * list_for_each_entry_lockless - iterate over rcu list of given type
 * @pos:    the type * to use as a loop cursor.
 * @head:   the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked.  Another example is when items are added to the list,
 * but never deleted.
 *
 * No lockdep checking is performed; the caller vouches for safety.
 */
#define list_for_each_entry_lockless(pos, head, member) \
    for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
         &pos->member != (head); \
         pos = list_entry_lockless(pos->member.next, typeof(*pos), member))
0447 
/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos:    the type * to use as a loop cursor.
 * @head:   the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position which must have been in the list when the RCU read
 * lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_from_rcu() except
 * this starts after the given position and that one starts at the given
 * position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member)         \
    for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
         &pos->member != (head);    \
         pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
0470 
/**
 * list_for_each_entry_from_rcu - iterate over a list from current point
 * @pos:    the type * to use as a loop cursor.
 * @head:   the head for your list.
 * @member: the name of the list_node within the struct.
 *
 * Iterate over the tail of a list starting from a given position,
 * which must have been in the list when the RCU read lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_continue_rcu() except
 * this starts from the given position and that one starts from the position
 * after the given position.
 */
#define list_for_each_entry_from_rcu(pos, head, member)         \
    for (; &(pos)->member != (head);                    \
        pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member))
0491 
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
    __hlist_del(n);
    /* Only ->pprev is poisoned: readers never follow it, but may still
     * be walking through this node's ->next. */
    WRITE_ONCE(n->pprev, LIST_POISON2);
}
0516 
/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
                    struct hlist_node *new)
{
    struct hlist_node *next = old->next;

    /* Fully initialize @new before publishing it through *old->pprev,
     * so readers see either the old or the new node, never a partially
     * initialized one. */
    new->next = next;
    WRITE_ONCE(new->pprev, old->pprev);
    rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
    if (next)
        WRITE_ONCE(new->next->pprev, &new->next);
    WRITE_ONCE(old->pprev, LIST_POISON2);
}
0536 
/**
 * hlists_swap_heads_rcu - swap the lists the hlist heads point to
 * @left:  The hlist head on the left
 * @right: The hlist head on the right
 *
 * The lists start out as [@left  ][node1 ... ] and
 *                        [@right ][node2 ... ]
 * The lists end up as    [@left  ][node2 ... ]
 *                        [@right ][node1 ... ]
 *
 * NOTE: node1 and node2 are dereferenced unconditionally below, so both
 * lists must be non-empty when this is called.
 */
static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right)
{
    struct hlist_node *node1 = left->first;
    struct hlist_node *node2 = right->first;

    rcu_assign_pointer(left->first, node2);
    rcu_assign_pointer(right->first, node1);
    WRITE_ONCE(node2->pprev, &left->first);
    WRITE_ONCE(node1->pprev, &right->first);
}
0557 
/*
 * return the first or the next element in an RCU protected hlist
 *
 * Each expands to an __rcu-qualified lvalue, suitable as the argument of
 * rcu_dereference() (readers) or rcu_assign_pointer() (updaters).
 */
#define hlist_first_rcu(head)   (*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)    (*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)   (*((struct hlist_node __rcu **)((node)->pprev)))
0564 
/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                    struct hlist_head *h)
{
    struct hlist_node *first = h->first;

    /* Initialize @n before publishing it via rcu_assign_pointer(). */
    n->next = first;
    WRITE_ONCE(n->pprev, &h->first);
    rcu_assign_pointer(hlist_first_rcu(h), n);
    if (first)
        WRITE_ONCE(first->pprev, &n->next);
}
0595 
/**
 * hlist_add_tail_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 *
 * Note: hlist has no tail pointer, so finding the last node is O(n).
 */
static inline void hlist_add_tail_rcu(struct hlist_node *n,
                      struct hlist_head *h)
{
    struct hlist_node *i, *last = NULL;

    /* Note: write side code, so rcu accessors are not needed. */
    for (i = h->first; i; i = i->next)
        last = i;

    if (last) {
        n->next = last->next;
        WRITE_ONCE(n->pprev, &last->next);
        rcu_assign_pointer(hlist_next_rcu(last), n);
    } else {
        /* Empty list: adding at the tail equals adding at the head. */
        hlist_add_head_rcu(n, h);
    }
}
0632 
/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
                    struct hlist_node *next)
{
    /* Initialize @n before publishing it through *n->pprev. */
    WRITE_ONCE(n->pprev, next->pprev);
    n->next = next;
    rcu_assign_pointer(hlist_pprev_rcu(n), n);
    WRITE_ONCE(next->pprev, &n->next);
}
0659 
/**
 * hlist_add_behind_rcu
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
                    struct hlist_node *prev)
{
    /* Initialize @n before publishing it via prev->next. */
    n->next = prev->next;
    WRITE_ONCE(n->pprev, &prev->next);
    rcu_assign_pointer(hlist_next_rcu(prev), n);
    if (n->next)
        WRITE_ONCE(n->next->pprev, &n->next);
}
0687 
/*
 * __hlist_for_each_rcu - iterate over the raw hlist_node chain of an
 * RCU-protected hlist.  @pos is a struct hlist_node * cursor; use the
 * hlist_for_each_entry_rcu() family to iterate over containing structs.
 * Must be called within an RCU read-side critical section.
 */
#define __hlist_for_each_rcu(pos, head)             \
    for (pos = rcu_dereference(hlist_first_rcu(head));  \
         pos;                       \
         pos = rcu_dereference(hlist_next_rcu(pos)))
0692 
/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:    the type * to use as a loop cursor.
 * @head:   the head for your list.
 * @member: the name of the hlist_node within the struct.
 * @cond:   optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * The trailing ", 0" supplies a default @cond when the caller passes
 * none; __list_check_rcu() rejects more than one @cond at compile time.
 */
#define hlist_for_each_entry_rcu(pos, head, member, cond...)        \
    for (__list_check_rcu(dummy, ## cond, 0),           \
         pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
            typeof(*(pos)), member);            \
        pos;                            \
        pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
            &(pos)->member)), typeof(*(pos)), member))
0711 
/**
 * hlist_for_each_entry_srcu - iterate over rcu list of given type
 * @pos:    the type * to use as a loop cursor.
 * @head:   the head for your list.
 * @member: the name of the hlist_node within the struct.
 * @cond:   lockdep expression for the lock required to traverse the list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by srcu_read_lock().
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from read side.
 *
 * Unlike hlist_for_each_entry_rcu(), @cond is mandatory here.
 */
#define hlist_for_each_entry_srcu(pos, head, member, cond)      \
    for (__list_check_srcu(cond),                   \
         pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
            typeof(*(pos)), member);            \
        pos;                            \
        pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
            &(pos)->member)), typeof(*(pos)), member))
0732 
/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos:    the type * to use as a loop cursor.
 * @head:   the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)         \
    for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\
            typeof(*(pos)), member);            \
        pos;                            \
        pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\
            &(pos)->member)), typeof(*(pos)), member))
0752 
/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos:    the type * to use as a loop cursor.
 * @head:   the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * Uses rcu_dereference_bh(), i.e. the RCU-bh reader flavor.
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)          \
    for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
            typeof(*(pos)), member);            \
        pos;                            \
        pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
            &(pos)->member)), typeof(*(pos)), member))
0769 
/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos:    the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 *
 * @pos must already designate a node that was in the list when the RCU
 * read-side critical section began; iteration starts at its successor.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)          \
    for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
            &(pos)->member)), typeof(*(pos)), member);  \
         pos;                           \
         pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
            &(pos)->member)), typeof(*(pos)), member))
0781 
/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos:    the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 *
 * RCU-bh flavor of hlist_for_each_entry_continue_rcu(); iteration
 * starts at the successor of @pos.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)       \
    for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
            &(pos)->member)), typeof(*(pos)), member);  \
         pos;                           \
         pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
            &(pos)->member)), typeof(*(pos)), member))
0793 
/**
 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 * @pos:    the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 *
 * Iteration starts at @pos itself (contrast with
 * hlist_for_each_entry_continue_rcu(), which starts after it).
 */
#define hlist_for_each_entry_from_rcu(pos, member)          \
    for (; pos;                         \
         pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
            &(pos)->member)), typeof(*(pos)), member))
0803 
0804 #endif  /* __KERNEL__ */
0805 #endif