/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_NULLS_H
#define _LINUX_RCULIST_NULLS_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list_nulls.h>
#include <linux/rcupdate.h>

/**
 * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_nulls_unhashed() on the node returns true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_nulls_add_head_rcu() or
 * hlist_nulls_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_nulls_for_each_entry_rcu().
 */
static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
{
	if (!hlist_nulls_unhashed(n)) {
		__hlist_nulls_del(n);
		/*
		 * Only pprev is zeroed (making the node read as unhashed);
		 * n->next is deliberately left intact so concurrent RCU
		 * readers already standing on this node can keep walking.
		 */
		WRITE_ONCE(n->pprev, NULL);
	}
}
0040 
/**
 * hlist_nulls_first_rcu - returns the first element of the hash list.
 * @head: the head of the list.
 *
 * Casts head->first to an __rcu-annotated lvalue so it can be used with
 * rcu_dereference()/rcu_assign_pointer() without sparse warnings.
 */
#define hlist_nulls_first_rcu(head) \
	(*((struct hlist_nulls_node __rcu __force **)&(head)->first))
0047 
/**
 * hlist_nulls_next_rcu - returns the element of the list after @node.
 * @node: element of the list.
 *
 * Casts node->next to an __rcu-annotated lvalue so it can be used with
 * rcu_dereference()/rcu_assign_pointer() without sparse warnings.
 */
#define hlist_nulls_next_rcu(node) \
	(*((struct hlist_nulls_node __rcu __force **)&(node)->next))
0054 
/**
 * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_nulls_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
 * or hlist_nulls_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_nulls_for_each_entry().
 */
static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
{
	__hlist_nulls_del(n);
	/*
	 * Poison only the backward pointer: concurrent readers never follow
	 * pprev, so this catches writer-side misuse without breaking readers.
	 */
	WRITE_ONCE(n->pprev, LIST_POISON2);
}
0079 
/**
 * hlist_nulls_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist_nulls,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
 * or hlist_nulls_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
					struct hlist_nulls_head *h)
{
	struct hlist_nulls_node *first = h->first;

	/* Fully initialize the new node before it is published below. */
	n->next = first;
	WRITE_ONCE(n->pprev, &h->first);
	/*
	 * rcu_assign_pointer() provides the release ordering that makes the
	 * initialized node visible to racing RCU readers.
	 */
	rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
	/* A nulls value means the list was empty: no old first to back-link. */
	if (!is_a_nulls(first))
		WRITE_ONCE(first->pprev, &n->next);
}
0110 
0111 /**
0112  * hlist_nulls_add_tail_rcu
0113  * @n: the element to add to the hash list.
0114  * @h: the list to add to.
0115  *
0116  * Description:
0117  * Adds the specified element to the specified hlist_nulls,
0118  * while permitting racing traversals.
0119  *
0120  * The caller must take whatever precautions are necessary
0121  * (such as holding appropriate locks) to avoid racing
0122  * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
0123  * or hlist_nulls_del_rcu(), running on this same list.
0124  * However, it is perfectly legal to run concurrently with
0125  * the _rcu list-traversal primitives, such as
0126  * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
0127  * problems on Alpha CPUs.  Regardless of the type of CPU, the
0128  * list-traversal primitive must be guarded by rcu_read_lock().
0129  */
0130 static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
0131                         struct hlist_nulls_head *h)
0132 {
0133     struct hlist_nulls_node *i, *last = NULL;
0134 
0135     /* Note: write side code, so rcu accessors are not needed. */
0136     for (i = h->first; !is_a_nulls(i); i = i->next)
0137         last = i;
0138 
0139     if (last) {
0140         n->next = last->next;
0141         n->pprev = &last->next;
0142         rcu_assign_pointer(hlist_next_rcu(last), n);
0143     } else {
0144         hlist_nulls_add_head_rcu(n, h);
0145     }
0146 }
0147 
0148 /* after that hlist_nulls_del will work */
0149 static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n)
0150 {
0151     n->pprev = &n->next;
0152     n->next = (struct hlist_nulls_node *)NULLS_MARKER(NULL);
0153 }
0154 
/**
 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_nulls_node to use as a loop cursor.
 * @head:	the head of the list.
 * @member:	the name of the hlist_nulls_node within the struct.
 *
 * Iteration ends when @pos becomes a nulls marker; the caller can inspect
 * it with get_nulls_value() to detect a move between chains.
 *
 * The barrier() is needed to make sure compiler doesn't cache first element [1],
 * as this loop can be restarted [2]
 * [1] Documentation/memory-barriers.txt around line 1533
 * [2] Documentation/RCU/rculist_nulls.rst around line 146
 */
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member)			\
	for (({barrier();}),							\
	     pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));		\
		(!is_a_nulls(pos)) &&						\
		({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
		pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
0173 
/**
 * hlist_nulls_for_each_entry_safe -
 *   iterate over list of given type safe against removal of list entry
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_nulls_node to use as a loop cursor.
 * @head:	the head of the list.
 * @member:	the name of the hlist_nulls_node within the struct.
 *
 * The next pointer is fetched before the loop body runs, so the body may
 * safely remove the current entry.
 */
#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member)		\
	for (({barrier();}),							\
	     pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));		\
		(!is_a_nulls(pos)) &&						\
		({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member);	\
		   pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });)
#endif /* __KERNEL__ */
#endif /* _LINUX_RCULIST_NULLS_H */