/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
#ifndef FREELIST_H
#define FREELIST_H

#include <linux/atomic.h>

/*
 * Copyright: cameron@moodycamel.com
 *
 * A simple CAS-based lock-free free list. Not the fastest thing in the world
 * under heavy contention, but simple and correct (assuming nodes are never
 * freed until after the free list is destroyed), and fairly speedy under low
 * contention.
 *
 * Adapted from: https://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists
 */
struct freelist_node {
	/*
	 * Low 31 bits (REFS_MASK): transient reference count taken by
	 * concurrent freelist_try_get() callers. Bit 31 (REFS_ON_FREELIST):
	 * the node must be (re)inserted onto the list once the count
	 * drops back to zero.
	 */
	atomic_t		refs;
	/*
	 * Next node on the list. Only safe to read while holding a
	 * non-zero reference on this node (see freelist_try_get()).
	 */
	struct freelist_node	*next;
};

struct freelist_head {
	/* Top of the lock-free stack of free nodes; NULL when empty. */
	struct freelist_node	*head;
};

/*
 * Encoding of freelist_node::refs: the top bit flags a node that should
 * be put (back) on the free list as soon as its reference count reaches
 * zero; the low 31 bits are the reference count itself.
 */
#define REFS_ON_FREELIST 0x80000000
#define REFS_MASK	 0x7FFFFFFF

/*
 * Actually link @node onto @list. Only called when the node's reference
 * count is zero, i.e. the caller is the node's sole owner at this point
 * (see the comment below and freelist_add()).
 */
static inline void __freelist_add(struct freelist_node *node, struct freelist_head *list)
{
	/*
	 * Since the refcount is zero, and nobody can increase it once it's
	 * zero (except us, and we run only one copy of this method per node at
	 * a time, i.e. the single thread case), then we know we can safely
	 * change the next pointer of the node; however, once the refcount is
	 * back above zero, then other threads could increase it (happens under
	 * heavy contention, when the refcount goes to zero in between a load
	 * and a refcount increment of a node in try_get, then back up to
	 * something non-zero, then the refcount increment is done by the other
	 * thread) -- so if the CAS to add the node to the actual list fails,
	 * decrease the refcount and leave the add operation to the next thread
	 * who puts the refcount back to zero (which could be us, hence the
	 * loop).
	 */
	struct freelist_node *head = READ_ONCE(list->head);

	for (;;) {
		/* Set the link before the node becomes reachable via list->head. */
		WRITE_ONCE(node->next, head);
		/*
		 * One reference, owned by the list. The release ordering
		 * pairs with the acquire CAS on refs in freelist_try_get().
		 */
		atomic_set_release(&node->refs, 1);

		if (!try_cmpxchg_release(&list->head, &head, node)) {
			/*
			 * Hmm, the add failed, but we can only try again when
			 * the refcount goes back to zero.
			 */
			if (atomic_fetch_add_release(REFS_ON_FREELIST - 1, &node->refs) == 1)
				continue;
		}
		return;
	}
}

/*
 * Return @node to @list. Safe against concurrent freelist_try_get()
 * callers that may still hold transient references on the node: whoever
 * drops the reference count to zero performs the actual insertion.
 */
static inline void freelist_add(struct freelist_node *node, struct freelist_head *list)
{
	/*
	 * We know that the should-be-on-freelist bit is 0 at this point, so
	 * it's safe to set it using a fetch_add.
	 */
	if (!atomic_fetch_add_release(REFS_ON_FREELIST, &node->refs)) {
		/*
		 * Oh look! We were the last ones referencing this node, and we
		 * know we want to add it to the free list, so let's do it!
		 */
		__freelist_add(node, list);
	}
}

/*
 * Pop a node from @list, or return NULL if the list is (momentarily)
 * empty. Lock-free; may run concurrently with freelist_add() and other
 * freelist_try_get() callers.
 */
static inline struct freelist_node *freelist_try_get(struct freelist_head *list)
{
	struct freelist_node *prev, *next, *head = smp_load_acquire(&list->head);
	unsigned int refs;

	while (head) {
		prev = head;
		refs = atomic_read(&head->refs);
		/*
		 * Only take a reference if the count is already non-zero:
		 * a zero count means the node is being (re)inserted and its
		 * ->next is not yet safe to read. If the acquire CAS fails,
		 * head may be stale, so reload it and retry.
		 */
		if ((refs & REFS_MASK) == 0 ||
		    !atomic_try_cmpxchg_acquire(&head->refs, &refs, refs+1)) {
			head = smp_load_acquire(&list->head);
			continue;
		}

		/*
		 * Good, reference count has been incremented (it wasn't at
		 * zero), which means we can read the next and not worry about
		 * it changing between now and the time we do the CAS.
		 */
		next = READ_ONCE(head->next);
		if (try_cmpxchg_acquire(&list->head, &head, next)) {
			/*
			 * Yay, got the node. This means it was on the list,
			 * which means should-be-on-freelist must be false no
			 * matter the refcount (because nobody else knows it's
			 * been taken off yet, it can't have been put back on).
			 */
			WARN_ON_ONCE(atomic_read(&head->refs) & REFS_ON_FREELIST);

			/*
			 * Decrease refcount twice, once for our ref, and once
			 * for the list's ref.
			 */
			atomic_fetch_add(-2, &head->refs);

			return head;
		}

		/*
		 * OK, the head must have changed on us, but we still need to decrement
		 * the refcount we increased.
		 */
		refs = atomic_fetch_add(-1, &prev->refs);
		/*
		 * If ours was the last reference and the node was marked
		 * should-be-on-freelist in the meantime, we are the thread
		 * responsible for actually re-adding it.
		 */
		if (refs == REFS_ON_FREELIST + 1)
			__freelist_add(prev, list);
	}

	return NULL;
}


#endif /* FREELIST_H */