0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * XArray implementation
0004  * Copyright (c) 2017-2018 Microsoft Corporation
0005  * Copyright (c) 2018-2020 Oracle
0006  * Author: Matthew Wilcox <willy@infradead.org>
0007  */
0008 
0009 #include <linux/bitmap.h>
0010 #include <linux/export.h>
0011 #include <linux/list.h>
0012 #include <linux/slab.h>
0013 #include <linux/xarray.h>
0014 
0015 /*
0016  * Coding conventions in this file:
0017  *
0018  * @xa is used to refer to the entire xarray.
0019  * @xas is the 'xarray operation state'.  It may be either a pointer to
0020  * an xa_state, or an xa_state stored on the stack.  This is an unfortunate
0021  * ambiguity.
0022  * @index is the index of the entry being operated on.
0023  * @mark is an xa_mark_t; a small number indicating one of the mark bits.
0024  * @node refers to an xa_node; usually the primary one being operated on by
0025  * this function.
0026  * @offset is the index into the slots array inside an xa_node.
0027  * @parent refers to the @xa_node closer to the head than @node.
0028  * @entry refers to something stored in a slot in the xarray.
0029  */
0030 
0031 static inline unsigned int xa_lock_type(const struct xarray *xa)
0032 {
0033     return (__force unsigned int)xa->xa_flags & 3;
0034 }
0035 
0036 static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
0037 {
0038     if (lock_type == XA_LOCK_IRQ)
0039         xas_lock_irq(xas);
0040     else if (lock_type == XA_LOCK_BH)
0041         xas_lock_bh(xas);
0042     else
0043         xas_lock(xas);
0044 }
0045 
0046 static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
0047 {
0048     if (lock_type == XA_LOCK_IRQ)
0049         xas_unlock_irq(xas);
0050     else if (lock_type == XA_LOCK_BH)
0051         xas_unlock_bh(xas);
0052     else
0053         xas_unlock(xas);
0054 }
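
/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * The lock type recorded in xa_flags decides which of the helpers above is
 * used when the XArray retakes its own lock.  An array that is also written
 * to from interrupt context can be declared with XA_FLAGS_LOCK_IRQ; callers
 * then use the irq-safe lock variants themselves.  The array, function and
 * item names below are made up for the example.
 */
static DEFINE_XARRAY_FLAGS(xa_example_irq_array, XA_FLAGS_LOCK_IRQ);

static void xa_example_irq_store(void *item)
{
    /* Take the lock the same way the internals will re-take it. */
    xa_lock_irq(&xa_example_irq_array);
    __xa_store(&xa_example_irq_array, 0, item, GFP_ATOMIC);
    xa_unlock_irq(&xa_example_irq_array);
}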
0055 
0056 static inline bool xa_track_free(const struct xarray *xa)
0057 {
0058     return xa->xa_flags & XA_FLAGS_TRACK_FREE;
0059 }
0060 
0061 static inline bool xa_zero_busy(const struct xarray *xa)
0062 {
0063     return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
0064 }
0065 
0066 static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
0067 {
0068     if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
0069         xa->xa_flags |= XA_FLAGS_MARK(mark);
0070 }
0071 
0072 static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
0073 {
0074     if (xa->xa_flags & XA_FLAGS_MARK(mark))
0075         xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
0076 }
0077 
0078 static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
0079 {
0080     return node->marks[(__force unsigned)mark];
0081 }
0082 
0083 static inline bool node_get_mark(struct xa_node *node,
0084         unsigned int offset, xa_mark_t mark)
0085 {
0086     return test_bit(offset, node_marks(node, mark));
0087 }
0088 
0089 /* returns true if the bit was set */
0090 static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
0091                 xa_mark_t mark)
0092 {
0093     return __test_and_set_bit(offset, node_marks(node, mark));
0094 }
0095 
0096 /* returns true if the bit was set */
0097 static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
0098                 xa_mark_t mark)
0099 {
0100     return __test_and_clear_bit(offset, node_marks(node, mark));
0101 }
0102 
0103 static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
0104 {
0105     return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
0106 }
0107 
0108 static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
0109 {
0110     bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
0111 }
0112 
0113 #define mark_inc(mark) do { \
0114     mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
0115 } while (0)
0116 
0117 /*
0118  * xas_squash_marks() - Merge all marks to the first entry
0119  * @xas: Array operation state.
0120  *
0121  * Set a mark on the first entry if any entry has it set.  Clear marks on
0122  * all sibling entries.
0123  */
0124 static void xas_squash_marks(const struct xa_state *xas)
0125 {
0126     unsigned int mark = 0;
0127     unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
0128 
0129     if (!xas->xa_sibs)
0130         return;
0131 
0132     do {
0133         unsigned long *marks = xas->xa_node->marks[mark];
0134         if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
0135             continue;
0136         __set_bit(xas->xa_offset, marks);
0137         bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
0138     } while (mark++ != (__force unsigned)XA_MARK_MAX);
0139 }
0140 
0141 /* extracts the offset within this node from the index */
0142 static unsigned int get_offset(unsigned long index, struct xa_node *node)
0143 {
0144     return (index >> node->shift) & XA_CHUNK_MASK;
0145 }
0146 
0147 static void xas_set_offset(struct xa_state *xas)
0148 {
0149     xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
0150 }
0151 
0152 /* move the index either forwards (find) or backwards (sibling slot) */
0153 static void xas_move_index(struct xa_state *xas, unsigned long offset)
0154 {
0155     unsigned int shift = xas->xa_node->shift;
0156     xas->xa_index &= ~XA_CHUNK_MASK << shift;
0157     xas->xa_index += offset << shift;
0158 }
0159 
0160 static void xas_next_offset(struct xa_state *xas)
0161 {
0162     xas->xa_offset++;
0163     xas_move_index(xas, xas->xa_offset);
0164 }
0165 
0166 static void *set_bounds(struct xa_state *xas)
0167 {
0168     xas->xa_node = XAS_BOUNDS;
0169     return NULL;
0170 }
0171 
0172 /*
0173  * Starts a walk.  If the @xas is already valid, we assume that it's on
0174  * the right path and just return where we've got to.  If we're in an
0175  * error state, return NULL.  If the index is outside the current scope
0176  * of the xarray, set @xas->xa_node to XAS_BOUNDS and return NULL.  Otherwise
0177  * set @xas->xa_node to NULL and return the current head of the array.
0178  */
0179 static void *xas_start(struct xa_state *xas)
0180 {
0181     void *entry;
0182 
0183     if (xas_valid(xas))
0184         return xas_reload(xas);
0185     if (xas_error(xas))
0186         return NULL;
0187 
0188     entry = xa_head(xas->xa);
0189     if (!xa_is_node(entry)) {
0190         if (xas->xa_index)
0191             return set_bounds(xas);
0192     } else {
0193         if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
0194             return set_bounds(xas);
0195     }
0196 
0197     xas->xa_node = NULL;
0198     return entry;
0199 }
0200 
0201 static void *xas_descend(struct xa_state *xas, struct xa_node *node)
0202 {
0203     unsigned int offset = get_offset(xas->xa_index, node);
0204     void *entry = xa_entry(xas->xa, node, offset);
0205 
0206     xas->xa_node = node;
0207     if (xa_is_sibling(entry)) {
0208         offset = xa_to_sibling(entry);
0209         entry = xa_entry(xas->xa, node, offset);
0210         if (node->shift && xa_is_node(entry))
0211             entry = XA_RETRY_ENTRY;
0212     }
0213 
0214     xas->xa_offset = offset;
0215     return entry;
0216 }
0217 
0218 /**
0219  * xas_load() - Load an entry from the XArray (advanced).
0220  * @xas: XArray operation state.
0221  *
0222  * Usually walks the @xas to the appropriate state to load the entry
0223  * stored at xa_index.  However, it will do nothing and return %NULL if
0224  * @xas is in an error state.  xas_load() will never expand the tree.
0225  *
0226  * If the xa_state is set up to operate on a multi-index entry, xas_load()
0227  * may return %NULL or an internal entry, even if there are entries
0228  * present within the range specified by @xas.
0229  *
0230  * Context: Any context.  The caller should hold the xa_lock or the RCU lock.
0231  * Return: Usually an entry in the XArray, but see description for exceptions.
0232  */
0233 void *xas_load(struct xa_state *xas)
0234 {
0235     void *entry = xas_start(xas);
0236 
0237     while (xa_is_node(entry)) {
0238         struct xa_node *node = xa_to_node(entry);
0239 
0240         if (xas->xa_shift > node->shift)
0241             break;
0242         entry = xas_descend(xas, node);
0243         if (node->shift == 0)
0244             break;
0245     }
0246     return entry;
0247 }
0248 EXPORT_SYMBOL_GPL(xas_load);
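
/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * A typical advanced-API read walks to the index under the RCU lock and
 * retries if a retry entry is seen while a node is being freed.  The
 * function name is made up; this is essentially what xa_load() below does.
 */
static void *xa_example_rcu_load(struct xarray *xa, unsigned long index)
{
    XA_STATE(xas, xa, index);
    void *entry;

    rcu_read_lock();
    do {
        entry = xas_load(&xas);
    } while (xas_retry(&xas, entry));
    rcu_read_unlock();

    return entry;
}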
0249 
0250 /* Move the radix tree node cache here */
0251 extern struct kmem_cache *radix_tree_node_cachep;
0252 extern void radix_tree_node_rcu_free(struct rcu_head *head);
0253 
0254 #define XA_RCU_FREE ((struct xarray *)1)
0255 
0256 static void xa_node_free(struct xa_node *node)
0257 {
0258     XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
0259     node->array = XA_RCU_FREE;
0260     call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
0261 }
0262 
0263 /*
0264  * xas_destroy() - Free any resources allocated during the XArray operation.
0265  * @xas: XArray operation state.
0266  *
0267  * Most users will not need to call this function; it is called for you
0268  * by xas_nomem().
0269  */
0270 void xas_destroy(struct xa_state *xas)
0271 {
0272     struct xa_node *next, *node = xas->xa_alloc;
0273 
0274     while (node) {
0275         XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
0276         next = rcu_dereference_raw(node->parent);
0277         radix_tree_node_rcu_free(&node->rcu_head);
0278         xas->xa_alloc = node = next;
0279     }
0280 }
0281 
0282 /**
0283  * xas_nomem() - Allocate memory if needed.
0284  * @xas: XArray operation state.
0285  * @gfp: Memory allocation flags.
0286  *
0287  * If we need to add new nodes to the XArray, we try to allocate memory
0288  * with GFP_NOWAIT while holding the lock, which will usually succeed.
0289  * If it fails, @xas is flagged as needing memory to continue.  The caller
0290  * should drop the lock and call xas_nomem().  If xas_nomem() succeeds,
0291  * the caller should retry the operation.
0292  *
0293  * Forward progress is guaranteed as one node is allocated here and
0294  * stored in the xa_state where it will be found by xas_alloc().  More
0295  * nodes will likely be found in the slab allocator, but we do not tie
0296  * them up here.
0297  *
0298  * Return: true if memory was needed, and was successfully allocated.
0299  */
0300 bool xas_nomem(struct xa_state *xas, gfp_t gfp)
0301 {
0302     if (xas->xa_node != XA_ERROR(-ENOMEM)) {
0303         xas_destroy(xas);
0304         return false;
0305     }
0306     if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
0307         gfp |= __GFP_ACCOUNT;
0308     xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
0309     if (!xas->xa_alloc)
0310         return false;
0311     xas->xa_alloc->parent = NULL;
0312     XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
0313     xas->xa_node = XAS_RESTART;
0314     return true;
0315 }
0316 EXPORT_SYMBOL_GPL(xas_nomem);
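
/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * This is the allocation-retry pattern described in the comment above: the
 * store is attempted under the lock, and if it failed for lack of memory,
 * xas_nomem() allocates a node with the lock dropped and the loop retries.
 * The function and parameter names are made up.
 */
static int xa_example_store(struct xarray *xa, unsigned long index, void *item)
{
    XA_STATE(xas, xa, index);

    do {
        xas_lock(&xas);
        xas_store(&xas, item);
        xas_unlock(&xas);
    } while (xas_nomem(&xas, GFP_KERNEL));

    return xas_error(&xas);
}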
0317 
0318 /*
0319  * __xas_nomem() - Drop locks and allocate memory if needed.
0320  * @xas: XArray operation state.
0321  * @gfp: Memory allocation flags.
0322  *
0323  * Internal variant of xas_nomem().
0324  *
0325  * Return: true if memory was needed, and was successfully allocated.
0326  */
0327 static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
0328     __must_hold(xas->xa->xa_lock)
0329 {
0330     unsigned int lock_type = xa_lock_type(xas->xa);
0331 
0332     if (xas->xa_node != XA_ERROR(-ENOMEM)) {
0333         xas_destroy(xas);
0334         return false;
0335     }
0336     if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
0337         gfp |= __GFP_ACCOUNT;
0338     if (gfpflags_allow_blocking(gfp)) {
0339         xas_unlock_type(xas, lock_type);
0340         xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
0341         xas_lock_type(xas, lock_type);
0342     } else {
0343         xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
0344     }
0345     if (!xas->xa_alloc)
0346         return false;
0347     xas->xa_alloc->parent = NULL;
0348     XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
0349     xas->xa_node = XAS_RESTART;
0350     return true;
0351 }
0352 
0353 static void xas_update(struct xa_state *xas, struct xa_node *node)
0354 {
0355     if (xas->xa_update)
0356         xas->xa_update(node);
0357     else
0358         XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
0359 }
0360 
0361 static void *xas_alloc(struct xa_state *xas, unsigned int shift)
0362 {
0363     struct xa_node *parent = xas->xa_node;
0364     struct xa_node *node = xas->xa_alloc;
0365 
0366     if (xas_invalid(xas))
0367         return NULL;
0368 
0369     if (node) {
0370         xas->xa_alloc = NULL;
0371     } else {
0372         gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;
0373 
0374         if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
0375             gfp |= __GFP_ACCOUNT;
0376 
0377         node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
0378         if (!node) {
0379             xas_set_err(xas, -ENOMEM);
0380             return NULL;
0381         }
0382     }
0383 
0384     if (parent) {
0385         node->offset = xas->xa_offset;
0386         parent->count++;
0387         XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
0388         xas_update(xas, parent);
0389     }
0390     XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
0391     XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
0392     node->shift = shift;
0393     node->count = 0;
0394     node->nr_values = 0;
0395     RCU_INIT_POINTER(node->parent, xas->xa_node);
0396     node->array = xas->xa;
0397 
0398     return node;
0399 }
0400 
0401 #ifdef CONFIG_XARRAY_MULTI
0402 /* Returns the number of indices covered by a given xa_state */
0403 static unsigned long xas_size(const struct xa_state *xas)
0404 {
0405     return (xas->xa_sibs + 1UL) << xas->xa_shift;
0406 }
0407 #endif
0408 
0409 /*
0410  * Use this to calculate the maximum index that will need to be created
0411  * in order to add the entry described by @xas.  Because we cannot store a
0412  * multi-index entry at index 0, the calculation is a little more complex
0413  * than you might expect.
0414  */
0415 static unsigned long xas_max(struct xa_state *xas)
0416 {
0417     unsigned long max = xas->xa_index;
0418 
0419 #ifdef CONFIG_XARRAY_MULTI
0420     if (xas->xa_shift || xas->xa_sibs) {
0421         unsigned long mask = xas_size(xas) - 1;
0422         max |= mask;
0423         if (mask == max)
0424             max++;
0425     }
0426 #endif
0427 
0428     return max;
0429 }
0430 
0431 /* The maximum index that can be contained in the array without expanding it */
0432 static unsigned long max_index(void *entry)
0433 {
0434     if (!xa_is_node(entry))
0435         return 0;
0436     return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
0437 }
0438 
0439 static void xas_shrink(struct xa_state *xas)
0440 {
0441     struct xarray *xa = xas->xa;
0442     struct xa_node *node = xas->xa_node;
0443 
0444     for (;;) {
0445         void *entry;
0446 
0447         XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
0448         if (node->count != 1)
0449             break;
0450         entry = xa_entry_locked(xa, node, 0);
0451         if (!entry)
0452             break;
0453         if (!xa_is_node(entry) && node->shift)
0454             break;
0455         if (xa_is_zero(entry) && xa_zero_busy(xa))
0456             entry = NULL;
0457         xas->xa_node = XAS_BOUNDS;
0458 
0459         RCU_INIT_POINTER(xa->xa_head, entry);
0460         if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
0461             xa_mark_clear(xa, XA_FREE_MARK);
0462 
0463         node->count = 0;
0464         node->nr_values = 0;
0465         if (!xa_is_node(entry))
0466             RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
0467         xas_update(xas, node);
0468         xa_node_free(node);
0469         if (!xa_is_node(entry))
0470             break;
0471         node = xa_to_node(entry);
0472         node->parent = NULL;
0473     }
0474 }
0475 
0476 /*
0477  * xas_delete_node() - Attempt to delete an xa_node
0478  * @xas: Array operation state.
0479  *
0480  * Attempts to delete @xas->xa_node.  This will fail if the node still
0481  * contains entries (i.e. its count is non-zero).
0482  */
0483 static void xas_delete_node(struct xa_state *xas)
0484 {
0485     struct xa_node *node = xas->xa_node;
0486 
0487     for (;;) {
0488         struct xa_node *parent;
0489 
0490         XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
0491         if (node->count)
0492             break;
0493 
0494         parent = xa_parent_locked(xas->xa, node);
0495         xas->xa_node = parent;
0496         xas->xa_offset = node->offset;
0497         xa_node_free(node);
0498 
0499         if (!parent) {
0500             xas->xa->xa_head = NULL;
0501             xas->xa_node = XAS_BOUNDS;
0502             return;
0503         }
0504 
0505         parent->slots[xas->xa_offset] = NULL;
0506         parent->count--;
0507         XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
0508         node = parent;
0509         xas_update(xas, node);
0510     }
0511 
0512     if (!node->parent)
0513         xas_shrink(xas);
0514 }
0515 
0516 /**
0517  * xas_free_nodes() - Free this node and all nodes that it references
0518  * @xas: Array operation state.
0519  * @top: Node to free
0520  *
0521  * This node has been removed from the tree.  We must now free it and all
0522  * of its subnodes.  There may be RCU walkers with references into the tree,
0523  * so we must replace all entries with retry markers.
0524  */
0525 static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
0526 {
0527     unsigned int offset = 0;
0528     struct xa_node *node = top;
0529 
0530     for (;;) {
0531         void *entry = xa_entry_locked(xas->xa, node, offset);
0532 
0533         if (node->shift && xa_is_node(entry)) {
0534             node = xa_to_node(entry);
0535             offset = 0;
0536             continue;
0537         }
0538         if (entry)
0539             RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
0540         offset++;
0541         while (offset == XA_CHUNK_SIZE) {
0542             struct xa_node *parent;
0543 
0544             parent = xa_parent_locked(xas->xa, node);
0545             offset = node->offset + 1;
0546             node->count = 0;
0547             node->nr_values = 0;
0548             xas_update(xas, node);
0549             xa_node_free(node);
0550             if (node == top)
0551                 return;
0552             node = parent;
0553         }
0554     }
0555 }
0556 
0557 /*
0558  * xas_expand adds nodes to the head of the tree until it has reached
0559  * sufficient height to be able to contain @xas->xa_index.
0560  */
0561 static int xas_expand(struct xa_state *xas, void *head)
0562 {
0563     struct xarray *xa = xas->xa;
0564     struct xa_node *node = NULL;
0565     unsigned int shift = 0;
0566     unsigned long max = xas_max(xas);
0567 
0568     if (!head) {
0569         if (max == 0)
0570             return 0;
0571         while ((max >> shift) >= XA_CHUNK_SIZE)
0572             shift += XA_CHUNK_SHIFT;
0573         return shift + XA_CHUNK_SHIFT;
0574     } else if (xa_is_node(head)) {
0575         node = xa_to_node(head);
0576         shift = node->shift + XA_CHUNK_SHIFT;
0577     }
0578     xas->xa_node = NULL;
0579 
0580     while (max > max_index(head)) {
0581         xa_mark_t mark = 0;
0582 
0583         XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
0584         node = xas_alloc(xas, shift);
0585         if (!node)
0586             return -ENOMEM;
0587 
0588         node->count = 1;
0589         if (xa_is_value(head))
0590             node->nr_values = 1;
0591         RCU_INIT_POINTER(node->slots[0], head);
0592 
0593         /* Propagate the aggregated mark info to the new child */
0594         for (;;) {
0595             if (xa_track_free(xa) && mark == XA_FREE_MARK) {
0596                 node_mark_all(node, XA_FREE_MARK);
0597                 if (!xa_marked(xa, XA_FREE_MARK)) {
0598                     node_clear_mark(node, 0, XA_FREE_MARK);
0599                     xa_mark_set(xa, XA_FREE_MARK);
0600                 }
0601             } else if (xa_marked(xa, mark)) {
0602                 node_set_mark(node, 0, mark);
0603             }
0604             if (mark == XA_MARK_MAX)
0605                 break;
0606             mark_inc(mark);
0607         }
0608 
0609         /*
0610          * Now that the new node is fully initialised, we can add
0611          * it to the tree
0612          */
0613         if (xa_is_node(head)) {
0614             xa_to_node(head)->offset = 0;
0615             rcu_assign_pointer(xa_to_node(head)->parent, node);
0616         }
0617         head = xa_mk_node(node);
0618         rcu_assign_pointer(xa->xa_head, head);
0619         xas_update(xas, node);
0620 
0621         shift += XA_CHUNK_SHIFT;
0622     }
0623 
0624     xas->xa_node = node;
0625     return shift;
0626 }
0627 
0628 /*
0629  * xas_create() - Create a slot to store an entry in.
0630  * @xas: XArray operation state.
0631  * @allow_root: %true if we can store the entry in the root directly
0632  *
0633  * Most users will not need to call this function directly, as it is called
0634  * by xas_store().  It is useful for doing conditional store operations
0635  * (see the xa_cmpxchg() implementation for an example).
0636  *
0637  * Return: If the slot already existed, returns the contents of this slot.
0638  * If the slot was newly created, returns %NULL.  If it failed to create the
0639  * slot, returns %NULL and indicates the error in @xas.
0640  */
0641 static void *xas_create(struct xa_state *xas, bool allow_root)
0642 {
0643     struct xarray *xa = xas->xa;
0644     void *entry;
0645     void __rcu **slot;
0646     struct xa_node *node = xas->xa_node;
0647     int shift;
0648     unsigned int order = xas->xa_shift;
0649 
0650     if (xas_top(node)) {
0651         entry = xa_head_locked(xa);
0652         xas->xa_node = NULL;
0653         if (!entry && xa_zero_busy(xa))
0654             entry = XA_ZERO_ENTRY;
0655         shift = xas_expand(xas, entry);
0656         if (shift < 0)
0657             return NULL;
0658         if (!shift && !allow_root)
0659             shift = XA_CHUNK_SHIFT;
0660         entry = xa_head_locked(xa);
0661         slot = &xa->xa_head;
0662     } else if (xas_error(xas)) {
0663         return NULL;
0664     } else if (node) {
0665         unsigned int offset = xas->xa_offset;
0666 
0667         shift = node->shift;
0668         entry = xa_entry_locked(xa, node, offset);
0669         slot = &node->slots[offset];
0670     } else {
0671         shift = 0;
0672         entry = xa_head_locked(xa);
0673         slot = &xa->xa_head;
0674     }
0675 
0676     while (shift > order) {
0677         shift -= XA_CHUNK_SHIFT;
0678         if (!entry) {
0679             node = xas_alloc(xas, shift);
0680             if (!node)
0681                 break;
0682             if (xa_track_free(xa))
0683                 node_mark_all(node, XA_FREE_MARK);
0684             rcu_assign_pointer(*slot, xa_mk_node(node));
0685         } else if (xa_is_node(entry)) {
0686             node = xa_to_node(entry);
0687         } else {
0688             break;
0689         }
0690         entry = xas_descend(xas, node);
0691         slot = &node->slots[xas->xa_offset];
0692     }
0693 
0694     return entry;
0695 }
0696 
0697 /**
0698  * xas_create_range() - Ensure that stores to this range will succeed
0699  * @xas: XArray operation state.
0700  *
0701  * Creates all of the slots in the range covered by @xas.  Sets @xas to
0702  * create single-index entries and positions it at the beginning of the
0703  * range.  This is for the benefit of users which have not yet been
0704  * converted to use multi-index entries.
0705  */
0706 void xas_create_range(struct xa_state *xas)
0707 {
0708     unsigned long index = xas->xa_index;
0709     unsigned char shift = xas->xa_shift;
0710     unsigned char sibs = xas->xa_sibs;
0711 
0712     xas->xa_index |= ((sibs + 1UL) << shift) - 1;
0713     if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
0714         xas->xa_offset |= sibs;
0715     xas->xa_shift = 0;
0716     xas->xa_sibs = 0;
0717 
0718     for (;;) {
0719         xas_create(xas, true);
0720         if (xas_error(xas))
0721             goto restore;
0722         if (xas->xa_index <= (index | XA_CHUNK_MASK))
0723             goto success;
0724         xas->xa_index -= XA_CHUNK_SIZE;
0725 
0726         for (;;) {
0727             struct xa_node *node = xas->xa_node;
0728             if (node->shift >= shift)
0729                 break;
0730             xas->xa_node = xa_parent_locked(xas->xa, node);
0731             xas->xa_offset = node->offset - 1;
0732             if (node->offset != 0)
0733                 break;
0734         }
0735     }
0736 
0737 restore:
0738     xas->xa_shift = shift;
0739     xas->xa_sibs = sibs;
0740     xas->xa_index = index;
0741     return;
0742 success:
0743     xas->xa_index = index;
0744     if (xas->xa_node)
0745         xas_set_offset(xas);
0746 }
0747 EXPORT_SYMBOL_GPL(xas_create_range);
0748 
0749 static void update_node(struct xa_state *xas, struct xa_node *node,
0750         int count, int values)
0751 {
0752     if (!node || (!count && !values))
0753         return;
0754 
0755     node->count += count;
0756     node->nr_values += values;
0757     XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
0758     XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
0759     xas_update(xas, node);
0760     if (count < 0)
0761         xas_delete_node(xas);
0762 }
0763 
0764 /**
0765  * xas_store() - Store this entry in the XArray.
0766  * @xas: XArray operation state.
0767  * @entry: New entry.
0768  *
0769  * If @xas is operating on a multi-index entry, the entry returned by this
0770  * function is essentially meaningless (it may be an internal entry or it
0771  * may be %NULL, even if there are non-NULL entries at some of the indices
0772  * covered by the range).  This is not a problem for any current users,
0773  * and can be changed if needed.
0774  *
0775  * Return: The old entry at this index.
0776  */
0777 void *xas_store(struct xa_state *xas, void *entry)
0778 {
0779     struct xa_node *node;
0780     void __rcu **slot = &xas->xa->xa_head;
0781     unsigned int offset, max;
0782     int count = 0;
0783     int values = 0;
0784     void *first, *next;
0785     bool value = xa_is_value(entry);
0786 
0787     if (entry) {
0788         bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
0789         first = xas_create(xas, allow_root);
0790     } else {
0791         first = xas_load(xas);
0792     }
0793 
0794     if (xas_invalid(xas))
0795         return first;
0796     node = xas->xa_node;
0797     if (node && (xas->xa_shift < node->shift))
0798         xas->xa_sibs = 0;
0799     if ((first == entry) && !xas->xa_sibs)
0800         return first;
0801 
0802     next = first;
0803     offset = xas->xa_offset;
0804     max = xas->xa_offset + xas->xa_sibs;
0805     if (node) {
0806         slot = &node->slots[offset];
0807         if (xas->xa_sibs)
0808             xas_squash_marks(xas);
0809     }
0810     if (!entry)
0811         xas_init_marks(xas);
0812 
0813     for (;;) {
0814         /*
0815          * Must clear the marks before setting the entry to NULL,
0816          * otherwise xas_for_each_marked may find a NULL entry and
0817          * stop early.  rcu_assign_pointer contains a release barrier
0818          * so the mark clearing will appear to happen before the
0819          * entry is set to NULL.
0820          */
0821         rcu_assign_pointer(*slot, entry);
0822         if (xa_is_node(next) && (!node || node->shift))
0823             xas_free_nodes(xas, xa_to_node(next));
0824         if (!node)
0825             break;
0826         count += !next - !entry;
0827         values += !xa_is_value(first) - !value;
0828         if (entry) {
0829             if (offset == max)
0830                 break;
0831             if (!xa_is_sibling(entry))
0832                 entry = xa_mk_sibling(xas->xa_offset);
0833         } else {
0834             if (offset == XA_CHUNK_MASK)
0835                 break;
0836         }
0837         next = xa_entry_locked(xas->xa, node, ++offset);
0838         if (!xa_is_sibling(next)) {
0839             if (!entry && (offset > max))
0840                 break;
0841             first = next;
0842         }
0843         slot++;
0844     }
0845 
0846     update_node(xas, node, count, values);
0847     return first;
0848 }
0849 EXPORT_SYMBOL_GPL(xas_store);
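
/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * Storing NULL never needs to allocate memory, so no xas_nomem() retry loop
 * is required; this is essentially how __xa_erase() later in this file is
 * built on xas_store().  The function name is made up.
 */
static void *xa_example_erase(struct xarray *xa, unsigned long index)
{
    XA_STATE(xas, xa, index);
    void *old;

    xas_lock(&xas);
    old = xas_store(&xas, NULL);
    xas_unlock(&xas);

    return old;
}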
0850 
0851 /**
0852  * xas_get_mark() - Returns the state of this mark.
0853  * @xas: XArray operation state.
0854  * @mark: Mark number.
0855  *
0856  * Return: true if the mark is set, false if the mark is clear or @xas
0857  * is in an error state.
0858  */
0859 bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
0860 {
0861     if (xas_invalid(xas))
0862         return false;
0863     if (!xas->xa_node)
0864         return xa_marked(xas->xa, mark);
0865     return node_get_mark(xas->xa_node, xas->xa_offset, mark);
0866 }
0867 EXPORT_SYMBOL_GPL(xas_get_mark);
0868 
0869 /**
0870  * xas_set_mark() - Sets the mark on this entry and its parents.
0871  * @xas: XArray operation state.
0872  * @mark: Mark number.
0873  *
0874  * Sets the specified mark on this entry, and walks up the tree setting it
0875  * on all the ancestor entries.  Does nothing if @xas has not been walked to
0876  * an entry, or is in an error state.
0877  */
0878 void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
0879 {
0880     struct xa_node *node = xas->xa_node;
0881     unsigned int offset = xas->xa_offset;
0882 
0883     if (xas_invalid(xas))
0884         return;
0885 
0886     while (node) {
0887         if (node_set_mark(node, offset, mark))
0888             return;
0889         offset = node->offset;
0890         node = xa_parent_locked(xas->xa, node);
0891     }
0892 
0893     if (!xa_marked(xas->xa, mark))
0894         xa_mark_set(xas->xa, mark);
0895 }
0896 EXPORT_SYMBOL_GPL(xas_set_mark);
0897 
0898 /**
0899  * xas_clear_mark() - Clears the mark on this entry and its parents.
0900  * @xas: XArray operation state.
0901  * @mark: Mark number.
0902  *
0903  * Clears the specified mark on this entry, and walks back to the head
0904  * attempting to clear it on all the ancestor entries.  Does nothing if
0905  * @xas has not been walked to an entry, or is in an error state.
0906  */
0907 void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
0908 {
0909     struct xa_node *node = xas->xa_node;
0910     unsigned int offset = xas->xa_offset;
0911 
0912     if (xas_invalid(xas))
0913         return;
0914 
0915     while (node) {
0916         if (!node_clear_mark(node, offset, mark))
0917             return;
0918         if (node_any_mark(node, mark))
0919             return;
0920 
0921         offset = node->offset;
0922         node = xa_parent_locked(xas->xa, node);
0923     }
0924 
0925     if (xa_marked(xas->xa, mark))
0926         xa_mark_clear(xas->xa, mark);
0927 }
0928 EXPORT_SYMBOL_GPL(xas_clear_mark);
0929 
0930 /**
0931  * xas_init_marks() - Initialise all marks for the entry
0932  * @xas: Array operations state.
0933  *
0934  * Initialise all marks for the entry specified by @xas.  If we're tracking
0935  * free entries with a mark, we need to set it on all entries.  All other
0936  * marks are cleared.
0937  *
0938  * This implementation is not as efficient as it could be; we may walk
0939  * up the tree multiple times.
0940  */
0941 void xas_init_marks(const struct xa_state *xas)
0942 {
0943     xa_mark_t mark = 0;
0944 
0945     for (;;) {
0946         if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
0947             xas_set_mark(xas, mark);
0948         else
0949             xas_clear_mark(xas, mark);
0950         if (mark == XA_MARK_MAX)
0951             break;
0952         mark_inc(mark);
0953     }
0954 }
0955 EXPORT_SYMBOL_GPL(xas_init_marks);
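
/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * Marks are usually manipulated through the normal API rather than the
 * advanced one: tag an entry with xa_set_mark() and walk only the tagged
 * entries with xa_for_each_marked().  process() and the index used here
 * are made up for the example.
 */
static void xa_example_marks(struct xarray *xa, void (*process)(void *))
{
    unsigned long index;
    void *entry;

    xa_set_mark(xa, 7, XA_MARK_0);

    xa_for_each_marked(xa, index, entry, XA_MARK_0)
        process(entry);
}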
0956 
0957 #ifdef CONFIG_XARRAY_MULTI
0958 static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
0959 {
0960     unsigned int marks = 0;
0961     xa_mark_t mark = XA_MARK_0;
0962 
0963     for (;;) {
0964         if (node_get_mark(node, offset, mark))
0965             marks |= 1 << (__force unsigned int)mark;
0966         if (mark == XA_MARK_MAX)
0967             break;
0968         mark_inc(mark);
0969     }
0970 
0971     return marks;
0972 }
0973 
0974 static void node_set_marks(struct xa_node *node, unsigned int offset,
0975             struct xa_node *child, unsigned int marks)
0976 {
0977     xa_mark_t mark = XA_MARK_0;
0978 
0979     for (;;) {
0980         if (marks & (1 << (__force unsigned int)mark)) {
0981             node_set_mark(node, offset, mark);
0982             if (child)
0983                 node_mark_all(child, mark);
0984         }
0985         if (mark == XA_MARK_MAX)
0986             break;
0987         mark_inc(mark);
0988     }
0989 }
0990 
0991 /**
0992  * xas_split_alloc() - Allocate memory for splitting an entry.
0993  * @xas: XArray operation state.
0994  * @entry: New entry which will be stored in the array.
0995  * @order: Current entry order.
0996  * @gfp: Memory allocation flags.
0997  *
0998  * This function should be called before calling xas_split().
0999  * If necessary, it will allocate new nodes (and fill them with @entry)
1000  * to prepare for the upcoming split of an entry of @order size into
1001  * entries of the order stored in the @xas.
1002  *
1003  * Context: May sleep if @gfp flags permit.
1004  */
1005 void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
1006         gfp_t gfp)
1007 {
1008     unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
1009     unsigned int mask = xas->xa_sibs;
1010 
1011     /* XXX: no support for splitting really large entries yet */
1012     if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
1013         goto nomem;
1014     if (xas->xa_shift + XA_CHUNK_SHIFT > order)
1015         return;
1016 
1017     do {
1018         unsigned int i;
1019         void *sibling = NULL;
1020         struct xa_node *node;
1021 
1022         node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
1023         if (!node)
1024             goto nomem;
1025         node->array = xas->xa;
1026         for (i = 0; i < XA_CHUNK_SIZE; i++) {
1027             if ((i & mask) == 0) {
1028                 RCU_INIT_POINTER(node->slots[i], entry);
1029                 sibling = xa_mk_sibling(i);
1030             } else {
1031                 RCU_INIT_POINTER(node->slots[i], sibling);
1032             }
1033         }
1034         RCU_INIT_POINTER(node->parent, xas->xa_alloc);
1035         xas->xa_alloc = node;
1036     } while (sibs-- > 0);
1037 
1038     return;
1039 nomem:
1040     xas_destroy(xas);
1041     xas_set_err(xas, -ENOMEM);
1042 }
1043 EXPORT_SYMBOL_GPL(xas_split_alloc);
1044 
1045 /**
1046  * xas_split() - Split a multi-index entry into smaller entries.
1047  * @xas: XArray operation state.
1048  * @entry: New entry to store in the array.
1049  * @order: Current entry order.
1050  *
1051  * The size of the new entries is set in @xas.  The value in @entry is
1052  * copied to all the replacement entries.
1053  *
1054  * Context: Any context.  The caller should hold the xa_lock.
1055  */
1056 void xas_split(struct xa_state *xas, void *entry, unsigned int order)
1057 {
1058     unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
1059     unsigned int offset, marks;
1060     struct xa_node *node;
1061     void *curr = xas_load(xas);
1062     int values = 0;
1063 
1064     node = xas->xa_node;
1065     if (xas_top(node))
1066         return;
1067 
1068     marks = node_get_marks(node, xas->xa_offset);
1069 
1070     offset = xas->xa_offset + sibs;
1071     do {
1072         if (xas->xa_shift < node->shift) {
1073             struct xa_node *child = xas->xa_alloc;
1074 
1075             xas->xa_alloc = rcu_dereference_raw(child->parent);
1076             child->shift = node->shift - XA_CHUNK_SHIFT;
1077             child->offset = offset;
1078             child->count = XA_CHUNK_SIZE;
1079             child->nr_values = xa_is_value(entry) ?
1080                     XA_CHUNK_SIZE : 0;
1081             RCU_INIT_POINTER(child->parent, node);
1082             node_set_marks(node, offset, child, marks);
1083             rcu_assign_pointer(node->slots[offset],
1084                     xa_mk_node(child));
1085             if (xa_is_value(curr))
1086                 values--;
1087             xas_update(xas, child);
1088         } else {
1089             unsigned int canon = offset - xas->xa_sibs;
1090 
1091             node_set_marks(node, canon, NULL, marks);
1092             rcu_assign_pointer(node->slots[canon], entry);
1093             while (offset > canon)
1094                 rcu_assign_pointer(node->slots[offset--],
1095                         xa_mk_sibling(canon));
1096             values += (xa_is_value(entry) - xa_is_value(curr)) *
1097                     (xas->xa_sibs + 1);
1098         }
1099     } while (offset-- > xas->xa_offset);
1100 
1101     node->nr_values += values;
1102     xas_update(xas, node);
1103 }
1104 EXPORT_SYMBOL_GPL(xas_split);
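
/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * Splitting an order-9 entry into order-0 entries: the replacement nodes
 * are allocated first, outside the lock (xas_split_alloc() may sleep), and
 * the split itself is then done under the lock.  The function name and the
 * orders are made up.
 */
static int xa_example_split(struct xarray *xa, unsigned long index, void *entry)
{
    XA_STATE_ORDER(xas, xa, index, 0);

    xas_split_alloc(&xas, entry, 9, GFP_KERNEL);
    if (xas_error(&xas))
        return xas_error(&xas);

    xas_lock(&xas);
    xas_split(&xas, entry, 9);
    xas_unlock(&xas);

    return 0;
}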
1105 #endif
1106 
1107 /**
1108  * xas_pause() - Pause a walk to drop a lock.
1109  * @xas: XArray operation state.
1110  *
1111  * Some users need to pause a walk and drop the lock they're holding in
1112  * order to yield to a higher priority thread or carry out an operation
1113  * on an entry.  Those users should call this function before they drop
1114  * the lock.  It resets the @xas to be suitable for the next iteration
1115  * of the loop after the user has reacquired the lock.  If most entries
1116  * found during a walk require you to call xas_pause(), the xa_for_each()
1117  * iterator may be more appropriate.
1118  *
1119  * Note that xas_pause() only works for forward iteration.  If a user needs
1120  * to pause a reverse iteration, we will need an xas_pause_rev().
1121  */
1122 void xas_pause(struct xa_state *xas)
1123 {
1124     struct xa_node *node = xas->xa_node;
1125 
1126     if (xas_invalid(xas))
1127         return;
1128 
1129     xas->xa_node = XAS_RESTART;
1130     if (node) {
1131         unsigned long offset = xas->xa_offset;
1132         while (++offset < XA_CHUNK_SIZE) {
1133             if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
1134                 break;
1135         }
1136         xas->xa_index += (offset - xas->xa_offset) << node->shift;
1137         if (xas->xa_index == 0)
1138             xas->xa_node = XAS_BOUNDS;
1139     } else {
1140         xas->xa_index++;
1141     }
1142 }
1143 EXPORT_SYMBOL_GPL(xas_pause);
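
/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * A locked walk that yields periodically: xas_pause() is called before the
 * lock is dropped so the iteration resumes correctly after it is retaken.
 * process() is made up, and the usual scheduler headers are assumed to be
 * available for need_resched()/cond_resched().
 */
static void xa_example_paused_walk(struct xarray *xa, void (*process)(void *))
{
    XA_STATE(xas, xa, 0);
    void *entry;

    xas_lock(&xas);
    xas_for_each(&xas, entry, ULONG_MAX) {
        process(entry);
        if (need_resched()) {
            xas_pause(&xas);
            xas_unlock(&xas);
            cond_resched();
            xas_lock(&xas);
        }
    }
    xas_unlock(&xas);
}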
1144 
1145 /*
1146  * __xas_prev() - Find the previous entry in the XArray.
1147  * @xas: XArray operation state.
1148  *
1149  * Helper function for xas_prev() which handles all the complex cases
1150  * out of line.
1151  */
1152 void *__xas_prev(struct xa_state *xas)
1153 {
1154     void *entry;
1155 
1156     if (!xas_frozen(xas->xa_node))
1157         xas->xa_index--;
1158     if (!xas->xa_node)
1159         return set_bounds(xas);
1160     if (xas_not_node(xas->xa_node))
1161         return xas_load(xas);
1162 
1163     if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
1164         xas->xa_offset--;
1165 
1166     while (xas->xa_offset == 255) {
1167         xas->xa_offset = xas->xa_node->offset - 1;
1168         xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1169         if (!xas->xa_node)
1170             return set_bounds(xas);
1171     }
1172 
1173     for (;;) {
1174         entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1175         if (!xa_is_node(entry))
1176             return entry;
1177 
1178         xas->xa_node = xa_to_node(entry);
1179         xas_set_offset(xas);
1180     }
1181 }
1182 EXPORT_SYMBOL_GPL(__xas_prev);
1183 
1184 /*
1185  * __xas_next() - Find the next entry in the XArray.
1186  * @xas: XArray operation state.
1187  *
1188  * Helper function for xas_next() which handles all the complex cases
1189  * out of line.
1190  */
1191 void *__xas_next(struct xa_state *xas)
1192 {
1193     void *entry;
1194 
1195     if (!xas_frozen(xas->xa_node))
1196         xas->xa_index++;
1197     if (!xas->xa_node)
1198         return set_bounds(xas);
1199     if (xas_not_node(xas->xa_node))
1200         return xas_load(xas);
1201 
1202     if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
1203         xas->xa_offset++;
1204 
1205     while (xas->xa_offset == XA_CHUNK_SIZE) {
1206         xas->xa_offset = xas->xa_node->offset + 1;
1207         xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1208         if (!xas->xa_node)
1209             return set_bounds(xas);
1210     }
1211 
1212     for (;;) {
1213         entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1214         if (!xa_is_node(entry))
1215             return entry;
1216 
1217         xas->xa_node = xa_to_node(entry);
1218         xas_set_offset(xas);
1219     }
1220 }
1221 EXPORT_SYMBOL_GPL(__xas_next);
1222 
1223 /**
1224  * xas_find() - Find the next present entry in the XArray.
1225  * @xas: XArray operation state.
1226  * @max: Highest index to return.
1227  *
1228  * If the @xas has not yet been walked to an entry, return the entry
1229  * which has an index >= xas.xa_index.  If it has been walked, the entry
1230  * currently being pointed at has been processed, and so we move to the
1231  * next entry.
1232  *
1233  * If no entry is found and the array is smaller than @max, the iterator
1234  * is set to the smallest index not yet in the array.  This allows @xas
1235  * to be immediately passed to xas_store().
1236  *
1237  * Return: The entry, if found, otherwise %NULL.
1238  */
1239 void *xas_find(struct xa_state *xas, unsigned long max)
1240 {
1241     void *entry;
1242 
1243     if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
1244         return NULL;
1245     if (xas->xa_index > max)
1246         return set_bounds(xas);
1247 
1248     if (!xas->xa_node) {
1249         xas->xa_index = 1;
1250         return set_bounds(xas);
1251     } else if (xas->xa_node == XAS_RESTART) {
1252         entry = xas_load(xas);
1253         if (entry || xas_not_node(xas->xa_node))
1254             return entry;
1255     } else if (!xas->xa_node->shift &&
1256             xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
1257         xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
1258     }
1259 
1260     xas_next_offset(xas);
1261 
1262     while (xas->xa_node && (xas->xa_index <= max)) {
1263         if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
1264             xas->xa_offset = xas->xa_node->offset + 1;
1265             xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1266             continue;
1267         }
1268 
1269         entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1270         if (xa_is_node(entry)) {
1271             xas->xa_node = xa_to_node(entry);
1272             xas->xa_offset = 0;
1273             continue;
1274         }
1275         if (entry && !xa_is_sibling(entry))
1276             return entry;
1277 
1278         xas_next_offset(xas);
1279     }
1280 
1281     if (!xas->xa_node)
1282         xas->xa_node = XAS_BOUNDS;
1283     return NULL;
1284 }
1285 EXPORT_SYMBOL_GPL(xas_find);
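
/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * xas_find() is what drives the xas_for_each() iterator; an RCU-protected
 * walk looks like this, skipping retry entries rather than restarting the
 * whole iteration.  The names are made up.
 */
static void xa_example_rcu_walk(struct xarray *xa, unsigned long max,
        void (*process)(void *))
{
    XA_STATE(xas, xa, 0);
    void *entry;

    rcu_read_lock();
    xas_for_each(&xas, entry, max) {
        if (xas_retry(&xas, entry))
            continue;
        process(entry);
    }
    rcu_read_unlock();
}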
1286 
1287 /**
1288  * xas_find_marked() - Find the next marked entry in the XArray.
1289  * @xas: XArray operation state.
1290  * @max: Highest index to return.
1291  * @mark: Mark number to search for.
1292  *
1293  * If the @xas has not yet been walked to an entry, return the marked entry
1294  * which has an index >= xas.xa_index.  If it has been walked, the entry
1295  * currently being pointed at has been processed, and so we return the
1296  * first marked entry with an index > xas.xa_index.
1297  *
1298  * If no marked entry is found and the array is smaller than @max, @xas is
1299  * set to the bounds state and xas->xa_index is set to the smallest index
1300  * not yet in the array.  This allows @xas to be immediately passed to
1301  * xas_store().
1302  *
1303  * If no entry is found before @max is reached, @xas is set to the restart
1304  * state.
1305  *
1306  * Return: The entry, if found, otherwise %NULL.
1307  */
1308 void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
1309 {
1310     bool advance = true;
1311     unsigned int offset;
1312     void *entry;
1313 
1314     if (xas_error(xas))
1315         return NULL;
1316     if (xas->xa_index > max)
1317         goto max;
1318 
1319     if (!xas->xa_node) {
1320         xas->xa_index = 1;
1321         goto out;
1322     } else if (xas_top(xas->xa_node)) {
1323         advance = false;
1324         entry = xa_head(xas->xa);
1325         xas->xa_node = NULL;
1326         if (xas->xa_index > max_index(entry))
1327             goto out;
1328         if (!xa_is_node(entry)) {
1329             if (xa_marked(xas->xa, mark))
1330                 return entry;
1331             xas->xa_index = 1;
1332             goto out;
1333         }
1334         xas->xa_node = xa_to_node(entry);
1335         xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
1336     }
1337 
1338     while (xas->xa_index <= max) {
1339         if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
1340             xas->xa_offset = xas->xa_node->offset + 1;
1341             xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1342             if (!xas->xa_node)
1343                 break;
1344             advance = false;
1345             continue;
1346         }
1347 
1348         if (!advance) {
1349             entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1350             if (xa_is_sibling(entry)) {
1351                 xas->xa_offset = xa_to_sibling(entry);
1352                 xas_move_index(xas, xas->xa_offset);
1353             }
1354         }
1355 
1356         offset = xas_find_chunk(xas, advance, mark);
1357         if (offset > xas->xa_offset) {
1358             advance = false;
1359             xas_move_index(xas, offset);
1360             /* Mind the wrap */
1361             if ((xas->xa_index - 1) >= max)
1362                 goto max;
1363             xas->xa_offset = offset;
1364             if (offset == XA_CHUNK_SIZE)
1365                 continue;
1366         }
1367 
1368         entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1369         if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
1370             continue;
1371         if (!xa_is_node(entry))
1372             return entry;
1373         xas->xa_node = xa_to_node(entry);
1374         xas_set_offset(xas);
1375     }
1376 
1377 out:
1378     if (xas->xa_index > max)
1379         goto max;
1380     return set_bounds(xas);
1381 max:
1382     xas->xa_node = XAS_RESTART;
1383     return NULL;
1384 }
1385 EXPORT_SYMBOL_GPL(xas_find_marked);
1386 
1387 /**
1388  * xas_find_conflict() - Find the next present entry in a range.
1389  * @xas: XArray operation state.
1390  *
1391  * The @xas describes both a range and a position within that range.
1392  *
1393  * Context: Any context.  Expects xa_lock to be held.
1394  * Return: The next entry in the range covered by @xas or %NULL.
1395  */
1396 void *xas_find_conflict(struct xa_state *xas)
1397 {
1398     void *curr;
1399 
1400     if (xas_error(xas))
1401         return NULL;
1402 
1403     if (!xas->xa_node)
1404         return NULL;
1405 
1406     if (xas_top(xas->xa_node)) {
1407         curr = xas_start(xas);
1408         if (!curr)
1409             return NULL;
1410         while (xa_is_node(curr)) {
1411             struct xa_node *node = xa_to_node(curr);
1412             curr = xas_descend(xas, node);
1413         }
1414         if (curr)
1415             return curr;
1416     }
1417 
1418     if (xas->xa_node->shift > xas->xa_shift)
1419         return NULL;
1420 
1421     for (;;) {
1422         if (xas->xa_node->shift == xas->xa_shift) {
1423             if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
1424                 break;
1425         } else if (xas->xa_offset == XA_CHUNK_MASK) {
1426             xas->xa_offset = xas->xa_node->offset;
1427             xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
1428             if (!xas->xa_node)
1429                 break;
1430             continue;
1431         }
1432         curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
1433         if (xa_is_sibling(curr))
1434             continue;
1435         while (xa_is_node(curr)) {
1436             xas->xa_node = xa_to_node(curr);
1437             xas->xa_offset = 0;
1438             curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
1439         }
1440         if (curr)
1441             return curr;
1442     }
1443     xas->xa_offset -= xas->xa_sibs;
1444     return NULL;
1445 }
1446 EXPORT_SYMBOL_GPL(xas_find_conflict);
1447 
1448 /**
1449  * xa_load() - Load an entry from an XArray.
1450  * @xa: XArray.
1451  * @index: index into array.
1452  *
1453  * Context: Any context.  Takes and releases the RCU lock.
1454  * Return: The entry at @index in @xa.
1455  */
1456 void *xa_load(struct xarray *xa, unsigned long index)
1457 {
1458     XA_STATE(xas, xa, index);
1459     void *entry;
1460 
1461     rcu_read_lock();
1462     do {
1463         entry = xas_load(&xas);
1464         if (xa_is_zero(entry))
1465             entry = NULL;
1466     } while (xas_retry(&xas, entry));
1467     rcu_read_unlock();
1468 
1469     return entry;
1470 }
1471 EXPORT_SYMBOL(xa_load);
1472 
1473 static void *xas_result(struct xa_state *xas, void *curr)
1474 {
1475     if (xa_is_zero(curr))
1476         return NULL;
1477     if (xas_error(xas))
1478         curr = xas->xa_node;
1479     return curr;
1480 }
1481 
1482 /**
1483  * __xa_erase() - Erase this entry from the XArray while locked.
1484  * @xa: XArray.
1485  * @index: Index into array.
1486  *
1487  * After this function returns, loading from @index will return %NULL.
1488  * If the index is part of a multi-index entry, all indices will be erased
1489  * and none of the entries will be part of a multi-index entry.
1490  *
1491  * Context: Any context.  Expects xa_lock to be held on entry.
1492  * Return: The entry which used to be at this index.
1493  */
1494 void *__xa_erase(struct xarray *xa, unsigned long index)
1495 {
1496     XA_STATE(xas, xa, index);
1497     return xas_result(&xas, xas_store(&xas, NULL));
1498 }
1499 EXPORT_SYMBOL(__xa_erase);
1500 
1501 /**
1502  * xa_erase() - Erase this entry from the XArray.
1503  * @xa: XArray.
1504  * @index: Index of entry.
1505  *
1506  * After this function returns, loading from @index will return %NULL.
1507  * If the index is part of a multi-index entry, all indices will be erased
1508  * and none of the entries will be part of a multi-index entry.
1509  *
1510  * Context: Any context.  Takes and releases the xa_lock.
1511  * Return: The entry which used to be at this index.
1512  */
1513 void *xa_erase(struct xarray *xa, unsigned long index)
1514 {
1515     void *entry;
1516 
1517     xa_lock(xa);
1518     entry = __xa_erase(xa, index);
1519     xa_unlock(xa);
1520 
1521     return entry;
1522 }
1523 EXPORT_SYMBOL(xa_erase);
1524 
1525 /**
1526  * __xa_store() - Store this entry in the XArray.
1527  * @xa: XArray.
1528  * @index: Index into array.
1529  * @entry: New entry.
1530  * @gfp: Memory allocation flags.
1531  *
1532  * You must already be holding the xa_lock when calling this function.
1533  * It will drop the lock if needed to allocate memory, and then reacquire
1534  * it afterwards.
1535  *
1536  * Context: Any context.  Expects xa_lock to be held on entry.  May
1537  * release and reacquire xa_lock if @gfp flags permit.
1538  * Return: The old entry at this index or xa_err() if an error happened.
1539  */
1540 void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1541 {
1542     XA_STATE(xas, xa, index);
1543     void *curr;
1544 
1545     if (WARN_ON_ONCE(xa_is_advanced(entry)))
1546         return XA_ERROR(-EINVAL);
1547     if (xa_track_free(xa) && !entry)
1548         entry = XA_ZERO_ENTRY;
1549 
1550     do {
1551         curr = xas_store(&xas, entry);
1552         if (xa_track_free(xa))
1553             xas_clear_mark(&xas, XA_FREE_MARK);
1554     } while (__xas_nomem(&xas, gfp));
1555 
1556     return xas_result(&xas, curr);
1557 }
1558 EXPORT_SYMBOL(__xa_store);
1559 
1560 /**
1561  * xa_store() - Store this entry in the XArray.
1562  * @xa: XArray.
1563  * @index: Index into array.
1564  * @entry: New entry.
1565  * @gfp: Memory allocation flags.
1566  *
1567  * After this function returns, loads from this index will return @entry.
1568  * Storing into an existing multi-index entry updates the entry of every index.
1569  * The marks associated with @index are unaffected unless @entry is %NULL.
1570  *
1571  * Context: Any context.  Takes and releases the xa_lock.
1572  * May sleep if the @gfp flags permit.
1573  * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
1574  * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
1575  * failed.
1576  */
1577 void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1578 {
1579     void *curr;
1580 
1581     xa_lock(xa);
1582     curr = __xa_store(xa, index, entry, gfp);
1583     xa_unlock(xa);
1584 
1585     return curr;
1586 }
1587 EXPORT_SYMBOL(xa_store);
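
/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * Basic normal-API usage: the return value of xa_store() is the old entry
 * and must be checked with xa_is_err(), since an allocation failure is
 * reported through it.  The names are made up.
 */
static int xa_example_basic(struct xarray *xa, unsigned long index, void *item)
{
    void *old = xa_store(xa, index, item, GFP_KERNEL);

    if (xa_is_err(old))
        return xa_err(old);

    WARN_ON(xa_load(xa, index) != item);
    xa_erase(xa, index);
    return 0;
}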
1588 
1589 /**
1590  * __xa_cmpxchg() - Conditionally replace an entry in the XArray.
1591  * @xa: XArray.
1592  * @index: Index into array.
1593  * @old: Old value to test against.
1594  * @entry: New entry.
1595  * @gfp: Memory allocation flags.
1596  *
1597  * You must already be holding the xa_lock when calling this function.
1598  * It will drop the lock if needed to allocate memory, and then reacquire
1599  * it afterwards.
1600  *
1601  * Context: Any context.  Expects xa_lock to be held on entry.  May
1602  * release and reacquire xa_lock if @gfp flags permit.
1603  * Return: The old entry at this index or xa_err() if an error happened.
1604  */
1605 void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
1606             void *old, void *entry, gfp_t gfp)
1607 {
1608     XA_STATE(xas, xa, index);
1609     void *curr;
1610 
1611     if (WARN_ON_ONCE(xa_is_advanced(entry)))
1612         return XA_ERROR(-EINVAL);
1613 
1614     do {
1615         curr = xas_load(&xas);
1616         if (curr == old) {
1617             xas_store(&xas, entry);
1618             if (xa_track_free(xa) && entry && !curr)
1619                 xas_clear_mark(&xas, XA_FREE_MARK);
1620         }
1621     } while (__xas_nomem(&xas, gfp));
1622 
1623     return xas_result(&xas, curr);
1624 }
1625 EXPORT_SYMBOL(__xa_cmpxchg);
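
/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * A common use of the compare-and-exchange operation is "add unless already
 * present": pass NULL as the old value and inspect what comes back.  The
 * names are made up; this uses the unlocked xa_cmpxchg() wrapper.
 */
static void *xa_example_add_unless_present(struct xarray *xa,
        unsigned long index, void *item)
{
    void *curr = xa_cmpxchg(xa, index, NULL, item, GFP_KERNEL);

    if (!curr)
        return item;        /* we installed @item */
    if (xa_is_err(curr))
        return NULL;        /* allocation failed */
    return curr;            /* someone else's entry was already there */
}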
1626 
1627 /**
1628  * __xa_insert() - Store this entry in the XArray if no entry is present.
1629  * @xa: XArray.
1630  * @index: Index into array.
1631  * @entry: New entry.
1632  * @gfp: Memory allocation flags.
1633  *
1634  * Inserting a NULL entry will store a reserved entry (like xa_reserve())
1635  * if no entry is present.  Inserting will fail if a reserved entry is
1636  * present, even though loading from this index will return NULL.
1637  *
1638  * Context: Any context.  Expects xa_lock to be held on entry.  May
1639  * release and reacquire xa_lock if @gfp flags permit.
1640  * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
1641  * -ENOMEM if memory could not be allocated.
1642  */
1643 int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1644 {
1645     XA_STATE(xas, xa, index);
1646     void *curr;
1647 
1648     if (WARN_ON_ONCE(xa_is_advanced(entry)))
1649         return -EINVAL;
1650     if (!entry)
1651         entry = XA_ZERO_ENTRY;
1652 
1653     do {
1654         curr = xas_load(&xas);
1655         if (!curr) {
1656             xas_store(&xas, entry);
1657             if (xa_track_free(xa))
1658                 xas_clear_mark(&xas, XA_FREE_MARK);
1659         } else {
1660             xas_set_err(&xas, -EBUSY);
1661         }
1662     } while (__xas_nomem(&xas, gfp));
1663 
1664     return xas_error(&xas);
1665 }
1666 EXPORT_SYMBOL(__xa_insert);
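
/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * The unlocked wrapper xa_insert() returns an errno rather than an entry,
 * which makes the -EBUSY case described above easy to handle.  The names
 * are made up.
 */
static int xa_example_insert(struct xarray *xa, unsigned long index, void *item)
{
    int err = xa_insert(xa, index, item, GFP_KERNEL);

    if (err == -EBUSY)
        pr_debug("index %lu already in use\n", index);
    return err;
}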
1667 
1668 #ifdef CONFIG_XARRAY_MULTI
1669 static void xas_set_range(struct xa_state *xas, unsigned long first,
1670         unsigned long last)
1671 {
1672     unsigned int shift = 0;
1673     unsigned long sibs = last - first;
1674     unsigned int offset = XA_CHUNK_MASK;
1675 
1676     xas_set(xas, first);
1677 
1678     while ((first & XA_CHUNK_MASK) == 0) {
1679         if (sibs < XA_CHUNK_MASK)
1680             break;
1681         if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
1682             break;
1683         shift += XA_CHUNK_SHIFT;
1684         if (offset == XA_CHUNK_MASK)
1685             offset = sibs & XA_CHUNK_MASK;
1686         sibs >>= XA_CHUNK_SHIFT;
1687         first >>= XA_CHUNK_SHIFT;
1688     }
1689 
1690     offset = first & XA_CHUNK_MASK;
1691     if (offset + sibs > XA_CHUNK_MASK)
1692         sibs = XA_CHUNK_MASK - offset;
1693     if ((((first + sibs + 1) << shift) - 1) > last)
1694         sibs -= 1;
1695 
1696     xas->xa_shift = shift;
1697     xas->xa_sibs = sibs;
1698 }
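
/*
 * Worked example for xas_set_range(), assuming XA_CHUNK_SHIFT == 6
 * (64 slots per node): for first == 0 and last == 127 the loop raises
 * @shift to 6 and leaves @sibs == 1, so xas_store() writes one canonical
 * entry plus one sibling at the shift-6 level, covering 2 * 64 == 128
 * indices in a single pass.  Ranges that are not chunk-aligned keep a
 * smaller shift and may need several passes of the loop in
 * xa_store_range() below.
 */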
1699 
1700 /**
1701  * xa_store_range() - Store this entry at a range of indices in the XArray.
1702  * @xa: XArray.
1703  * @first: First index to affect.
1704  * @last: Last index to affect.
1705  * @entry: New entry.
1706  * @gfp: Memory allocation flags.
1707  *
1708  * After this function returns, loads from any index between @first and @last,
1709  * inclusive, will return @entry.
1710  * Storing into an existing multi-index entry updates the entry of every index.
1711  * The marks associated with these indices are unaffected unless @entry is %NULL.
1712  *
1713  * Context: Process context.  Takes and releases the xa_lock.  May sleep
1714  * if the @gfp flags permit.
1715  * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
1716  * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
1717  */
1718 void *xa_store_range(struct xarray *xa, unsigned long first,
1719         unsigned long last, void *entry, gfp_t gfp)
1720 {
1721     XA_STATE(xas, xa, 0);
1722 
1723     if (WARN_ON_ONCE(xa_is_internal(entry)))
1724         return XA_ERROR(-EINVAL);
1725     if (last < first)
1726         return XA_ERROR(-EINVAL);
1727 
1728     do {
1729         xas_lock(&xas);
1730         if (entry) {
1731             unsigned int order = BITS_PER_LONG;
1732             if (last + 1)
1733                 order = __ffs(last + 1);
1734             xas_set_order(&xas, last, order);
1735             xas_create(&xas, true);
1736             if (xas_error(&xas))
1737                 goto unlock;
1738         }
1739         do {
1740             xas_set_range(&xas, first, last);
1741             xas_store(&xas, entry);
1742             if (xas_error(&xas))
1743                 goto unlock;
1744             first += xas_size(&xas);
1745         } while (first <= last);
1746 unlock:
1747         xas_unlock(&xas);
1748     } while (xas_nomem(&xas, gfp));
1749 
1750     return xas_result(&xas, NULL);
1751 }
1752 EXPORT_SYMBOL(xa_store_range);
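
/*
 * Usage sketch (illustrative): tie one entry to a whole range of indices.
 * Requires CONFIG_XARRAY_MULTI; the 64..127 range and the payload are
 * arbitrary choices for the example.
 */
static int example_store_range(struct xarray *xa, void *item)
{
    /* Afterwards, xa_load() for any index in 64..127 returns @item. */
    void *ret = xa_store_range(xa, 64, 127, item, GFP_KERNEL);

    return xa_err(ret);     /* NULL on success, so this is 0 or a -E value */
}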
1753 
1754 /**
1755  * xa_get_order() - Get the order of an entry.
1756  * @xa: XArray.
1757  * @index: Index of the entry.
1758  *
1759  * Return: A number between 0 and 63 indicating the order of the entry.
1760  */
1761 int xa_get_order(struct xarray *xa, unsigned long index)
1762 {
1763     XA_STATE(xas, xa, index);
1764     void *entry;
1765     int order = 0;
1766 
1767     rcu_read_lock();
1768     entry = xas_load(&xas);
1769 
1770     if (!entry)
1771         goto unlock;
1772 
1773     if (!xas.xa_node)
1774         goto unlock;
1775 
1776     for (;;) {
1777         unsigned int slot = xas.xa_offset + (1 << order);
1778 
1779         if (slot >= XA_CHUNK_SIZE)
1780             break;
1781         if (!xa_is_sibling(xas.xa_node->slots[slot]))
1782             break;
1783         order++;
1784     }
1785 
1786     order += xas.xa_node->shift;
1787 unlock:
1788     rcu_read_unlock();
1789 
1790     return order;
1791 }
1792 EXPORT_SYMBOL(xa_get_order);
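
/*
 * Usage sketch (illustrative): after a range store such as the 64..127
 * example above, xa_get_order() reports how wide the containing entry is.
 * Assuming XA_CHUNK_SHIFT == 6, that range is a single order-6 entry, so
 * every index inside it reports 6; an ordinary single-index entry reports 0.
 */
static int example_get_order(struct xarray *xa)
{
    /* Any index inside the 64..127 range sketched above will do. */
    return xa_get_order(xa, 100);
}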
1793 #endif /* CONFIG_XARRAY_MULTI */
1794 
1795 /**
1796  * __xa_alloc() - Find somewhere to store this entry in the XArray.
1797  * @xa: XArray.
1798  * @id: Pointer to ID.
1799  * @limit: Range for allocated ID.
1800  * @entry: New entry.
1801  * @gfp: Memory allocation flags.
1802  *
1803  * Finds an empty entry in @xa between @limit.min and @limit.max,
1804  * stores the index into the @id pointer, then stores the entry at
1805  * that index.  A concurrent lookup will not see an uninitialised @id.
1806  *
1807  * Context: Any context.  Expects xa_lock to be held on entry.  May
1808  * release and reacquire xa_lock if @gfp flags permit.
1809  * Return: 0 on success, -ENOMEM if memory could not be allocated or
1810  * -EBUSY if there are no free entries in @limit.
1811  */
1812 int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
1813         struct xa_limit limit, gfp_t gfp)
1814 {
1815     XA_STATE(xas, xa, 0);
1816 
1817     if (WARN_ON_ONCE(xa_is_advanced(entry)))
1818         return -EINVAL;
1819     if (WARN_ON_ONCE(!xa_track_free(xa)))
1820         return -EINVAL;
1821 
1822     if (!entry)
1823         entry = XA_ZERO_ENTRY;
1824 
1825     do {
1826         xas.xa_index = limit.min;
1827         xas_find_marked(&xas, limit.max, XA_FREE_MARK);
1828         if (xas.xa_node == XAS_RESTART)
1829             xas_set_err(&xas, -EBUSY);
1830         else
1831             *id = xas.xa_index;
1832         xas_store(&xas, entry);
1833         xas_clear_mark(&xas, XA_FREE_MARK);
1834     } while (__xas_nomem(&xas, gfp));
1835 
1836     return xas_error(&xas);
1837 }
1838 EXPORT_SYMBOL(__xa_alloc);
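
/*
 * Usage sketch (illustrative): allocate a free index for a new object.
 * The array must have been initialised with XA_FLAGS_ALLOC (for example
 * via DEFINE_XARRAY_ALLOC()); xa_limit_32b permits any 32-bit index.
 * Callers normally reach this through the xa_alloc() wrapper in xarray.h.
 */
static int example_alloc(struct xarray *xa, void *item, u32 *id)
{
    int err;

    xa_lock(xa);
    err = __xa_alloc(xa, id, item, xa_limit_32b, GFP_KERNEL);
    xa_unlock(xa);

    return err;     /* 0 on success, -EBUSY if full, -ENOMEM on failure */
}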
1839 
1840 /**
1841  * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
1842  * @xa: XArray.
1843  * @id: Pointer to ID.
1844  * @entry: New entry.
1845  * @limit: Range of allocated ID.
1846  * @next: Pointer to next ID to allocate.
1847  * @gfp: Memory allocation flags.
1848  *
1849  * Finds an empty entry in @xa between @limit.min and @limit.max,
1850  * stores the index into the @id pointer, then stores the entry at
1851  * that index.  A concurrent lookup will not see an uninitialised @id.
1852  * The search for an empty entry will start at @next and will wrap
1853  * around if necessary.
1854  *
1855  * Context: Any context.  Expects xa_lock to be held on entry.  May
1856  * release and reacquire xa_lock if @gfp flags permit.
1857  * Return: 0 if the allocation succeeded without wrapping.  1 if the
1858  * allocation succeeded after wrapping, -ENOMEM if memory could not be
1859  * allocated or -EBUSY if there are no free entries in @limit.
1860  */
1861 int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
1862         struct xa_limit limit, u32 *next, gfp_t gfp)
1863 {
1864     u32 min = limit.min;
1865     int ret;
1866 
1867     limit.min = max(min, *next);
1868     ret = __xa_alloc(xa, id, entry, limit, gfp);
1869     if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
1870         xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
1871         ret = 1;
1872     }
1873 
1874     if (ret < 0 && limit.min > min) {
1875         limit.min = min;
1876         ret = __xa_alloc(xa, id, entry, limit, gfp);
1877         if (ret == 0)
1878             ret = 1;
1879     }
1880 
1881     if (ret >= 0) {
1882         *next = *id + 1;
1883         if (*next == 0)
1884             xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
1885     }
1886     return ret;
1887 }
1888 EXPORT_SYMBOL(__xa_alloc_cyclic);
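
/*
 * Usage sketch (illustrative): cyclic allocation keeps a caller-owned
 * @next cursor so recently freed IDs are not reused immediately.  A return
 * of 1 (allocation succeeded after wrapping) is still a success; this
 * hypothetical caller folds it into 0.
 */
static int example_alloc_cyclic(struct xarray *xa, void *item,
        u32 *id, u32 *next)
{
    int err;

    xa_lock(xa);
    err = __xa_alloc_cyclic(xa, id, item, xa_limit_32b, next, GFP_KERNEL);
    xa_unlock(xa);

    return err < 0 ? err : 0;
}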
1889 
1890 /**
1891  * __xa_set_mark() - Set this mark on this entry while locked.
1892  * @xa: XArray.
1893  * @index: Index of entry.
1894  * @mark: Mark number.
1895  *
1896  * Attempting to set a mark on a %NULL entry does not succeed.
1897  *
1898  * Context: Any context.  Expects xa_lock to be held on entry.
1899  */
1900 void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1901 {
1902     XA_STATE(xas, xa, index);
1903     void *entry = xas_load(&xas);
1904 
1905     if (entry)
1906         xas_set_mark(&xas, mark);
1907 }
1908 EXPORT_SYMBOL(__xa_set_mark);
1909 
1910 /**
1911  * __xa_clear_mark() - Clear this mark on this entry while locked.
1912  * @xa: XArray.
1913  * @index: Index of entry.
1914  * @mark: Mark number.
1915  *
1916  * Context: Any context.  Expects xa_lock to be held on entry.
1917  */
1918 void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1919 {
1920     XA_STATE(xas, xa, index);
1921     void *entry = xas_load(&xas);
1922 
1923     if (entry)
1924         xas_clear_mark(&xas, mark);
1925 }
1926 EXPORT_SYMBOL(__xa_clear_mark);
1927 
1928 /**
1929  * xa_get_mark() - Inquire whether this mark is set on this entry.
1930  * @xa: XArray.
1931  * @index: Index of entry.
1932  * @mark: Mark number.
1933  *
1934  * This function uses the RCU read lock, so the result may be out of date
1935  * by the time it returns.  If you need the result to be stable, use a lock.
1936  *
1937  * Context: Any context.  Takes and releases the RCU lock.
1938  * Return: True if the entry at @index has this mark set, false if it doesn't.
1939  */
1940 bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1941 {
1942     XA_STATE(xas, xa, index);
1943     void *entry;
1944 
1945     rcu_read_lock();
1946     entry = xas_start(&xas);
1947     while (xas_get_mark(&xas, mark)) {
1948         if (!xa_is_node(entry))
1949             goto found;
1950         entry = xas_descend(&xas, xa_to_node(entry));
1951     }
1952     rcu_read_unlock();
1953     return false;
1954  found:
1955     rcu_read_unlock();
1956     return true;
1957 }
1958 EXPORT_SYMBOL(xa_get_mark);
1959 
1960 /**
1961  * xa_set_mark() - Set this mark on this entry.
1962  * @xa: XArray.
1963  * @index: Index of entry.
1964  * @mark: Mark number.
1965  *
1966  * Attempting to set a mark on a %NULL entry does not succeed.
1967  *
1968  * Context: Process context.  Takes and releases the xa_lock.
1969  */
1970 void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1971 {
1972     xa_lock(xa);
1973     __xa_set_mark(xa, index, mark);
1974     xa_unlock(xa);
1975 }
1976 EXPORT_SYMBOL(xa_set_mark);
1977 
1978 /**
1979  * xa_clear_mark() - Clear this mark on this entry.
1980  * @xa: XArray.
1981  * @index: Index of entry.
1982  * @mark: Mark number.
1983  *
1984  * Clearing a mark always succeeds.
1985  *
1986  * Context: Process context.  Takes and releases the xa_lock.
1987  */
1988 void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1989 {
1990     xa_lock(xa);
1991     __xa_clear_mark(xa, index, mark);
1992     xa_unlock(xa);
1993 }
1994 EXPORT_SYMBOL(xa_clear_mark);
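
/*
 * Usage sketch (illustrative): marks tag existing entries cheaply.  Here a
 * hypothetical "dirty" state is tracked with XA_MARK_0; setting a mark on
 * an empty index does nothing, and xa_get_mark() on its own is only a hint
 * unless the caller serialises against writers.
 */
static void example_marks(struct xarray *xa, unsigned long index)
{
    xa_set_mark(xa, index, XA_MARK_0);  /* succeeds only if an entry exists */

    if (xa_get_mark(xa, index, XA_MARK_0)) {
        /* ... write the entry back ... */
        xa_clear_mark(xa, index, XA_MARK_0);
    }
}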
1995 
1996 /**
1997  * xa_find() - Search the XArray for an entry.
1998  * @xa: XArray.
1999  * @indexp: Pointer to an index.
2000  * @max: Maximum index to search to.
2001  * @filter: Selection criterion.
2002  *
2003  * Finds the entry in @xa which matches the @filter, and has the lowest
2004  * index that is at least @indexp and no more than @max.
2005  * If an entry is found, @indexp is updated to be the index of the entry.
2006  * This function is protected by the RCU read lock, so it may not find
2007  * entries which are being simultaneously added.  It will not return an
2008  * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
2009  *
2010  * Context: Any context.  Takes and releases the RCU lock.
2011  * Return: The entry, if found, otherwise %NULL.
2012  */
2013 void *xa_find(struct xarray *xa, unsigned long *indexp,
2014             unsigned long max, xa_mark_t filter)
2015 {
2016     XA_STATE(xas, xa, *indexp);
2017     void *entry;
2018 
2019     rcu_read_lock();
2020     do {
2021         if ((__force unsigned int)filter < XA_MAX_MARKS)
2022             entry = xas_find_marked(&xas, max, filter);
2023         else
2024             entry = xas_find(&xas, max);
2025     } while (xas_retry(&xas, entry));
2026     rcu_read_unlock();
2027 
2028     if (entry)
2029         *indexp = xas.xa_index;
2030     return entry;
2031 }
2032 EXPORT_SYMBOL(xa_find);
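
/*
 * Usage sketch (illustrative): find the first present entry at or above a
 * starting index.  Passing XA_PRESENT matches any non-NULL entry; passing
 * a mark such as XA_MARK_0 restricts the search to marked entries.
 */
static void *example_find_first(struct xarray *xa, unsigned long *index)
{
    /* On success, @*index is updated to the index of the entry found. */
    return xa_find(xa, index, ULONG_MAX, XA_PRESENT);
}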
2033 
2034 static bool xas_sibling(struct xa_state *xas)
2035 {
2036     struct xa_node *node = xas->xa_node;
2037     unsigned long mask;
2038 
2039     if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
2040         return false;
2041     mask = (XA_CHUNK_SIZE << node->shift) - 1;
2042     return (xas->xa_index & mask) >
2043         ((unsigned long)xas->xa_offset << node->shift);
2044 }
2045 
2046 /**
2047  * xa_find_after() - Search the XArray for a present entry.
2048  * @xa: XArray.
2049  * @indexp: Pointer to an index.
2050  * @max: Maximum index to search to.
2051  * @filter: Selection criterion.
2052  *
2053  * Finds the entry in @xa which matches the @filter and has the lowest
2054  * index that is above @indexp and no more than @max.
2055  * If an entry is found, @indexp is updated to be the index of the entry.
2056  * This function is protected by the RCU read lock, so it may miss entries
2057  * which are being simultaneously added.  It will not return an
2058  * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
2059  *
2060  * Context: Any context.  Takes and releases the RCU lock.
2061  * Return: The entry, if found, otherwise %NULL.
2062  */
2063 void *xa_find_after(struct xarray *xa, unsigned long *indexp,
2064             unsigned long max, xa_mark_t filter)
2065 {
2066     XA_STATE(xas, xa, *indexp + 1);
2067     void *entry;
2068 
2069     if (xas.xa_index == 0)
2070         return NULL;
2071 
2072     rcu_read_lock();
2073     for (;;) {
2074         if ((__force unsigned int)filter < XA_MAX_MARKS)
2075             entry = xas_find_marked(&xas, max, filter);
2076         else
2077             entry = xas_find(&xas, max);
2078 
2079         if (xas_invalid(&xas))
2080             break;
2081         if (xas_sibling(&xas))
2082             continue;
2083         if (!xas_retry(&xas, entry))
2084             break;
2085     }
2086     rcu_read_unlock();
2087 
2088     if (entry)
2089         *indexp = xas.xa_index;
2090     return entry;
2091 }
2092 EXPORT_SYMBOL(xa_find_after);
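
/*
 * Usage sketch (illustrative): xa_find() plus xa_find_after() form a simple
 * iteration; the xa_for_each() helpers in xarray.h wrap this same pattern.
 * Entries are looked up under RCU, so @process (a hypothetical callback)
 * must tolerate entries that have since been removed, or the caller must
 * hold the xa_lock.
 */
static unsigned int example_iterate(struct xarray *xa,
        void (*process)(unsigned long index, void *entry))
{
    unsigned long index = 0;
    unsigned int count = 0;
    void *entry;

    for (entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); entry;
         entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)) {
        process(index, entry);
        count++;
    }
    return count;
}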
2093 
2094 static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
2095             unsigned long max, unsigned int n)
2096 {
2097     void *entry;
2098     unsigned int i = 0;
2099 
2100     rcu_read_lock();
2101     xas_for_each(xas, entry, max) {
2102         if (xas_retry(xas, entry))
2103             continue;
2104         dst[i++] = entry;
2105         if (i == n)
2106             break;
2107     }
2108     rcu_read_unlock();
2109 
2110     return i;
2111 }
2112 
2113 static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
2114             unsigned long max, unsigned int n, xa_mark_t mark)
2115 {
2116     void *entry;
2117     unsigned int i = 0;
2118 
2119     rcu_read_lock();
2120     xas_for_each_marked(xas, entry, max, mark) {
2121         if (xas_retry(xas, entry))
2122             continue;
2123         dst[i++] = entry;
2124         if (i == n)
2125             break;
2126     }
2127     rcu_read_unlock();
2128 
2129     return i;
2130 }
2131 
2132 /**
2133  * xa_extract() - Copy selected entries from the XArray into a normal array.
2134  * @xa: The source XArray to copy from.
2135  * @dst: The buffer to copy entries into.
2136  * @start: The first index in the XArray eligible to be selected.
2137  * @max: The last index in the XArray eligible to be selected.
2138  * @n: The maximum number of entries to copy.
2139  * @filter: Selection criterion.
2140  *
2141  * Copies up to @n entries that match @filter from the XArray.  The
2142  * copied entries will have indices between @start and @max, inclusive.
2143  *
2144  * The @filter may be an XArray mark value, in which case entries which are
2145  * marked with that mark will be copied.  It may also be %XA_PRESENT, in
2146  * which case all entries which are not %NULL will be copied.
2147  *
2148  * The entries returned may not represent a snapshot of the XArray at a
2149  * moment in time.  For example, if another thread stores to index 5, then
2150  * index 10, calling xa_extract() may return the old contents of index 5
2151  * and the new contents of index 10.  Indices not modified while this
2152  * function is running will not be skipped.
2153  *
2154  * If you need stronger guarantees, holding the xa_lock across calls to this
2155  * function will prevent concurrent modification.
2156  *
2157  * Context: Any context.  Takes and releases the RCU lock.
2158  * Return: The number of entries copied.
2159  */
2160 unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
2161             unsigned long max, unsigned int n, xa_mark_t filter)
2162 {
2163     XA_STATE(xas, xa, start);
2164 
2165     if (!n)
2166         return 0;
2167 
2168     if ((__force unsigned int)filter < XA_MAX_MARKS)
2169         return xas_extract_marked(&xas, dst, max, n, filter);
2170     return xas_extract_present(&xas, dst, max, n);
2171 }
2172 EXPORT_SYMBOL(xa_extract);
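
/*
 * Usage sketch (illustrative): copy up to 16 present entries from the
 * first 1024 indices into a caller-supplied buffer.  The sizes are
 * arbitrary; the result is not an atomic snapshot unless the caller holds
 * the xa_lock across the call.
 */
static unsigned int example_extract(struct xarray *xa, void **buf16)
{
    return xa_extract(xa, buf16, 0, 1023, 16, XA_PRESENT);
}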
2173 
2174 /**
2175  * xa_delete_node() - Private interface for workingset code.
2176  * @node: Node to be removed from the tree.
2177  * @update: Function to call to update ancestor nodes.
2178  *
2179  * Context: xa_lock must be held on entry and will not be released.
2180  */
2181 void xa_delete_node(struct xa_node *node, xa_update_node_t update)
2182 {
2183     struct xa_state xas = {
2184         .xa = node->array,
2185         .xa_index = (unsigned long)node->offset <<
2186                 (node->shift + XA_CHUNK_SHIFT),
2187         .xa_shift = node->shift + XA_CHUNK_SHIFT,
2188         .xa_offset = node->offset,
2189         .xa_node = xa_parent_locked(node->array, node),
2190         .xa_update = update,
2191     };
2192 
2193     xas_store(&xas, NULL);
2194 }
2195 EXPORT_SYMBOL_GPL(xa_delete_node);  /* For the benefit of the test suite */
2196 
2197 /**
2198  * xa_destroy() - Free all internal data structures.
2199  * @xa: XArray.
2200  *
2201  * After calling this function, the XArray is empty and has freed all memory
2202  * allocated for its internal data structures.  You are responsible for
2203  * freeing the objects referenced by the XArray.
2204  *
2205  * Context: Any context.  Takes and releases the xa_lock, interrupt-safe.
2206  */
2207 void xa_destroy(struct xarray *xa)
2208 {
2209     XA_STATE(xas, xa, 0);
2210     unsigned long flags;
2211     void *entry;
2212 
2213     xas.xa_node = NULL;
2214     xas_lock_irqsave(&xas, flags);
2215     entry = xa_head_locked(xa);
2216     RCU_INIT_POINTER(xa->xa_head, NULL);
2217     xas_init_marks(&xas);
2218     if (xa_zero_busy(xa))
2219         xa_mark_clear(xa, XA_FREE_MARK);
2220     /* lockdep checks we're still holding the lock in xas_free_nodes() */
2221     if (xa_is_node(entry))
2222         xas_free_nodes(&xas, xa_to_node(entry));
2223     xas_unlock_irqrestore(&xas, flags);
2224 }
2225 EXPORT_SYMBOL(xa_destroy);
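
/*
 * Usage sketch (illustrative): xa_destroy() frees only the XArray's own
 * nodes, so callers who stored pointers usually release those objects
 * first.  xa_for_each() comes from xarray.h; kfree() stands in for
 * whatever release function the stored objects actually need.
 */
static void example_teardown(struct xarray *xa)
{
    unsigned long index;
    void *entry;

    xa_for_each(xa, index, entry)
        kfree(entry);
    xa_destroy(xa);
}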
2226 
2227 #ifdef XA_DEBUG
2228 void xa_dump_node(const struct xa_node *node)
2229 {
2230     unsigned i, j;
2231 
2232     if (!node)
2233         return;
2234     if ((unsigned long)node & 3) {
2235         pr_cont("node %px\n", node);
2236         return;
2237     }
2238 
2239     pr_cont("node %px %s %d parent %px shift %d count %d values %d "
2240         "array %px list %px %px marks",
2241         node, node->parent ? "offset" : "max", node->offset,
2242         node->parent, node->shift, node->count, node->nr_values,
2243         node->array, node->private_list.prev, node->private_list.next);
2244     for (i = 0; i < XA_MAX_MARKS; i++)
2245         for (j = 0; j < XA_MARK_LONGS; j++)
2246             pr_cont(" %lx", node->marks[i][j]);
2247     pr_cont("\n");
2248 }
2249 
2250 void xa_dump_index(unsigned long index, unsigned int shift)
2251 {
2252     if (!shift)
2253         pr_info("%lu: ", index);
2254     else if (shift >= BITS_PER_LONG)
2255         pr_info("0-%lu: ", ~0UL);
2256     else
2257         pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
2258 }
2259 
2260 void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
2261 {
2262     if (!entry)
2263         return;
2264 
2265     xa_dump_index(index, shift);
2266 
2267     if (xa_is_node(entry)) {
2268         if (shift == 0) {
2269             pr_cont("%px\n", entry);
2270         } else {
2271             unsigned long i;
2272             struct xa_node *node = xa_to_node(entry);
2273             xa_dump_node(node);
2274             for (i = 0; i < XA_CHUNK_SIZE; i++)
2275                 xa_dump_entry(node->slots[i],
2276                       index + (i << node->shift), node->shift);
2277         }
2278     } else if (xa_is_value(entry))
2279         pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
2280                         xa_to_value(entry), entry);
2281     else if (!xa_is_internal(entry))
2282         pr_cont("%px\n", entry);
2283     else if (xa_is_retry(entry))
2284         pr_cont("retry (%ld)\n", xa_to_internal(entry));
2285     else if (xa_is_sibling(entry))
2286         pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
2287     else if (xa_is_zero(entry))
2288         pr_cont("zero (%ld)\n", xa_to_internal(entry));
2289     else
2290         pr_cont("UNKNOWN ENTRY (%px)\n", entry);
2291 }
2292 
2293 void xa_dump(const struct xarray *xa)
2294 {
2295     void *entry = xa->xa_head;
2296     unsigned int shift = 0;
2297 
2298     pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
2299             xa->xa_flags, xa_marked(xa, XA_MARK_0),
2300             xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
2301     if (xa_is_node(entry))
2302         shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
2303     xa_dump_entry(entry, 0, shift);
2304 }
2305 #endif