Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 #include <linux/bitmap.h>
0003 #include <linux/bug.h>
0004 #include <linux/export.h>
0005 #include <linux/idr.h>
0006 #include <linux/slab.h>
0007 #include <linux/spinlock.h>
0008 #include <linux/xarray.h>
0009 
/**
 * idr_alloc_u32() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @nextid: Pointer to an ID.
 * @max: The maximum ID to allocate (inclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @nextid and @max.
 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
 * is exclusive.  The new ID is assigned to @nextid before the pointer
 * is inserted into the IDR, so if @nextid points into the object pointed
 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.  If an error occurred,
 * @nextid is unchanged.
 */
int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
			unsigned long max, gfp_t gfp)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	/* User-visible IDs are offset by idr_base from the tree index. */
	unsigned int base = idr->idr_base;
	unsigned int id = *nextid;

	/*
	 * The ROOT_IS_IDR flag should already be set; if it is missing the
	 * IDR was presumably not initialised through the normal IDR macros
	 * (NOTE(review): confirm).  Repair the flags so the IDR_FREE
	 * tagging below behaves correctly.
	 */
	if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
		idr->idr_rt.xa_flags |= IDR_RT_MARKER;

	/* Translate the requested starting ID into tree-index space. */
	id = (id < base) ? 0 : id - base;
	radix_tree_iter_init(&iter, id);
	/* Find (allocating nodes as needed) a free slot at index >= id. */
	slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	/*
	 * Publish the new ID through @nextid before storing @ptr so a
	 * concurrent RCU lookup never observes an uninitialised ID (see
	 * the kernel-doc above).
	 */
	*nextid = iter.index + base;
	/* there is a memory barrier inside radix_tree_iter_replace() */
	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
	/* The slot is now occupied: drop its "free" tag. */
	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);

	return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_u32);
0058 
0059 /**
0060  * idr_alloc() - Allocate an ID.
0061  * @idr: IDR handle.
0062  * @ptr: Pointer to be associated with the new ID.
0063  * @start: The minimum ID (inclusive).
0064  * @end: The maximum ID (exclusive).
0065  * @gfp: Memory allocation flags.
0066  *
0067  * Allocates an unused ID in the range specified by @start and @end.  If
0068  * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
0069  * callers to use @start + N as @end as long as N is within integer range.
0070  *
0071  * The caller should provide their own locking to ensure that two
0072  * concurrent modifications to the IDR are not possible.  Read-only
0073  * accesses to the IDR may be done under the RCU read lock or may
0074  * exclude simultaneous writers.
0075  *
0076  * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
0077  * or -ENOSPC if no free IDs could be found.
0078  */
0079 int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
0080 {
0081     u32 id = start;
0082     int ret;
0083 
0084     if (WARN_ON_ONCE(start < 0))
0085         return -EINVAL;
0086 
0087     ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
0088     if (ret)
0089         return ret;
0090 
0091     return id;
0092 }
0093 EXPORT_SYMBOL_GPL(idr_alloc);
0094 
0095 /**
0096  * idr_alloc_cyclic() - Allocate an ID cyclically.
0097  * @idr: IDR handle.
0098  * @ptr: Pointer to be associated with the new ID.
0099  * @start: The minimum ID (inclusive).
0100  * @end: The maximum ID (exclusive).
0101  * @gfp: Memory allocation flags.
0102  *
0103  * Allocates an unused ID in the range specified by @nextid and @end.  If
0104  * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
0105  * callers to use @start + N as @end as long as N is within integer range.
0106  * The search for an unused ID will start at the last ID allocated and will
0107  * wrap around to @start if no free IDs are found before reaching @end.
0108  *
0109  * The caller should provide their own locking to ensure that two
0110  * concurrent modifications to the IDR are not possible.  Read-only
0111  * accesses to the IDR may be done under the RCU read lock or may
0112  * exclude simultaneous writers.
0113  *
0114  * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
0115  * or -ENOSPC if no free IDs could be found.
0116  */
0117 int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
0118 {
0119     u32 id = idr->idr_next;
0120     int err, max = end > 0 ? end - 1 : INT_MAX;
0121 
0122     if ((int)id < start)
0123         id = start;
0124 
0125     err = idr_alloc_u32(idr, ptr, &id, max, gfp);
0126     if ((err == -ENOSPC) && (id > start)) {
0127         id = start;
0128         err = idr_alloc_u32(idr, ptr, &id, max, gfp);
0129     }
0130     if (err)
0131         return err;
0132 
0133     idr->idr_next = id + 1;
0134     return id;
0135 }
0136 EXPORT_SYMBOL(idr_alloc_cyclic);
0137 
0138 /**
0139  * idr_remove() - Remove an ID from the IDR.
0140  * @idr: IDR handle.
0141  * @id: Pointer ID.
0142  *
0143  * Removes this ID from the IDR.  If the ID was not previously in the IDR,
0144  * this function returns %NULL.
0145  *
0146  * Since this function modifies the IDR, the caller should provide their
0147  * own locking to ensure that concurrent modification of the same IDR is
0148  * not possible.
0149  *
0150  * Return: The pointer formerly associated with this ID.
0151  */
0152 void *idr_remove(struct idr *idr, unsigned long id)
0153 {
0154     return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
0155 }
0156 EXPORT_SYMBOL_GPL(idr_remove);
0157 
0158 /**
0159  * idr_find() - Return pointer for given ID.
0160  * @idr: IDR handle.
0161  * @id: Pointer ID.
0162  *
0163  * Looks up the pointer associated with this ID.  A %NULL pointer may
0164  * indicate that @id is not allocated or that the %NULL pointer was
0165  * associated with this ID.
0166  *
0167  * This function can be called under rcu_read_lock(), given that the leaf
0168  * pointers lifetimes are correctly managed.
0169  *
0170  * Return: The pointer associated with this ID.
0171  */
0172 void *idr_find(const struct idr *idr, unsigned long id)
0173 {
0174     return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
0175 }
0176 EXPORT_SYMBOL_GPL(idr_find);
0177 
/**
 * idr_for_each() - Iterate through all stored pointers.
 * @idr: IDR handle.
 * @fn: Function to be called for each pointer.
 * @data: Data passed to callback function.
 *
 * The callback function will be called for each entry in @idr, passing
 * the ID, the entry and @data.
 *
 * If @fn returns anything other than %0, the iteration stops and that
 * value is returned from this function.
 *
 * idr_for_each() can be called concurrently with idr_alloc() and
 * idr_remove() if protected by RCU.  Newly added entries may not be
 * seen and deleted entries may be seen, but adding and removing entries
 * will not cause other entries to be skipped, nor spurious ones to be seen.
 */
int idr_for_each(const struct idr *idr,
		int (*fn)(int id, void *p, void *data), void *data)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	/* User-visible IDs are offset by idr_base from the tree index. */
	int base = idr->idr_base;

	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
		int ret;
		unsigned long id = iter.index + base;

		/* @fn takes an int ID; stop rather than truncate larger IDs. */
		if (WARN_ON_ONCE(id > INT_MAX))
			break;
		ret = fn(id, rcu_dereference_raw(*slot), data);
		/* A non-zero callback return aborts the walk and is passed up. */
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(idr_for_each);
0216 
/**
 * idr_get_next_ul() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * nextid must be incremented by the user.
 */
void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	void *entry = NULL;
	/* User-visible IDs are offset by idr_base from the tree index. */
	unsigned long base = idr->idr_base;
	unsigned long id = *nextid;

	/* Translate the requested starting ID into tree-index space. */
	id = (id < base) ? 0 : id - base;
	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
		entry = rcu_dereference_raw(*slot);
		if (!entry)
			continue;	/* empty slot seen under RCU; keep scanning */
		if (!xa_is_internal(entry))
			break;		/* a real user pointer: done */
		/*
		 * Internal entries other than a retry entry (except at the
		 * tree head) are returned as-is; retry entries mean the
		 * node is being rearranged, so restart this slot.
		 * NOTE(review): exact set of internal entries reachable
		 * here depends on xarray internals — confirm.
		 */
		if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
			break;
		slot = radix_tree_iter_retry(&iter);
	}
	/* Loop exhausted without finding an entry. */
	if (!slot)
		return NULL;

	/* Report the ID of the entry we found in user-visible space. */
	*nextid = iter.index + base;
	return entry;
}
EXPORT_SYMBOL(idr_get_next_ul);
0253 
0254 /**
0255  * idr_get_next() - Find next populated entry.
0256  * @idr: IDR handle.
0257  * @nextid: Pointer to an ID.
0258  *
0259  * Returns the next populated entry in the tree with an ID greater than
0260  * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
0261  * to the ID of the found value.  To use in a loop, the value pointed to by
0262  * nextid must be incremented by the user.
0263  */
0264 void *idr_get_next(struct idr *idr, int *nextid)
0265 {
0266     unsigned long id = *nextid;
0267     void *entry = idr_get_next_ul(idr, &id);
0268 
0269     if (WARN_ON_ONCE(id > INT_MAX))
0270         return NULL;
0271     *nextid = id;
0272     return entry;
0273 }
0274 EXPORT_SYMBOL(idr_get_next);
0275 
/**
 * idr_replace() - replace pointer for given ID.
 * @idr: IDR handle.
 * @ptr: New pointer to associate with the ID.
 * @id: ID to change.
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Returns: the old value on success.  %-ENOENT indicates that @id was not
 * found.  NOTE(review): earlier documentation also promised %-EINVAL for
 * an invalid @ptr, but this implementation performs no such check.
 */
void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
	struct radix_tree_node *node;
	void __rcu **slot = NULL;
	void *entry;

	/* Translate the user-visible ID into tree-index space. */
	id -= idr->idr_base;

	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
	/* No slot, or a slot still tagged free, means @id is not allocated. */
	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
		return ERR_PTR(-ENOENT);

	__radix_tree_replace(&idr->idr_rt, node, slot, ptr);

	return entry;
}
EXPORT_SYMBOL(idr_replace);
0307 
0308 /**
0309  * DOC: IDA description
0310  *
0311  * The IDA is an ID allocator which does not provide the ability to
0312  * associate an ID with a pointer.  As such, it only needs to store one
0313  * bit per ID, and so is more space efficient than an IDR.  To use an IDA,
0314  * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
0315  * then initialise it using ida_init()).  To allocate a new ID, call
0316  * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
0317  * To free an ID, call ida_free().
0318  *
0319  * ida_destroy() can be used to dispose of an IDA without needing to
0320  * free the individual IDs in it.  You can use ida_is_empty() to find
0321  * out whether the IDA has any IDs currently allocated.
0322  *
0323  * The IDA handles its own locking.  It is safe to call any of the IDA
0324  * functions without synchronisation in your code.
0325  *
0326  * IDs are currently limited to the range [0-INT_MAX].  If this is an awkward
0327  * limitation, it should be quite straightforward to raise the maximum.
0328  */
0329 
0330 /*
0331  * Developer's notes:
0332  *
0333  * The IDA uses the functionality provided by the XArray to store bitmaps in
0334  * each entry.  The XA_FREE_MARK is only cleared when all bits in the bitmap
0335  * have been set.
0336  *
0337  * I considered telling the XArray that each slot is an order-10 node
0338  * and indexing by bit number, but the XArray can't allow a single multi-index
0339  * entry in the head, which would significantly increase memory consumption
0340  * for the IDA.  So instead we divide the index by the number of bits in the
0341  * leaf bitmap before doing a radix tree lookup.
0342  *
0343  * As an optimisation, if there are only a few low bits set in any given
0344  * leaf, instead of allocating a 128-byte bitmap, we store the bits
0345  * as a value entry.  Value entries never have the XA_FREE_MARK cleared
0346  * because we can always convert them into a bitmap entry.
0347  *
0348  * It would be possible to optimise further; once we've run out of a
0349  * single 128-byte bitmap, we currently switch to a 576-byte node, put
0350  * the 128-byte bitmap in the first entry and then start allocating extra
0351  * 128-byte entries.  We could instead use the 512 bytes of the node's
0352  * data as a bitmap before moving to that scheme.  I do not believe this
0353  * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
0354  * users of the IDA and almost none of them use more than 1024 entries.
0355  * Those that do use more than the 8192 IDs that the 512 bytes would
0356  * provide.
0357  *
0358  * The IDA always uses a lock to alloc/free.  If we add a 'test_bit'
0359  * equivalent, it will still need locking.  Going to RCU lookup would require
0360  * using RCU to free bitmaps, and that's not trivial without embedding an
0361  * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
0362  * bitmap, which is excessive.
0363  */
0364 
/**
 * ida_alloc_range() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and @max, inclusive.  The allocated ID will
 * not exceed %INT_MAX, even if @max is larger.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
			gfp_t gfp)
{
	/* Each xarray slot covers IDA_BITMAP_BITS IDs; index by chunk. */
	XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
	unsigned bit = min % IDA_BITMAP_BITS;
	unsigned long flags;
	struct ida_bitmap *bitmap, *alloc = NULL;

	/* IDs are limited to [0, INT_MAX]; a "negative" min cannot succeed. */
	if ((int)min < 0)
		return -ENOSPC;

	/* Clamp max to INT_MAX (see kernel-doc). */
	if ((int)max < 0)
		max = INT_MAX;

retry:
	xas_lock_irqsave(&xas, flags);
next:
	/* Find the next chunk with at least one free bit. */
	bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
	/* Once past the first chunk, the range starts at bit 0. */
	if (xas.xa_index > min / IDA_BITMAP_BITS)
		bit = 0;
	if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
		goto nospc;

	if (xa_is_value(bitmap)) {
		/* Small chunks are packed into the xa value itself. */
		unsigned long tmp = xa_to_value(bitmap);

		if (bit < BITS_PER_XA_VALUE) {
			bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
			if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
				goto nospc;
			if (bit < BITS_PER_XA_VALUE) {
				/* Free bit fits in the value entry: take it. */
				tmp |= 1UL << bit;
				xas_store(&xas, xa_mk_value(tmp));
				goto out;
			}
		}
		/* Value entry is exhausted: upgrade it to a full bitmap. */
		bitmap = alloc;
		if (!bitmap)
			bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
		if (!bitmap)
			goto alloc;	/* drop lock, allocate with @gfp, retry */
		bitmap->bitmap[0] = tmp;
		xas_store(&xas, bitmap);
		if (xas_error(&xas)) {
			/* Store failed; reset so the bitmap can be reused. */
			bitmap->bitmap[0] = 0;
			goto out;
		}
	}

	if (bitmap) {
		bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
		if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
			goto nospc;
		/* This chunk is actually full; look at the next one. */
		if (bit == IDA_BITMAP_BITS)
			goto next;

		__set_bit(bit, bitmap->bitmap);
		/* Last free bit taken: the chunk is no longer marked free. */
		if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
			xas_clear_mark(&xas, XA_FREE_MARK);
	} else {
		/* No entry at this index yet: create one. */
		if (bit < BITS_PER_XA_VALUE) {
			/* Low bit: a compact value entry suffices. */
			bitmap = xa_mk_value(1UL << bit);
		} else {
			bitmap = alloc;
			if (!bitmap)
				bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
			if (!bitmap)
				goto alloc;
			__set_bit(bit, bitmap->bitmap);
		}
		xas_store(&xas, bitmap);
	}
out:
	xas_unlock_irqrestore(&xas, flags);
	/* xas_nomem() allocates xarray nodes with @gfp and asks us to retry. */
	if (xas_nomem(&xas, gfp)) {
		xas.xa_index = min / IDA_BITMAP_BITS;
		bit = min % IDA_BITMAP_BITS;
		goto retry;
	}
	/* Free the preallocated bitmap if it ended up unused. */
	if (bitmap != alloc)
		kfree(alloc);
	if (xas_error(&xas))
		return xas_error(&xas);
	return xas.xa_index * IDA_BITMAP_BITS + bit;
alloc:
	/* GFP_NOWAIT failed under the lock: allocate with @gfp unlocked. */
	xas_unlock_irqrestore(&xas, flags);
	alloc = kzalloc(sizeof(*bitmap), gfp);
	if (!alloc)
		return -ENOMEM;
	xas_set(&xas, min / IDA_BITMAP_BITS);
	bit = min % IDA_BITMAP_BITS;
	goto retry;
nospc:
	xas_unlock_irqrestore(&xas, flags);
	kfree(alloc);
	return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
0478 
/**
 * ida_free() - Release an allocated ID.
 * @ida: IDA handle.
 * @id: Previously allocated ID.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 */
void ida_free(struct ida *ida, unsigned int id)
{
	/* Each xarray slot covers IDA_BITMAP_BITS IDs; index by chunk. */
	XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
	unsigned bit = id % IDA_BITMAP_BITS;
	struct ida_bitmap *bitmap;
	unsigned long flags;

	/* IDs above INT_MAX are never handed out, so nothing to free. */
	if ((int)id < 0)
		return;

	xas_lock_irqsave(&xas, flags);
	bitmap = xas_load(&xas);

	if (xa_is_value(bitmap)) {
		/* Compact value entry: the bits live in the entry itself. */
		unsigned long v = xa_to_value(bitmap);
		if (bit >= BITS_PER_XA_VALUE)
			goto err;	/* value entries can't hold this bit */
		if (!(v & (1UL << bit)))
			goto err;	/* bit was not allocated */
		v &= ~(1UL << bit);
		if (!v)
			goto delete;	/* last bit cleared: remove the entry */
		xas_store(&xas, xa_mk_value(v));
	} else {
		if (!test_bit(bit, bitmap->bitmap))
			goto err;	/* bit was not allocated */
		__clear_bit(bit, bitmap->bitmap);
		/* The chunk has a free bit again: re-mark it. */
		xas_set_mark(&xas, XA_FREE_MARK);
		if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
			kfree(bitmap);
/* Shared tail: also the target of the value-entry path above. */
delete:
			xas_store(&xas, NULL);
		}
	}
	xas_unlock_irqrestore(&xas, flags);
	return;
 err:
	xas_unlock_irqrestore(&xas, flags);
	WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_free);
0528 
0529 /**
0530  * ida_destroy() - Free all IDs.
0531  * @ida: IDA handle.
0532  *
0533  * Calling this function frees all IDs and releases all resources used
0534  * by an IDA.  When this call returns, the IDA is empty and can be reused
0535  * or freed.  If the IDA is already empty, there is no need to call this
0536  * function.
0537  *
0538  * Context: Any context. It is safe to call this function without
0539  * locking in your code.
0540  */
0541 void ida_destroy(struct ida *ida)
0542 {
0543     XA_STATE(xas, &ida->xa, 0);
0544     struct ida_bitmap *bitmap;
0545     unsigned long flags;
0546 
0547     xas_lock_irqsave(&xas, flags);
0548     xas_for_each(&xas, bitmap, ULONG_MAX) {
0549         if (!xa_is_value(bitmap))
0550             kfree(bitmap);
0551         xas_store(&xas, NULL);
0552     }
0553     xas_unlock_irqrestore(&xas, flags);
0554 }
0555 EXPORT_SYMBOL(ida_destroy);
0556 
0557 #ifndef __KERNEL__
0558 extern void xa_dump_index(unsigned long index, unsigned int shift);
0559 #define IDA_CHUNK_SHIFT     ilog2(IDA_BITMAP_BITS)
0560 
/* Recursively dump one xarray entry of an IDA (userspace test harness only). */
static void ida_dump_entry(void *entry, unsigned long index)
{
	unsigned long i;

	if (!entry)
		return;

	if (xa_is_node(entry)) {
		/* Interior node: print it, then recurse into its slots. */
		struct xa_node *node = xa_to_node(entry);
		/*
		 * Shift in ID space: node shift plus the per-slot chunk of
		 * IDA bits plus the xarray chunk shift.
		 * NOTE(review): derived from the xarray layout — confirm.
		 */
		unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
			XA_CHUNK_SHIFT;

		xa_dump_index(index * IDA_BITMAP_BITS, shift);
		xa_dump_node(node);
		for (i = 0; i < XA_CHUNK_SIZE; i++)
			ida_dump_entry(node->slots[i],
					index | (i << node->shift));
	} else if (xa_is_value(entry)) {
		/* Compact value entry: bits are stored inline. */
		xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
		pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
	} else {
		/* Full bitmap entry: print every word. */
		struct ida_bitmap *bitmap = entry;

		xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
		pr_cont("bitmap: %p data", bitmap);
		for (i = 0; i < IDA_BITMAP_LONGS; i++)
			pr_cont(" %lx", bitmap->bitmap[i]);
		pr_cont("\n");
	}
}
0591 
0592 static void ida_dump(struct ida *ida)
0593 {
0594     struct xarray *xa = &ida->xa;
0595     pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
0596                 xa->xa_flags >> ROOT_TAG_SHIFT);
0597     ida_dump_entry(xa->xa_head, 0);
0598 }
0599 #endif