/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *  Copyright (C) 2002 by Concurrent Computer Corporation
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

struct idr {
    struct radix_tree_root  idr_rt;
    unsigned int        idr_base;
    unsigned int        idr_next;
};

/*
 * The IDR API does not expose the tagging functionality of the radix tree
 * to users.  Use tag 0 to track whether a node has free space below it.
 */
#define IDR_FREE    0

/* Set the IDR flag and the IDR_FREE tag */
#define IDR_RT_MARKER   (ROOT_IS_IDR | (__force gfp_t)          \
                    (1 << (ROOT_TAG_SHIFT + IDR_FREE)))

#define IDR_INIT_BASE(name, base) {                 \
    .idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER),         \
    .idr_base = (base),                     \
    .idr_next = 0,                          \
}

/**
 * IDR_INIT() - Initialise an IDR.
 * @name: Name of IDR.
 *
 * A freshly-initialised IDR contains no IDs.
 */
#define IDR_INIT(name)  IDR_INIT_BASE(name, 0)

/**
 * DEFINE_IDR() - Define a statically-allocated IDR.
 * @name: Name of IDR.
 *
 * An IDR defined using this macro is ready for use with no additional
 * initialisation required.  It contains no IDs.
 */
#define DEFINE_IDR(name)    struct idr name = IDR_INIT(name)

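/*
 * Illustrative sketch (not part of this header): an IDR defined with
 * DEFINE_IDR() is usable immediately.  The names foo_idr, struct foo and
 * foo_register() below are hypothetical, and the caller is assumed to
 * serialise modifications of the IDR externally.
 *
 *      static DEFINE_IDR(foo_idr);
 *
 *      int foo_register(struct foo *foo)
 *      {
 *          int id = idr_alloc(&foo_idr, foo, 0, 0, GFP_KERNEL);
 *
 *          if (id < 0)
 *              return id;      // -ENOMEM or -ENOSPC
 *          foo->id = id;
 *          return 0;
 *      }
 */
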
/**
 * idr_get_cursor - Return the current position of the cyclic allocator
 * @idr: idr handle
 *
 * The value returned is the value that will be next returned from
 * idr_alloc_cyclic() if it is free (otherwise the search will start from
 * this position).
 */
static inline unsigned int idr_get_cursor(const struct idr *idr)
{
    return READ_ONCE(idr->idr_next);
}

/**
 * idr_set_cursor - Set the current position of the cyclic allocator
 * @idr: idr handle
 * @val: new position
 *
 * The next call to idr_alloc_cyclic() will return @val if it is free
 * (otherwise the search will start from this position).
 */
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
    WRITE_ONCE(idr->idr_next, val);
}

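/*
 * Sketch of how the cursor accessors above might be used (an assumption,
 * not taken from this header): preserving the cyclic position across a
 * rebuild of a hypothetical foo_idr.  Allocation is still serialised by
 * the caller.
 *
 *      unsigned int pos = idr_get_cursor(&foo_idr);
 *
 *      ... tear down and re-populate foo_idr ...
 *
 *      idr_set_cursor(&foo_idr, pos);
 *      ... the next idr_alloc_cyclic() searches from pos ...
 */
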
/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() is able to be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */

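/*
 * A lookup sketch following the rules above (foo_idr, struct foo and its
 * refcount field are hypothetical; struct foo is assumed to be freed only
 * after an RCU grace period):
 *
 *      struct foo *foo_lookup(unsigned long id)
 *      {
 *          struct foo *foo;
 *
 *          rcu_read_lock();
 *          foo = idr_find(&foo_idr, id);
 *          if (foo && !refcount_inc_not_zero(&foo->ref))
 *              foo = NULL;             // object is being torn down
 *          rcu_read_unlock();
 *
 *          return foo;
 *      }
 */
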
#define idr_lock(idr)       xa_lock(&(idr)->idr_rt)
#define idr_unlock(idr)     xa_unlock(&(idr)->idr_rt)
#define idr_lock_bh(idr)    xa_lock_bh(&(idr)->idr_rt)
#define idr_unlock_bh(idr)  xa_unlock_bh(&(idr)->idr_rt)
#define idr_lock_irq(idr)   xa_lock_irq(&(idr)->idr_rt)
#define idr_unlock_irq(idr) xa_unlock_irq(&(idr)->idr_rt)
#define idr_lock_irqsave(idr, flags) \
                xa_lock_irqsave(&(idr)->idr_rt, flags)
#define idr_unlock_irqrestore(idr, flags) \
                xa_unlock_irqrestore(&(idr)->idr_rt, flags)

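/*
 * Illustrative use of the lock wrappers above (a sketch, not a requirement;
 * any external serialisation works).  They take the spinlock embedded in
 * the radix tree root, which callers may reuse to serialise modifications.
 * foo_idr and struct foo are hypothetical.
 *
 *      void foo_unregister(struct foo *foo)
 *      {
 *          idr_lock(&foo_idr);
 *          idr_remove(&foo_idr, foo->id);
 *          idr_unlock(&foo_idr);
 *      }
 */
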
void idr_preload(gfp_t gfp_mask);

int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
                unsigned long max, gfp_t);
int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
void *idr_remove(struct idr *, unsigned long id);
void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
         int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ul(struct idr *, unsigned long *nextid);
void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);

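/*
 * Sketch of the replace/remove calls declared above (foo_idr is
 * hypothetical; modifications are assumed to be serialised by the caller,
 * and struct foo is assumed to embed an rcu_head named "rcu"):
 *
 *      // Swap in a new object for an existing ID and retrieve the old one:
 *      old = idr_replace(&foo_idr, new, id);
 *      if (IS_ERR(old))
 *          return PTR_ERR(old);        // e.g. -ENOENT if id is not allocated
 *
 *      // Remove an ID; the stored pointer is returned, or NULL if absent:
 *      foo = idr_remove(&foo_idr, id);
 *      if (foo)
 *          kfree_rcu(foo, rcu);        // defer the free past an RCU grace period
 */
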
/**
 * idr_init_base() - Initialise an IDR.
 * @idr: IDR handle.
 * @base: The base value for the IDR.
 *
 * This variation of idr_init() creates an IDR which will allocate IDs
 * starting at %base.
 */
static inline void idr_init_base(struct idr *idr, int base)
{
    INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
    idr->idr_base = base;
    idr->idr_next = 0;
}

/**
 * idr_init() - Initialise an IDR.
 * @idr: IDR handle.
 *
 * Initialise a dynamically allocated IDR.  To initialise a
 * statically allocated IDR, use DEFINE_IDR().
 */
static inline void idr_init(struct idr *idr)
{
    idr_init_base(idr, 0);
}

/**
 * idr_is_empty() - Are there any IDs allocated?
 * @idr: IDR handle.
 *
 * Return: %true if no IDs are currently allocated in this IDR.
 */
static inline bool idr_is_empty(const struct idr *idr)
{
    return radix_tree_empty(&idr->idr_rt) &&
        radix_tree_tagged(&idr->idr_rt, IDR_FREE);
}

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
    local_unlock(&radix_tree_preloads.lock);
}

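/*
 * The usual pairing of idr_preload() and idr_preload_end(): preallocate
 * with a sleeping GFP mask outside the lock, then allocate atomically
 * inside it.  A sketch (lock, idr, ptr, start and end are placeholders):
 *
 *      idr_preload(GFP_KERNEL);
 *      spin_lock(lock);
 *
 *      id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *      spin_unlock(lock);
 *      idr_preload_end();
 *      if (id < 0)
 *          ... error handling ...
 */
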
/**
 * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idr, entry, id)          \
    for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)

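/*
 * Iteration sketch for idr_for_each_entry() (foo_idr and struct foo are
 * hypothetical; the caller is assumed to hold whatever lock protects the
 * IDR against concurrent modification):
 *
 *      struct foo *foo;
 *      int id;
 *
 *      idr_for_each_entry(&foo_idr, foo, id)
 *          pr_info("id %d -> %p\n", id, foo);
 *      // after normal termination, foo == NULL
 */
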
/**
 * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor.
 * @tmp: A temporary placeholder for ID.
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry_ul(idr, entry, tmp, id)          \
    for (tmp = 0, id = 0;                       \
         tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
         tmp = id, ++id)

/**
 * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 */
#define idr_for_each_entry_continue(idr, entry, id)         \
    for ((entry) = idr_get_next((idr), &(id));          \
         entry;                         \
         ++id, (entry) = idr_get_next((idr), &(id)))

/**
 * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @tmp: A temporary placeholder for ID.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 */
#define idr_for_each_entry_continue_ul(idr, entry, tmp, id)     \
    for (tmp = id;                          \
         tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
         tmp = id, ++id)

/*
 * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
 */
#define IDA_CHUNK_SIZE      128 /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS    (IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS     (IDA_BITMAP_LONGS * sizeof(long) * 8)

struct ida_bitmap {
    unsigned long       bitmap[IDA_BITMAP_LONGS];
};

struct ida {
    struct xarray xa;
};

#define IDA_INIT_FLAGS  (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)

#define IDA_INIT(name)  {                       \
    .xa = XARRAY_INIT(name, IDA_INIT_FLAGS)             \
}
#define DEFINE_IDA(name)    struct ida name = IDA_INIT(name)

int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);

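/*
 * Minimal IDA sketch using the declarations above (foo_ida and the helper
 * functions are hypothetical):
 *
 *      static DEFINE_IDA(foo_ida);
 *
 *      int foo_get_minor(void)
 *      {
 *          // allocate the lowest free ID in [0, 255]
 *          return ida_alloc_range(&foo_ida, 0, 255, GFP_KERNEL);
 *      }
 *
 *      void foo_put_minor(int minor)
 *      {
 *          ida_free(&foo_ida, minor);
 *      }
 */
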
/**
 * ida_alloc() - Allocate an unused ID.
 * @ida: IDA handle.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between 0 and %INT_MAX, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc(struct ida *ida, gfp_t gfp)
{
    return ida_alloc_range(ida, 0, ~0, gfp);
}

/**
 * ida_alloc_min() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and %INT_MAX, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
    return ida_alloc_range(ida, min, ~0, gfp);
}

/**
 * ida_alloc_max() - Allocate an unused ID.
 * @ida: IDA handle.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between 0 and @max, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{
    return ida_alloc_range(ida, 0, max, gfp);
}

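/*
 * The wrappers above simply fix one or both ends of the range passed to
 * ida_alloc_range().  For example (a sketch; foo_ida is hypothetical):
 *
 *      id = ida_alloc(&foo_ida, GFP_KERNEL);           // 0 .. INT_MAX
 *      id = ida_alloc_min(&foo_ida, 1, GFP_KERNEL);    // 1 .. INT_MAX
 *      id = ida_alloc_max(&foo_ida, 63, GFP_KERNEL);   // 0 .. 63
 */
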
static inline void ida_init(struct ida *ida)
{
    xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}

/*
 * ida_simple_get() and ida_simple_remove() are deprecated. Use
 * ida_alloc() and ida_free() instead respectively.
 */
#define ida_simple_get(ida, start, end, gfp)    \
            ida_alloc_range(ida, start, (end) - 1, gfp)
#define ida_simple_remove(ida, id)  ida_free(ida, id)

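/*
 * Migration sketch for the deprecated helpers above: ida_simple_get()
 * took an exclusive @end while ida_alloc_range() takes an inclusive @max,
 * hence the "- 1" in the define.  foo_ida is hypothetical.
 *
 *      // old:
 *      id = ida_simple_get(&foo_ida, 0, 128, GFP_KERNEL);
 *      // new (equivalent):
 *      id = ida_alloc_range(&foo_ida, 0, 127, GFP_KERNEL);
 */
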
static inline bool ida_is_empty(const struct ida *ida)
{
    return xa_empty(&ida->xa);
}
#endif /* __IDR_H__ */