0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #ifndef __IDR_H__
0013 #define __IDR_H__
0014
0015 #include <linux/radix-tree.h>
0016 #include <linux/gfp.h>
0017 #include <linux/percpu.h>
0018
/*
 * IDR: maps integer IDs to pointers, built on a radix tree.  The tree
 * root carries an extra tag (IDR_FREE) used to locate unallocated IDs.
 */
struct idr {
	struct radix_tree_root	idr_rt;		/* underlying radix-tree storage */
	unsigned int		idr_base;	/* base value; assumes IDs are offset by this — see idr_init_base() */
	unsigned int		idr_next;	/* cursor read/written by idr_{get,set}_cursor(); presumably the
						 * next candidate for idr_alloc_cyclic() — confirm in lib/idr.c */
};
0024
0025
0026
0027
0028
/* Radix-tree tag number used to mark slots with free IDs below them. */
#define IDR_FREE 0

/*
 * Root marker flags: identify the tree as an IDR (ROOT_IS_IDR) and
 * pre-set the IDR_FREE root tag so an empty tree reports free space.
 */
#define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \
				(1 << (ROOT_TAG_SHIFT + IDR_FREE)))

/*
 * IDR_INIT_BASE() - Static initialiser for an IDR with a base value.
 * @name: Name of the IDR (forwarded to RADIX_TREE_INIT).
 * @base: Base value stored in idr_base.
 *
 * The cyclic cursor (idr_next) always starts at 0.
 */
#define IDR_INIT_BASE(name, base) { \
	.idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER), \
	.idr_base = (base), \
	.idr_next = 0, \
}
0040
0041
0042
0043
0044
0045
0046
/**
 * IDR_INIT() - Static initialiser for an IDR.
 * @name: Name of the IDR.
 *
 * Same as IDR_INIT_BASE() with a base value of 0.
 */
#define IDR_INIT(name) IDR_INIT_BASE(name, 0)
0048
0049
0050
0051
0052
0053
0054
0055
/**
 * DEFINE_IDR() - Define and statically initialise an IDR variable.
 * @name: Name of the new IDR.
 */
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
0057
0058
0059
0060
0061
0062
0063
0064
0065
/**
 * idr_get_cursor - Return the current position of the cyclic allocator
 * @idr: idr handle
 *
 * Reads the idr_next cursor.  READ_ONCE() makes the load tolerant of a
 * concurrent idr_set_cursor() (or other writer of idr_next) without
 * requiring the caller to hold a lock; it is not a full synchronisation
 * primitive.
 */
static inline unsigned int idr_get_cursor(const struct idr *idr)
{
	return READ_ONCE(idr->idr_next);
}
0070
0071
0072
0073
0074
0075
0076
0077
0078
/**
 * idr_set_cursor - Set the current position of the cyclic allocator
 * @idr: idr handle
 * @val: new position
 *
 * Stores @val into the idr_next cursor.  WRITE_ONCE() pairs with the
 * READ_ONCE() in idr_get_cursor() so a concurrent reader sees either
 * the old or the new value, never a torn one.
 */
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
	WRITE_ONCE(idr->idr_next, val);
}
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
/*
 * Locking helpers for the IDR.  The lock lives inside the embedded
 * radix-tree root; these wrappers simply forward to the corresponding
 * xa_lock*() flavour (plain, bottom-half, irq, irqsave).
 */
#define idr_lock(idr)		xa_lock(&(idr)->idr_rt)
#define idr_unlock(idr)		xa_unlock(&(idr)->idr_rt)
#define idr_lock_bh(idr)	xa_lock_bh(&(idr)->idr_rt)
#define idr_unlock_bh(idr)	xa_unlock_bh(&(idr)->idr_rt)
#define idr_lock_irq(idr)	xa_lock_irq(&(idr)->idr_rt)
#define idr_unlock_irq(idr)	xa_unlock_irq(&(idr)->idr_rt)
#define idr_lock_irqsave(idr, flags) \
				xa_lock_irqsave(&(idr)->idr_rt, flags)
#define idr_unlock_irqrestore(idr, flags) \
				xa_unlock_irqrestore(&(idr)->idr_rt, flags)
0111
/* Pre-allocate node memory before taking a lock; pair with idr_preload_end(). */
void idr_preload(gfp_t gfp_mask);

/*
 * Allocation entry points (defined in lib/idr.c).  NOTE(review): the
 * start/end vs. max conventions below are taken from the signatures;
 * confirm inclusive/exclusive bounds against the .c implementation.
 */
int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
				unsigned long max, gfp_t);
int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
/* Remove @id, returning the pointer that was stored there (if any). */
void *idr_remove(struct idr *, unsigned long id);
/* Look up the pointer associated with @id. */
void *idr_find(const struct idr *, unsigned long id);
/* Call @fn for each occupied ID; iteration details are in lib/idr.c. */
int idr_for_each(const struct idr *,
		 int (*fn)(int id, void *p, void *data), void *data);
/* Find the next occupied entry at or above *nextid (int and ulong variants). */
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ul(struct idr *, unsigned long *nextid);
/* Replace the pointer stored at @id. */
void *idr_replace(struct idr *, void *, unsigned long id);
/* Release all memory held by the IDR. */
void idr_destroy(struct idr *);
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135 static inline void idr_init_base(struct idr *idr, int base)
0136 {
0137 INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
0138 idr->idr_base = base;
0139 idr->idr_next = 0;
0140 }
0141
0142
0143
0144
0145
0146
0147
0148
0149 static inline void idr_init(struct idr *idr)
0150 {
0151 idr_init_base(idr, 0);
0152 }
0153
0154
0155
0156
0157
0158
0159
0160 static inline bool idr_is_empty(const struct idr *idr)
0161 {
0162 return radix_tree_empty(&idr->idr_rt) &&
0163 radix_tree_tagged(&idr->idr_rt, IDR_FREE);
0164 }
0165
0166
0167
0168
0169
0170
0171
/**
 * idr_preload_end - end a preload section started with idr_preload()
 *
 * Releases the per-CPU radix-tree preload lock, presumably acquired by
 * idr_preload() (NOTE(review): confirm against lib/radix-tree.c).
 * Every idr_preload() call must be matched with one idr_preload_end().
 */
static inline void idr_preload_end(void)
{
	local_unlock(&radix_tree_preloads.lock);
}
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
/**
 * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor.
 * @id: Entry ID (int); set to each occupied ID in turn.
 *
 * @entry and @id need not be initialised before the loop; @entry is
 * NULL on normal termination.  The increment uses 1U so the addition
 * happens in unsigned arithmetic, avoiding signed-overflow UB when
 * @id reaches INT_MAX.
 */
#define idr_for_each_entry(idr, entry, id) \
	for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
/**
 * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor.
 * @tmp: An unsigned long to hold the previous ID.
 * @id: Entry ID (unsigned long).
 *
 * The `tmp <= id` condition stops the loop once ++id wraps around to 0
 * (which would make tmp > id), preventing endless iteration when the
 * last occupied ID is ULONG_MAX.
 */
#define idr_for_each_entry_ul(idr, entry, tmp, id) \
	for (tmp = 0, id = 0; \
	     tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
	     tmp = id, ++id)
0205
0206
0207
0208
0209
0210
0211
0212
0213
/**
 * idr_for_each_entry_continue() - Continue iteration over an IDR's elements.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor.
 * @id: Entry ID; iteration resumes at or after the current value of @id.
 *
 * Continues after an earlier, possibly interrupted, iteration.  Unlike
 * idr_for_each_entry(), @id is not reset to 0 first.
 */
#define idr_for_each_entry_continue(idr, entry, id) \
	for ((entry) = idr_get_next((idr), &(id)); \
	     entry; \
	     ++id, (entry) = idr_get_next((idr), &(id)))
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
/**
 * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor.
 * @tmp: An unsigned long to hold the previous ID.
 * @id: Entry ID (unsigned long); iteration resumes at or after @id.
 *
 * Unsigned-long variant of idr_for_each_entry_continue().  As in
 * idr_for_each_entry_ul(), `tmp <= id` terminates the loop if ++id
 * wraps past ULONG_MAX.
 */
#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
	for (tmp = id; \
	     tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
	     tmp = id, ++id)
0232
0233
0234
0235
/*
 * IDA: an ID allocator that tracks only which IDs are in use, with no
 * associated pointers.  Free/used state is kept in fixed-size bitmap
 * chunks stored in an XArray.
 */
#define IDA_CHUNK_SIZE 128	/* bytes per bitmap chunk; 128 bytes == 1024 bits */
#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8)

/* One chunk of the ID bitmap: IDA_BITMAP_BITS IDs. */
struct ida_bitmap {
	unsigned long bitmap[IDA_BITMAP_LONGS];
};

struct ida {
	struct xarray xa;	/* bitmap chunks indexed by ID / IDA_BITMAP_BITS (presumably) */
};
0247
/* XArray flags used for an IDA: irq-safe lock plus allocation tracking. */
#define IDA_INIT_FLAGS (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)

/**
 * IDA_INIT() - Static initialiser for an IDA.
 * @name: Name of the IDA.
 */
#define IDA_INIT(name) { \
	.xa = XARRAY_INIT(name, IDA_INIT_FLAGS) \
}
/**
 * DEFINE_IDA() - Define and statically initialise an IDA variable.
 * @name: Name of the new IDA.
 */
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
0254
/* Allocate an unused ID in [min, max]; both bounds inclusive (per the
 * ida_alloc*() wrappers below, which pass ~0 as an open upper bound). */
int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
/* Release a previously allocated ID. */
void ida_free(struct ida *, unsigned int id);
/* Free all memory held by the IDA. */
void ida_destroy(struct ida *ida);
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270
0271 static inline int ida_alloc(struct ida *ida, gfp_t gfp)
0272 {
0273 return ida_alloc_range(ida, 0, ~0, gfp);
0274 }
0275
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
0286
0287
0288
0289 static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
0290 {
0291 return ida_alloc_range(ida, min, ~0, gfp);
0292 }
0293
0294
0295
0296
0297
0298
0299
0300
0301
0302
0303
0304
0305
0306
0307 static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
0308 {
0309 return ida_alloc_range(ida, 0, max, gfp);
0310 }
0311
0312 static inline void ida_init(struct ida *ida)
0313 {
0314 xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
0315 }
0316
0317
0318
0319
0320
/*
 * Legacy wrappers around the ida_alloc*()/ida_free() API.  Note that
 * ida_simple_get() takes an *exclusive* upper bound (@end) and converts
 * it to the inclusive maximum that ida_alloc_range() expects.
 */
#define ida_simple_get(ida, start, end, gfp) \
			ida_alloc_range(ida, start, (end) - 1, gfp)
#define ida_simple_remove(ida, id) ida_free(ida, id)
0324
0325 static inline bool ida_is_empty(const struct ida *ida)
0326 {
0327 return xa_empty(&ida->xa);
0328 }
0329 #endif