/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has to
 * be taken.  So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in an NMI handler.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
    return chunk->end_addr - chunk->start_addr + 1;
}

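/*
 * set_bits_ll/clear_bits_ll: lockless single-word helpers used by the
 * bitmap operations below.  Each helper snapshots the word, checks that
 * the requested bits are still free (or still set), and retries the
 * cmpxchg until it observes an unmodified word, returning -EBUSY if a
 * concurrent user already grabbed (or already released) one of the bits.
 */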
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
    unsigned long val, nval;

    nval = *addr;
    do {
        val = nval;
        if (val & mask_to_set)
            return -EBUSY;
        cpu_relax();
    } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

    return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
    unsigned long val, nval;

    nval = *addr;
    do {
        val = nval;
        if ((val & mask_to_clear) != mask_to_clear)
            return -EBUSY;
        cpu_relax();
    } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

    return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users set the same bit, one of them loses the race and gets back the
 * number of bits still left to set; on success 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
    unsigned long *p = map + BIT_WORD(start);
    const int size = start + nr;
    int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
    unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

    while (nr - bits_to_set >= 0) {
        if (set_bits_ll(p, mask_to_set))
            return nr;
        nr -= bits_to_set;
        bits_to_set = BITS_PER_LONG;
        mask_to_set = ~0UL;
        p++;
    }
    if (nr) {
        mask_to_set &= BITMAP_LAST_WORD_MASK(size);
        if (set_bits_ll(p, mask_to_set))
            return nr;
    }

    return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users clear the same bit, one of them loses the race and gets back
 * the number of bits still left to clear; on success 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
    unsigned long *p = map + BIT_WORD(start);
    const int size = start + nr;
    int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
    unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

    while (nr - bits_to_clear >= 0) {
        if (clear_bits_ll(p, mask_to_clear))
            return nr;
        nr -= bits_to_clear;
        bits_to_clear = BITS_PER_LONG;
        mask_to_clear = ~0UL;
        p++;
    }
    if (nr) {
        mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
        if (clear_bits_ll(p, mask_to_clear))
            return nr;
    }

    return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
    struct gen_pool *pool;

    pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
    if (pool != NULL) {
        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->chunks);
        pool->min_alloc_order = min_alloc_order;
        pool->algo = gen_pool_first_fit;
        pool->data = NULL;
        pool->name = NULL;
    }
    return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
         size_t size, int nid)
{
    struct gen_pool_chunk *chunk;
    int nbits = size >> pool->min_alloc_order;
    int nbytes = sizeof(struct gen_pool_chunk) +
                BITS_TO_LONGS(nbits) * sizeof(long);

    chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
    if (unlikely(chunk == NULL))
        return -ENOMEM;

    chunk->phys_addr = phys;
    chunk->start_addr = virt;
    chunk->end_addr = virt + size - 1;
    atomic_set(&chunk->avail, size);

    spin_lock(&pool->lock);
    list_add_rcu(&chunk->next_chunk, &pool->chunks);
    spin_unlock(&pool->lock);

    return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
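
/*
 * Illustrative sketch, not part of the original file: typical pool setup
 * over a chunk of device-local memory.  The addresses, the size and the
 * example_sram_pool_init() helper below are hypothetical.
 */
static struct gen_pool *example_sram_pool;

static int __maybe_unused example_sram_pool_init(unsigned long sram_virt,
                         phys_addr_t sram_phys, size_t sram_size)
{
    int ret;

    /* Each bitmap bit represents 2^5 = 32 bytes. */
    example_sram_pool = gen_pool_create(5, -1);
    if (!example_sram_pool)
        return -ENOMEM;

    ret = gen_pool_add_virt(example_sram_pool, sram_virt, sram_phys,
                sram_size, -1);
    if (ret) {
        gen_pool_destroy(example_sram_pool);
        example_sram_pool = NULL;
    }
    return ret;
}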

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
    struct gen_pool_chunk *chunk;
    phys_addr_t paddr = -1;

    rcu_read_lock();
    list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
        if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
            paddr = chunk->phys_addr + (addr - chunk->start_addr);
            break;
        }
    }
    rcu_read_unlock();

    return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
    struct list_head *_chunk, *_next_chunk;
    struct gen_pool_chunk *chunk;
    int order = pool->min_alloc_order;
    int bit, end_bit;

    list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
        chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
        list_del(&chunk->next_chunk);

        end_bit = chunk_size(chunk) >> order;
        bit = find_next_bit(chunk->bits, end_bit, 0);
        BUG_ON(bit < end_bit);

        kfree(chunk);
    }
    kfree_const(pool->name);
    kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
    return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_alloc);
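
/*
 * Illustrative sketch, not part of the original file: a simple
 * allocate/use/free round trip.  The pool pointer and the 128-byte size
 * are hypothetical; note that the caller must remember the size, since
 * the pool does not record it.
 */
static void __maybe_unused example_alloc_free(struct gen_pool *pool)
{
    unsigned long vaddr;

    vaddr = gen_pool_alloc(pool, 128);
    if (!vaddr)
        return;        /* pool exhausted or no chunk large enough */

    /* ... use the 128-byte region starting at vaddr ... */

    gen_pool_free(pool, vaddr, 128);
}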

/**
 * gen_pool_alloc_algo - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
        genpool_algo_t algo, void *data)
{
    struct gen_pool_chunk *chunk;
    unsigned long addr = 0;
    int order = pool->min_alloc_order;
    int nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
    BUG_ON(in_nmi());
#endif

    if (size == 0)
        return 0;

    nbits = (size + (1UL << order) - 1) >> order;
    rcu_read_lock();
    list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
        if (size > atomic_read(&chunk->avail))
            continue;

        start_bit = 0;
        end_bit = chunk_size(chunk) >> order;
retry:
        start_bit = algo(chunk->bits, end_bit, start_bit,
                 nbits, data, pool);
        if (start_bit >= end_bit)
            continue;
        remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
        if (remain) {
            remain = bitmap_clear_ll(chunk->bits, start_bit,
                         nbits - remain);
            BUG_ON(remain);
            goto retry;
        }

        addr = chunk->start_addr + ((unsigned long)start_bit << order);
        size = nbits << order;
        atomic_sub(size, &chunk->avail);
        break;
    }
    rcu_read_unlock();
    return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo);

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
    unsigned long vaddr;

    if (!pool)
        return NULL;

    vaddr = gen_pool_alloc(pool, size);
    if (!vaddr)
        return NULL;

    if (dma)
        *dma = gen_pool_virt_to_phys(pool, vaddr);

    return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
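
/*
 * Illustrative sketch, not part of the original file: allocating a small
 * buffer together with its bus address, e.g. for a DMA descriptor.  The
 * pool pointer and the 256-byte size are hypothetical.
 */
static void __maybe_unused example_dma_descriptor(struct gen_pool *pool)
{
    dma_addr_t dma;
    void *desc;

    desc = gen_pool_dma_alloc(pool, 256, &dma);
    if (!desc)
        return;

    /* ... hand 'dma' to the device, fill 'desc' from the CPU ... */

    gen_pool_free(pool, (unsigned long)desc, 256);
}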

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
    struct gen_pool_chunk *chunk;
    int order = pool->min_alloc_order;
    int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
    BUG_ON(in_nmi());
#endif

    nbits = (size + (1UL << order) - 1) >> order;
    rcu_read_lock();
    list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
        if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
            BUG_ON(addr + size - 1 > chunk->end_addr);
            start_bit = (addr - chunk->start_addr) >> order;
            remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
            BUG_ON(remain);
            size = nbits << order;
            atomic_add(size, &chunk->avail);
            rcu_read_unlock();
            return;
        }
    }
    rcu_read_unlock();
    BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:   the generic memory pool
 * @func:   func to call
 * @data:   additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
    void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
    void *data)
{
    struct gen_pool_chunk *chunk;

    rcu_read_lock();
    list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
        func(pool, chunk, data);
    rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
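
/*
 * Illustrative sketch, not part of the original file: a callback suitable
 * for gen_pool_for_each_chunk() that reports every chunk's range, e.g.
 *     gen_pool_for_each_chunk(pool, example_dump_chunk, NULL);
 * The example_dump_chunk() name is made up.
 */
static void __maybe_unused example_dump_chunk(struct gen_pool *pool,
                          struct gen_pool_chunk *chunk, void *data)
{
    pr_info("chunk %#lx-%#lx, %d bytes available\n",
        chunk->start_addr, chunk->end_addr, atomic_read(&chunk->avail));
}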

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool:   the generic memory pool
 * @start:  start address
 * @size:   size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
            size_t size)
{
    bool found = false;
    unsigned long end = start + size - 1;
    struct gen_pool_chunk *chunk;

    rcu_read_lock();
    list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
        if (start >= chunk->start_addr && start <= chunk->end_addr) {
            if (end <= chunk->end_addr) {
                found = true;
                break;
            }
        }
    }
    rcu_read_unlock();
    return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
    struct gen_pool_chunk *chunk;
    size_t avail = 0;

    rcu_read_lock();
    list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
        avail += atomic_read(&chunk->avail);
    rcu_read_unlock();
    return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
    struct gen_pool_chunk *chunk;
    size_t size = 0;

    rcu_read_lock();
    list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
        size += chunk_size(chunk);
    rcu_read_unlock();
    return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
    rcu_read_lock();

    pool->algo = algo;
    if (!pool->algo)
        pool->algo = gen_pool_first_fit;

    pool->data = data;

    rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
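
/*
 * Illustrative sketch, not part of the original file: switching a pool to
 * the best-fit algorithm provided further below.  The pool pointer is
 * hypothetical; the NULL data argument is simply ignored by best fit.
 */
static void __maybe_unused example_use_best_fit(struct gen_pool *pool)
{
    /* All subsequent gen_pool_alloc() calls on this pool use best fit. */
    gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
}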

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
        unsigned long start, unsigned int nr, void *data,
        struct gen_pool *pool)
{
    return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
        unsigned long start, unsigned int nr, void *data,
        struct gen_pool *pool)
{
    struct genpool_data_align *alignment;
    unsigned long align_mask;
    int order;

    alignment = data;
    order = pool->min_alloc_order;
    align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
    return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);
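
/*
 * Illustrative sketch, not part of the original file: a one-off aligned
 * allocation using gen_pool_alloc_algo() with genpool_data_align, without
 * changing the pool's default algorithm.  The pool pointer, size and
 * 64-byte alignment are hypothetical.
 */
static unsigned long __maybe_unused example_alloc_aligned(struct gen_pool *pool,
                               size_t size)
{
    struct genpool_data_align align_data = { .align = 64 };

    return gen_pool_alloc_algo(pool, size, gen_pool_first_fit_align,
                   &align_data);
}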

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
        unsigned long start, unsigned int nr, void *data,
        struct gen_pool *pool)
{
    struct genpool_data_fixed *fixed_data;
    int order;
    unsigned long offset_bit;
    unsigned long start_bit;

    fixed_data = data;
    order = pool->min_alloc_order;
    offset_bit = fixed_data->offset >> order;
    if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
        return size;

    start_bit = bitmap_find_next_zero_area(map, size,
            start + offset_bit, nr, 0);
    if (start_bit != offset_bit)
        start_bit = size;
    return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
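
/*
 * Illustrative sketch, not part of the original file: reserving a region
 * at a fixed offset within a chunk via genpool_data_fixed.  The pool
 * pointer, size and 0x100 offset are hypothetical; the offset must be a
 * multiple of 1 << min_alloc_order or the allocation fails.
 */
static unsigned long __maybe_unused example_alloc_fixed(struct gen_pool *pool,
                            size_t size)
{
    struct genpool_data_fixed fixed_data = { .offset = 0x100 };

    return gen_pool_alloc_algo(pool, size, gen_pool_fixed_alloc,
                   &fixed_data);
}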

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
        unsigned long size, unsigned long start,
        unsigned int nr, void *data, struct gen_pool *pool)
{
    unsigned long align_mask = roundup_pow_of_two(nr) - 1;

    return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
        unsigned long start, unsigned int nr, void *data,
        struct gen_pool *pool)
{
    unsigned long start_bit = size;
    unsigned long len = size + 1;
    unsigned long index;

    index = bitmap_find_next_zero_area(map, size, start, nr, 0);

    while (index < size) {
        int next_bit = find_next_bit(map, size, index + nr);
        if ((next_bit - index) < len) {
            len = next_bit - index;
            start_bit = index;
            if (len == nr)
                return start_bit;
        }
        index = bitmap_find_next_zero_area(map, size,
                           next_bit + 1, nr, 0);
    }

    return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
    gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
    struct gen_pool **p = res;

    /* NULL data matches only a pool without an assigned name */
    if (!data && !(*p)->name)
        return 1;

    if (!data || !(*p)->name)
        return 0;

    return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
    struct gen_pool **p;

    p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
            (void *)name);
    if (!p)
        return NULL;
    return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
                      int nid, const char *name)
{
    struct gen_pool **ptr, *pool;
    const char *pool_name = NULL;

    /* Check that genpool to be created is uniquely addressed on device */
    if (gen_pool_get(dev, name))
        return ERR_PTR(-EINVAL);

    if (name) {
        pool_name = kstrdup_const(name, GFP_KERNEL);
        if (!pool_name)
            return ERR_PTR(-ENOMEM);
    }

    ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
    if (!ptr)
        goto free_pool_name;

    pool = gen_pool_create(min_alloc_order, nid);
    if (!pool)
        goto free_devres;

    *ptr = pool;
    pool->name = pool_name;
    devres_add(dev, ptr);

    return pool;

free_devres:
    devres_free(ptr);
free_pool_name:
    kfree_const(pool_name);

    return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
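
/*
 * Illustrative sketch, not part of the original file: creating a managed,
 * named pool from a driver's probe path.  The "example-sram" name and the
 * example_devm_pool() helper are hypothetical; the pool is destroyed
 * automatically when the device is unbound.
 */
static struct gen_pool * __maybe_unused example_devm_pool(struct device *dev)
{
    struct gen_pool *pool;

    pool = devm_gen_pool_create(dev, 5, NUMA_NO_NODE, "example-sram");
    if (IS_ERR(pool))
        return pool;

    /* Other code can later look the pool up again by name: */
    /* pool = gen_pool_get(dev, "example-sram"); */

    return pool;
}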

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
    const char *propname, int index)
{
    struct platform_device *pdev;
    struct device_node *np_pool, *parent;
    const char *name = NULL;
    struct gen_pool *pool = NULL;

    np_pool = of_parse_phandle(np, propname, index);
    if (!np_pool)
        return NULL;

    pdev = of_find_device_by_node(np_pool);
    if (!pdev) {
        /* Check if named gen_pool is created by parent node device */
        parent = of_get_parent(np_pool);
        pdev = of_find_device_by_node(parent);
        of_node_put(parent);

        of_property_read_string(np_pool, "label", &name);
        if (!name)
            name = np_pool->name;
    }
    if (pdev)
        pool = gen_pool_get(&pdev->dev, name);
    of_node_put(np_pool);

    return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
#endif /* CONFIG_OF */