// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
             size_t byte)
{
    const int nr = pool->curr_nr;
    const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
    const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
    int i;

    pr_err("BUG: mempool element poison mismatch\n");
    pr_err("Mempool %p size %zu\n", pool, size);
    pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
    for (i = start; i < end; i++)
        pr_cont("%x ", *(u8 *)(element + i));
    pr_cont("%s\n", end < size ? "..." : "");
    dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
    u8 *obj = element;
    size_t i;

    for (i = 0; i < size; i++) {
        u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

        if (obj[i] != exp) {
            poison_error(pool, element, size, i);
            return;
        }
    }
    memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
    /* Mempools backed by slab allocator */
    if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
        __check_element(pool, element, ksize(element));
    } else if (pool->free == mempool_free_pages) {
        /* Mempools backed by page allocator */
        int order = (int)(long)pool->pool_data;
        void *addr = kmap_atomic((struct page *)element);

        __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
        kunmap_atomic(addr);
    }
}

static void __poison_element(void *element, size_t size)
{
    u8 *obj = element;

    memset(obj, POISON_FREE, size - 1);
    obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
    /* Mempools backed by slab allocator */
    if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
        __poison_element(element, ksize(element));
    } else if (pool->alloc == mempool_alloc_pages) {
        /* Mempools backed by page allocator */
        int order = (int)(long)pool->pool_data;
        void *addr = kmap_atomic((struct page *)element);

        __poison_element(addr, 1UL << (PAGE_SHIFT + order));
        kunmap_atomic(addr);
    }
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */

static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{
    if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
        kasan_slab_free_mempool(element);
    else if (pool->alloc == mempool_alloc_pages)
        kasan_poison_pages(element, (unsigned long)pool->pool_data,
                   false);
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
    if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
        kasan_unpoison_range(element, __ksize(element));
    else if (pool->alloc == mempool_alloc_pages)
        kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
                     false);
}

static __always_inline void add_element(mempool_t *pool, void *element)
{
    BUG_ON(pool->curr_nr >= pool->min_nr);
    poison_element(pool, element);
    kasan_poison_element(pool, element);
    pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
    void *element = pool->elements[--pool->curr_nr];

    BUG_ON(pool->curr_nr < 0);
    kasan_unpoison_element(pool, element);
    check_element(pool, element);
    return element;
}

/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool:      pointer to the memory pool which was initialized with
 *             mempool_init().
 *
 * Free all reserved elements in @pool.  Unlike mempool_destroy(), this
 * does not free @pool itself.  This function only sleeps if the
 * free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
    while (pool->curr_nr) {
        void *element = remove_element(pool);
        pool->free(element, pool->pool_data);
    }
    kfree(pool->elements);
    pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);
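
/*
 * Illustrative sketch (not part of mempool.c): because mempool_exit() may be
 * called on a zeroed but never-initialized pool, a teardown path need not
 * track whether initialization ever ran.  "struct foo_dev" and foo_teardown()
 * are hypothetical.
 */
struct foo_dev {
    mempool_t pool;         /* embedded pool, zeroed by kzalloc() */
};

static void foo_teardown(struct foo_dev *dev)
{
    /* safe even if the probe path failed before mempool_init() ran */
    mempool_exit(&dev->pool);
}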

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
    if (unlikely(!pool))
        return;

    mempool_exit(pool);
    kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);
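
/*
 * Illustrative sketch (hypothetical caller): mempool_destroy() accepts NULL,
 * so error paths can tear down unconditionally.
 */
static void bar_cleanup(mempool_t *pool)
{
    mempool_destroy(pool);  /* no-op when pool is NULL */
}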

int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
              mempool_free_t *free_fn, void *pool_data,
              gfp_t gfp_mask, int node_id)
{
    spin_lock_init(&pool->lock);
    pool->min_nr    = min_nr;
    pool->pool_data = pool_data;
    pool->alloc = alloc_fn;
    pool->free  = free_fn;
    init_waitqueue_head(&pool->wait);

    pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
                        gfp_mask, node_id);
    if (!pool->elements)
        return -ENOMEM;

    /*
     * First pre-allocate the guaranteed number of buffers.
     */
    while (pool->curr_nr < pool->min_nr) {
        void *element;

        element = pool->alloc(gfp_mask, pool->pool_data);
        if (unlikely(!element)) {
            mempool_exit(pool);
            return -ENOMEM;
        }
        add_element(pool, element);
    }

    return 0;
}
EXPORT_SYMBOL(mempool_init_node);

/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. one
 * embedded in another structure) instead of allocating it.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
         mempool_free_t *free_fn, void *pool_data)
{
    return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
                 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init);
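
/*
 * Illustrative sketch (not part of mempool.c): initializing a pool that is
 * embedded in a larger structure instead of heap-allocated.  "struct
 * baz_dev", baz_setup() and BAZ_MIN_RESERVE are hypothetical.
 */
#define BAZ_MIN_RESERVE 8

struct baz_dev {
    mempool_t pool;         /* embedded, so no separate allocation */
};

static int baz_setup(struct baz_dev *dev, struct kmem_cache *cache)
{
    /* guarantee BAZ_MIN_RESERVE objects from "cache" are always reserved */
    return mempool_init(&dev->pool, BAZ_MIN_RESERVE,
                mempool_alloc_slab, mempool_free_slab, cache);
}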

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as mempool_alloc() is not called from
 * IRQ context.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
                mempool_free_t *free_fn, void *pool_data)
{
    return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
                   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
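
/*
 * Illustrative sketch (hypothetical callbacks): any pair of functions
 * matching mempool_alloc_t/mempool_free_t can back a pool; pool_data is
 * passed through verbatim, here smuggling a buffer size.
 */
static void *qux_alloc(gfp_t gfp_mask, void *pool_data)
{
    return kmalloc((size_t)pool_data, gfp_mask);
}

static void qux_free(void *element, void *pool_data)
{
    kfree(element);
}

static mempool_t *qux_create_pool(void)
{
    /* at least four 512-byte buffers are kept in reserve */
    return mempool_create(4, qux_alloc, qux_free, (void *)512);
}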

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
                   mempool_free_t *free_fn, void *pool_data,
                   gfp_t gfp_mask, int node_id)
{
    mempool_t *pool;

    pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
    if (!pool)
        return NULL;

    if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
                  gfp_mask, node_id)) {
        kfree(pool);
        return NULL;
    }

    return pool;
}
EXPORT_SYMBOL(mempool_create_node);

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note: the caller must guarantee that mempool_destroy() is not called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
    void *element;
    void **new_elements;
    unsigned long flags;

    BUG_ON(new_min_nr <= 0);
    might_sleep();

    spin_lock_irqsave(&pool->lock, flags);
    if (new_min_nr <= pool->min_nr) {
        while (new_min_nr < pool->curr_nr) {
            element = remove_element(pool);
            spin_unlock_irqrestore(&pool->lock, flags);
            pool->free(element, pool->pool_data);
            spin_lock_irqsave(&pool->lock, flags);
        }
        pool->min_nr = new_min_nr;
        goto out_unlock;
    }
    spin_unlock_irqrestore(&pool->lock, flags);

    /* Grow the pool */
    new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
                     GFP_KERNEL);
    if (!new_elements)
        return -ENOMEM;

    spin_lock_irqsave(&pool->lock, flags);
    if (unlikely(new_min_nr <= pool->min_nr)) {
        /* Raced, other resize will do our work */
        spin_unlock_irqrestore(&pool->lock, flags);
        kfree(new_elements);
        goto out;
    }
    memcpy(new_elements, pool->elements,
            pool->curr_nr * sizeof(*new_elements));
    kfree(pool->elements);
    pool->elements = new_elements;
    pool->min_nr = new_min_nr;

    while (pool->curr_nr < pool->min_nr) {
        spin_unlock_irqrestore(&pool->lock, flags);
        element = pool->alloc(GFP_KERNEL, pool->pool_data);
        if (!element)
            goto out;
        spin_lock_irqsave(&pool->lock, flags);
        if (pool->curr_nr < pool->min_nr) {
            add_element(pool, element);
        } else {
            spin_unlock_irqrestore(&pool->lock, flags);
            pool->free(element, pool->pool_data);   /* Raced */
            goto out;
        }
    }
out_unlock:
    spin_unlock_irqrestore(&pool->lock, flags);
out:
    return 0;
}
EXPORT_SYMBOL(mempool_resize);
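
/*
 * Illustrative sketch (hypothetical caller): raising the guaranteed reserve
 * when a device needs more in-flight requests, and lowering it again later.
 */
static int quux_set_queue_depth(mempool_t *pool, int depth)
{
    /* may sleep; on success at least "depth" elements stay reserved */
    return mempool_resize(pool, depth);
}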

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 *
 * Return: pointer to the allocated element or %NULL on error.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
    void *element;
    unsigned long flags;
    wait_queue_entry_t wait;
    gfp_t gfp_temp;

    VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
    might_alloc(gfp_mask);

    gfp_mask |= __GFP_NOMEMALLOC;   /* don't allocate emergency reserves */
    gfp_mask |= __GFP_NORETRY;  /* don't loop in __alloc_pages */
    gfp_mask |= __GFP_NOWARN;   /* failures are OK */

    gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

    element = pool->alloc(gfp_temp, pool->pool_data);
    if (likely(element != NULL))
        return element;

    spin_lock_irqsave(&pool->lock, flags);
    if (likely(pool->curr_nr)) {
        element = remove_element(pool);
        spin_unlock_irqrestore(&pool->lock, flags);
        /* paired with rmb in mempool_free(), read comment there */
        smp_wmb();
        /*
         * Update the allocation stack trace as this is more useful
         * for debugging.
         */
        kmemleak_update_trace(element);
        return element;
    }

    /*
     * We use gfp mask w/o direct reclaim or IO for the first round.  If
     * alloc failed with that and @pool was empty, retry immediately.
     */
    if (gfp_temp != gfp_mask) {
        spin_unlock_irqrestore(&pool->lock, flags);
        gfp_temp = gfp_mask;
        goto repeat_alloc;
    }

    /* We must not sleep if !__GFP_DIRECT_RECLAIM */
    if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
        spin_unlock_irqrestore(&pool->lock, flags);
        return NULL;
    }

    /* Let's wait for someone else to return an element to @pool */
    init_wait(&wait);
    prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

    spin_unlock_irqrestore(&pool->lock, flags);

    /*
     * FIXME: this should be io_schedule().  The timeout is there as a
     * workaround for some DM problems in 2.6.18.
     */
    io_schedule_timeout(5*HZ);

    finish_wait(&pool->wait, &wait);
    goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
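
/*
 * Illustrative sketch (hypothetical I/O path): with a sleepable mask such as
 * GFP_NOIO, mempool_alloc() waits for an element to be returned rather than
 * failing, so a writeback path can always make forward progress.  "struct
 * request_ctx" and ctx_get() are hypothetical.
 */
struct request_ctx;

static struct request_ctx *ctx_get(mempool_t *pool)
{
    /* never NULL here: GFP_NOIO includes __GFP_DIRECT_RECLAIM */
    return mempool_alloc(pool, GFP_NOIO);
}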

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
    unsigned long flags;

    if (unlikely(element == NULL))
        return;

    /*
     * Paired with the wmb in mempool_alloc().  The preceding read is
     * for @element and the following @pool->curr_nr.  This ensures
     * that the visible value of @pool->curr_nr is from after the
     * allocation of @element.  This is necessary for fringe cases
     * where @element was passed to this task without going through
     * barriers.
     *
     * For example, assume @p is %NULL at the beginning and one task
     * performs "p = mempool_alloc(...);" while another task is doing
     * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
     * may end up using curr_nr value which is from before allocation
     * of @p without the following rmb.
     */
    smp_rmb();

    /*
     * For correctness, we need a test which is guaranteed to trigger
     * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
     * without locking achieves that and refilling as soon as possible
     * is desirable.
     *
     * Because curr_nr visible here is always a value after the
     * allocation of @element, any task which decremented curr_nr below
     * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
     * incremented to min_nr afterwards.  If curr_nr gets incremented
     * to min_nr after the allocation of @element, the elements
     * allocated after that are subject to the same guarantee.
     *
     * Waiters happen iff curr_nr is 0 and the above guarantee also
     * ensures that there will be frees which return elements to the
     * pool waking up the waiters.
     */
    if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
        spin_lock_irqsave(&pool->lock, flags);
        if (likely(pool->curr_nr < pool->min_nr)) {
            add_element(pool, element);
            spin_unlock_irqrestore(&pool->lock, flags);
            wake_up(&pool->wait);
            return;
        }
        spin_unlock_irqrestore(&pool->lock, flags);
    }
    pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
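
/*
 * Illustrative sketch (hypothetical completion path): freed elements refill
 * the reserve first whenever curr_nr < min_nr, which is also what wakes
 * waiters in mempool_alloc().  ctx_put() pairs with ctx_get() above.
 */
static void ctx_put(mempool_t *pool, struct request_ctx *ctx)
{
    mempool_free(ctx, pool);    /* a NULL ctx is silently ignored */
}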

/*
 * A commonly used pair of alloc and free functions.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
    struct kmem_cache *mem = pool_data;
    VM_BUG_ON(mem->ctor);
    return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
    struct kmem_cache *mem = pool_data;
    kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);
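
/*
 * Illustrative sketch (hypothetical cache): a slab-backed pool built from the
 * helpers above; <linux/mempool.h> also offers the equivalent
 * mempool_create_slab_pool() convenience wrapper.
 */
static mempool_t *make_slab_pool(struct kmem_cache *cache)
{
    return mempool_create(16, mempool_alloc_slab, mempool_free_slab, cache);
}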

/*
 * A commonly used pair of alloc and free functions that kmalloc()/kfree()
 * the amount of memory specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
    size_t size = (size_t)pool_data;
    return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
    kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
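
/*
 * Illustrative sketch: a pool of fixed-size kmalloc() buffers, with the
 * element size smuggled through pool_data.  <linux/mempool.h> wraps this
 * pattern as mempool_create_kmalloc_pool().
 */
static mempool_t *make_kmalloc_pool(void)
{
    return mempool_create(16, mempool_kmalloc, mempool_kfree,
                  (void *)(size_t)256);
}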

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
    int order = (int)(long)pool_data;
    return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
    int order = (int)(long)pool_data;
    __free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
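
/*
 * Illustrative sketch: a reserve of order-0 pages, with the order smuggled
 * through pool_data.  <linux/mempool.h> wraps this pattern as
 * mempool_create_page_pool().
 */
static mempool_t *make_page_pool(void)
{
    return mempool_create(4, mempool_alloc_pages, mempool_free_pages,
                  (void *)(long)0);
}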