// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {       /* the pool */
    struct list_head page_list;
    spinlock_t lock;
    size_t size;
    struct device *dev;
    size_t allocation;
    size_t boundary;
    char name[32];
    struct list_head pools;
};

struct dma_page {       /* cacheable header for 'allocation' bytes */
    struct list_head page_list;
    void *vaddr;
    dma_addr_t dma;
    unsigned int in_use;
    unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
    unsigned temp;
    unsigned size;
    char *next;
    struct dma_page *page;
    struct dma_pool *pool;

    next = buf;
    size = PAGE_SIZE;

    temp = scnprintf(next, size, "poolinfo - 0.1\n");
    size -= temp;
    next += temp;

    mutex_lock(&pools_lock);
    list_for_each_entry(pool, &dev->dma_pools, pools) {
        unsigned pages = 0;
        unsigned blocks = 0;

        spin_lock_irq(&pool->lock);
        list_for_each_entry(page, &pool->page_list, page_list) {
            pages++;
            blocks += page->in_use;
        }
        spin_unlock_irq(&pool->lock);

        /* per-pool info, no real statistics yet */
        temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
                 pool->name, blocks,
                 pages * (pool->allocation / pool->size),
                 pool->size, pages);
        size -= temp;
        next += temp;
    }
    mutex_unlock(&pools_lock);

    return PAGE_SIZE - size;
}

static DEVICE_ATTR_RO(pools);
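
/*
 * Editor's note, not part of the original file: the "pools" device
 * attribute defined above prints a "poolinfo - 0.1" header followed by one
 * line per pool: name, blocks currently in use, total blocks
 * (pages * allocation / size), block size, and number of pages.  A
 * hypothetical pool named "buffer-2048" (2048-byte blocks, one 4096-byte
 * page, one block in use) would print roughly:
 *
 *   buffer-2048         1    2 2048  1
 *
 * (name and values illustrative only, spacing approximate).
 */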

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                 size_t size, size_t align, size_t boundary)
{
    struct dma_pool *retval;
    size_t allocation;
    bool empty = false;

    if (align == 0)
        align = 1;
    else if (align & (align - 1))
        return NULL;

    if (size == 0)
        return NULL;
    else if (size < 4)
        size = 4;

    size = ALIGN(size, align);
    allocation = max_t(size_t, size, PAGE_SIZE);

    if (!boundary)
        boundary = allocation;
    else if ((boundary < size) || (boundary & (boundary - 1)))
        return NULL;

    retval = kmalloc(sizeof(*retval), GFP_KERNEL);
    if (!retval)
        return retval;

    strscpy(retval->name, name, sizeof(retval->name));

    retval->dev = dev;

    INIT_LIST_HEAD(&retval->page_list);
    spin_lock_init(&retval->lock);
    retval->size = size;
    retval->boundary = boundary;
    retval->allocation = allocation;

    INIT_LIST_HEAD(&retval->pools);

    /*
     * pools_lock ensures that the ->dma_pools list does not get corrupted.
     * pools_reg_lock ensures that there is not a race between
     * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
     * when the first invocation of dma_pool_create() failed on
     * device_create_file() and the second assumes that it has been done (I
     * know it is a short window).
     */
    mutex_lock(&pools_reg_lock);
    mutex_lock(&pools_lock);
    if (list_empty(&dev->dma_pools))
        empty = true;
    list_add(&retval->pools, &dev->dma_pools);
    mutex_unlock(&pools_lock);
    if (empty) {
        int err;

        err = device_create_file(dev, &dev_attr_pools);
        if (err) {
            mutex_lock(&pools_lock);
            list_del(&retval->pools);
            mutex_unlock(&pools_lock);
            mutex_unlock(&pools_reg_lock);
            kfree(retval);
            return NULL;
        }
    }
    mutex_unlock(&pools_reg_lock);
    return retval;
}
EXPORT_SYMBOL(dma_pool_create);
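
/*
 * Editor's sketch, not part of the original file: a minimal way a driver
 * might use this API.  The device pointer, the "mydev_descs" pool name and
 * the 64-byte/16-byte-aligned geometry are made up for illustration; the
 * #if 0 guard keeps the sketch out of the build.
 */
#if 0
static int mydev_use_pool(struct device *dev)
{
    struct dma_pool *pool;
    dma_addr_t desc_dma;
    void *desc;

    /* 64-byte blocks, 16-byte aligned, no extra boundary restriction */
    pool = dma_pool_create("mydev_descs", dev, 64, 16, 0);
    if (!pool)
        return -ENOMEM;

    /* get one block; desc is the CPU address, desc_dma the device address */
    desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
    if (!desc) {
        dma_pool_destroy(pool);
        return -ENOMEM;
    }

    /* ... hand desc_dma to the hardware, fill in desc from the CPU ... */

    /* return the block, then tear the pool down */
    dma_pool_free(pool, desc, desc_dma);
    dma_pool_destroy(pool);
    return 0;
}
#endif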

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
    unsigned int offset = 0;
    unsigned int next_boundary = pool->boundary;

    do {
        unsigned int next = offset + pool->size;
        if (unlikely((next + pool->size) >= next_boundary)) {
            next = next_boundary;
            next_boundary += pool->boundary;
        }
        *(int *)(page->vaddr + offset) = next;
        offset = next;
    } while (offset < pool->allocation);
}
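
/*
 * Editor's note, not part of the original file: pool_initialise_page()
 * builds the per-page free list described in the header comment.  The
 * first 4 bytes of each free block hold the offset of the next free block,
 * page->offset is the head of that chain, and an offset >= allocation
 * terminates it.  When the block that would start at 'next' would end at
 * or beyond next_boundary, 'next' is bumped to the boundary itself, so no
 * block handed out later crosses pool->boundary.
 */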

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
    struct dma_page *page;

    page = kmalloc(sizeof(*page), mem_flags);
    if (!page)
        return NULL;
    page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
                     &page->dma, mem_flags);
    if (page->vaddr) {
#ifdef  DMAPOOL_DEBUG
        memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
        pool_initialise_page(pool, page);
        page->in_use = 0;
        page->offset = 0;
    } else {
        kfree(page);
        page = NULL;
    }
    return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
    return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
    dma_addr_t dma = page->dma;

#ifdef  DMAPOOL_DEBUG
    memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
    dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
    list_del(&page->page_list);
    kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
    struct dma_page *page, *tmp;
    bool empty = false;

    if (unlikely(!pool))
        return;

    mutex_lock(&pools_reg_lock);
    mutex_lock(&pools_lock);
    list_del(&pool->pools);
    if (pool->dev && list_empty(&pool->dev->dma_pools))
        empty = true;
    mutex_unlock(&pools_lock);
    if (empty)
        device_remove_file(pool->dev, &dev_attr_pools);
    mutex_unlock(&pools_reg_lock);

    list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
        if (is_page_busy(page)) {
            if (pool->dev)
                dev_err(pool->dev, "%s %s, %p busy\n", __func__,
                    pool->name, page->vaddr);
            else
                pr_err("%s %s, %p busy\n", __func__,
                       pool->name, page->vaddr);
            /* leak the still-in-use consistent memory */
            list_del(&page->page_list);
            kfree(page);
        } else
            pool_free_page(pool, page);
    }

    kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
             dma_addr_t *handle)
{
    unsigned long flags;
    struct dma_page *page;
    size_t offset;
    void *retval;

    might_alloc(mem_flags);

    spin_lock_irqsave(&pool->lock, flags);
    list_for_each_entry(page, &pool->page_list, page_list) {
        if (page->offset < pool->allocation)
            goto ready;
    }

    /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
    spin_unlock_irqrestore(&pool->lock, flags);

    page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
    if (!page)
        return NULL;

    spin_lock_irqsave(&pool->lock, flags);

    list_add(&page->page_list, &pool->page_list);
 ready:
    page->in_use++;
    offset = page->offset;
    page->offset = *(int *)(page->vaddr + offset);
    retval = offset + page->vaddr;
    *handle = offset + page->dma;
#ifdef  DMAPOOL_DEBUG
    {
        int i;
        u8 *data = retval;
        /* page->offset is stored in first 4 bytes */
        for (i = sizeof(page->offset); i < pool->size; i++) {
            if (data[i] == POOL_POISON_FREED)
                continue;
            if (pool->dev)
                dev_err(pool->dev, "%s %s, %p (corrupted)\n",
                    __func__, pool->name, retval);
            else
                pr_err("%s %s, %p (corrupted)\n",
                    __func__, pool->name, retval);

            /*
             * Dump the first 4 bytes even if they are not
             * POOL_POISON_FREED
             */
            print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
                    data, pool->size, 1);
            break;
        }
    }
    if (!(mem_flags & __GFP_ZERO))
        memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
    spin_unlock_irqrestore(&pool->lock, flags);

    if (want_init_on_alloc(mem_flags))
        memset(retval, 0, pool->size);

    return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
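
/*
 * Editor's sketch, not part of the original file: because the pool lock is
 * taken with spin_lock_irqsave() and page allocation honours mem_flags,
 * blocks can also be requested from atomic context by passing GFP_ATOMIC,
 * and __GFP_ZERO requests a zeroed block via the want_init_on_alloc() path
 * above.  The function and parameter names below are illustrative only.
 */
#if 0
static void *mydev_grab_desc_atomic(struct dma_pool *pool, dma_addr_t *dma)
{
    /* no sleeping allowed here, so use GFP_ATOMIC instead of GFP_KERNEL */
    void *desc = dma_pool_alloc(pool, GFP_ATOMIC | __GFP_ZERO, dma);

    if (!desc)
        return NULL;    /* caller must cope with allocation failure */
    return desc;
}
#endif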

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
    struct dma_page *page;

    list_for_each_entry(page, &pool->page_list, page_list) {
        if (dma < page->dma)
            continue;
        if ((dma - page->dma) < pool->allocation)
            return page;
    }
    return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
    struct dma_page *page;
    unsigned long flags;
    unsigned int offset;

    spin_lock_irqsave(&pool->lock, flags);
    page = pool_find_page(pool, dma);
    if (!page) {
        spin_unlock_irqrestore(&pool->lock, flags);
        if (pool->dev)
            dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
                __func__, pool->name, vaddr, &dma);
        else
            pr_err("%s %s, %p/%pad (bad dma)\n",
                   __func__, pool->name, vaddr, &dma);
        return;
    }

    offset = vaddr - page->vaddr;
    if (want_init_on_free())
        memset(vaddr, 0, pool->size);
#ifdef  DMAPOOL_DEBUG
    if ((dma - page->dma) != offset) {
        spin_unlock_irqrestore(&pool->lock, flags);
        if (pool->dev)
            dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
                __func__, pool->name, vaddr, &dma);
        else
            pr_err("%s %s, %p (bad vaddr)/%pad\n",
                   __func__, pool->name, vaddr, &dma);
        return;
    }
    {
        unsigned int chain = page->offset;
        while (chain < pool->allocation) {
            if (chain != offset) {
                chain = *(int *)(page->vaddr + chain);
                continue;
            }
            spin_unlock_irqrestore(&pool->lock, flags);
            if (pool->dev)
                dev_err(pool->dev, "%s %s, dma %pad already free\n",
                    __func__, pool->name, &dma);
            else
                pr_err("%s %s, dma %pad already free\n",
                       __func__, pool->name, &dma);
            return;
        }
    }
    memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

    page->in_use--;
    *(int *)vaddr = page->offset;
    page->offset = offset;
    /*
     * Resist a temptation to do
     *    if (!is_page_busy(page)) pool_free_page(pool, page);
     * Better have a few empty pages hang around.
     */
    spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
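
/*
 * Editor's sketch, not part of the original file: dma_pool_free() must be
 * given the same (vaddr, dma) pair that dma_pool_alloc() returned, which is
 * why drivers typically keep the two together; the DMAPOOL_DEBUG checks
 * above catch mismatched or doubly-freed blocks.  The struct and field
 * names below are illustrative only.
 */
#if 0
struct mydev_desc {
    void *cpu_addr;         /* returned by dma_pool_alloc() */
    dma_addr_t dma_addr;    /* reported through the handle argument */
};

static void mydev_put_desc(struct dma_pool *pool, struct mydev_desc *d)
{
    /* push the block back onto its page's free list */
    dma_pool_free(pool, d->cpu_addr, d->dma_addr);
    d->cpu_addr = NULL;
    d->dma_addr = 0;
}
#endif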

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
    struct dma_pool *pool = *(struct dma_pool **)res;

    dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
    return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
                  size_t size, size_t align, size_t allocation)
{
    struct dma_pool **ptr, *pool;

    ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
    if (!ptr)
        return NULL;

    pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
    if (pool)
        devres_add(dev, ptr);
    else
        devres_free(ptr);

    return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
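
/*
 * Editor's sketch, not part of the original file: with the managed variant
 * there is no explicit destroy in the driver's teardown path, since devres
 * calls dmam_pool_release() (and thus dma_pool_destroy()) on driver detach.
 * The probe function name and pool geometry below are illustrative only.
 */
#if 0
static int mydev_probe(struct device *dev)
{
    struct dma_pool *pool;

    /* pool lifetime is tied to the device/driver binding */
    pool = dmam_pool_create("mydev_descs", dev, 64, 16, 0);
    if (!pool)
        return -ENOMEM;

    /* ... use dma_pool_alloc()/dma_pool_free() as usual ... */
    return 0;
}
#endif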

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
    struct device *dev = pool->dev;

    WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);