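/*
 * DMA Pool allocator
 *
 * This allocator returns small blocks of a given size which are DMA-able
 * by the given device.  It uses the dma coherent allocator to obtain
 * "allocation"-sized pages and then splits them into blocks of the
 * requested size.
 *
 * A pool is represented by struct dma_pool, which keeps a list of the
 * pages it has allocated.  Each page is carved into blocks; the free
 * blocks within a page form a singly linked list (the first bytes of a
 * free block store the offset of the next free one).  Used blocks are
 * not tracked individually, only counted per page.
 *
 * Minimal usage sketch (hypothetical driver, names invented here):
 *
 *	struct dma_pool *pool = dma_pool_create("foo", dev, 64, 8, 0);
 *	dma_addr_t dma;
 *	void *buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	...
 *	dma_pool_free(pool, buf, dma);
 *	dma_pool_destroy(pool);
 */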
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR_RO(pools);
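/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc() may be used to allocate
 * memory.  Such memory will all have "consistent" DMA mappings,
 * accessible by the device and its driver without using cache flushing
 * primitives.  The actual size of blocks allocated may be larger than
 * requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */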
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);
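	/*
	 * pools_lock ensures that the ->dma_pools list does not get
	 * corrupted.  pools_reg_lock ensures that there is not a race
	 * between dma_pool_create() and dma_pool_destroy() or within
	 * dma_pool_create() itself when two devices with the same name
	 * are created in parallel.
	 */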
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
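/*
 * Lay out the free-block chain inside a freshly allocated page: the first
 * bytes of each free block hold the offset of the next free block, and
 * blocks are placed so that none crosses a pool->boundary boundary.
 */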
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}
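/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */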
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
					pool->name, page->vaddr);
			else
				pr_err("%s %s, %p busy\n", __func__,
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
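/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */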
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in the first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
					__func__, pool->name, retval);
			else
				pr_err("%s %s, %p (corrupted)\n",
				       __func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				       data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}
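/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */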
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p/%pad (bad dma)\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p (bad vaddr)/%pad\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "%s %s, dma %pad already free\n",
					__func__, pool->name, &dma);
			else
				pr_err("%s %s, dma %pad already free\n",
				       __func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
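/*
 * Managed DMA pool
 */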
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}
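/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */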
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
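/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */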
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);