// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pools for device pages with write combined or uncached caching, as well
 * as coherent DMA allocations. Freed pages are kept in per-order pools so
 * that expensive caching attribute changes can be avoided on reuse, and a
 * shrinker hands pages back to the system under memory pressure.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

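/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr return for the mapping and order in the lower bits
 */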
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];

static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

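/* Allocate pages of size 1 << order with the given gfp_flags */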
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into an userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

	if (!pool->use_dma_alloc) {
		p = alloc_pages(gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

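/* Reset the caching and pages of size 1 << order */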
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant there.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

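/* Apply a new caching to an array of pages */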
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}

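/* Map pages of 1 << order size and fill the DMA address array */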
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

	for (i = 1 << order; i; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

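/* Unmap num_pages pages of PAGE_SIZE from the given DMA address */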
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

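/* Give pages into a specific pool_type */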
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

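/* Take pages from a specific pool_type, return NULL when nothing available */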
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

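/* Initialize and add a pool type to the global shrinker list */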
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

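/* Remove a pool_type from the global shrinker list and free all pages */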
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

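/* Return the pool_type to use for the given caching and order */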
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}

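/* Free pages using the global shrinker list */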
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_pages;
	struct page *p;

	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}

	return num_pages;
}

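/* Return the allocation order of a page */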
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

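/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */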
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	unsigned long num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	gfp_t gfp_flags = GFP_USER;
	unsigned int i, order;
	struct page *p;
	int r;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);

	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		bool apply_caching = false;
		struct ttm_pool_type *pt;

		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			apply_caching = true;
		} else {
			p = ttm_pool_alloc_page(pool, gfp_flags, order);
			if (p && PageHighMem(p))
				apply_caching = true;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}

		if (apply_caching) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;
			caching = pages + (1 << order);
		}

		if (dma_addr) {
			r = ttm_pool_map(pool, order, p, &dma_addr);
			if (r)
				goto error_free_page;
		}

		num_pages -= 1 << order;
		for (i = 1 << order; i; --i)
			*(pages++) = p++;
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, tt->caching, order, p);

error_free_all:
	num_pages = tt->num_pages - num_pages;
	for (i = 0; i < num_pages; ) {
		order = ttm_pool_page_order(pool, tt->pages[i]);
		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
		i += 1 << order;
	}

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);

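/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them.
 */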
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	unsigned int i;

	for (i = 0; i < tt->num_pages; ) {
		struct page *p = tt->pages[i];
		unsigned int order, num_pages;
		struct ttm_pool_type *pt;

		order = ttm_pool_page_order(pool, p);
		num_pages = 1ULL << order;
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);

		pt = ttm_pool_select_type(pool, tt->caching, order);
		if (pt)
			ttm_pool_type_give(pt, tt->pages[i]);
		else
			ttm_pool_free_page(pool, tt->caching, order,
					   tt->pages[i]);

		i += num_pages;
	}

	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

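/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 *
 * A minimal usage sketch (hypothetical driver code, the pool is typically
 * embedded in the driver's device structure and used from the ttm_tt
 * populate/unpopulate callbacks):
 *
 *	ttm_pool_init(&pool, dev, true, false);
 *	r = ttm_pool_alloc(&pool, tt, &ctx);
 *	...
 *	ttm_pool_free(&pool, tt);
 *	ttm_pool_fini(&pool);
 */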
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	if (use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_init(&pool->caching[i].orders[j],
						   pool, i, j);
	}
}

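/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */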
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	if (pool->use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_fini(&pool->caching[i].orders[j]);
	}

	/* We removed the pool types from the shrinker list, but we need to
	 * also make sure that no shrinker is concurrently freeing pages from
	 * the pool.
	 */
	synchronize_shrinkers();
}

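/* As long as pages are available make sure to release at least one */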
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

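/* Return the number of pages available or SHRINK_EMPTY if we have none */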
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
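/* Count the number of pages available in a pool_type */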
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

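/* Print a nice header for the order */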
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

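/* Dump information about the different pool types */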
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

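/* Dump the total amount of allocated pages */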
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

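/* Dump the information for the global pools */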
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

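/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */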
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

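/* Test the shrinker functions and dump the result */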
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

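/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */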
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}

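/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */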
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}