#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_range_manager.h>

#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"

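/**
 * struct amdgpu_vram_reservation - a range of VRAM reserved by the driver
 * @start: start offset of the reserved range in VRAM
 * @size: size of the reserved range in bytes
 * @allocated: buddy blocks backing the reservation once it is satisfied
 * @blocks: entry on the manager's pending or reserved list
 */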
struct amdgpu_vram_reservation {
        u64 start;
        u64 size;
        struct list_head allocated;
        struct list_head blocks;
};

static inline struct amdgpu_vram_mgr *
to_vram_mgr(struct ttm_resource_manager *man)
{
        return container_of(man, struct amdgpu_vram_mgr, manager);
}

static inline struct amdgpu_device *
to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
{
        return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
}

static inline struct drm_buddy_block *
amdgpu_vram_mgr_first_block(struct list_head *list)
{
        return list_first_entry_or_null(list, struct drm_buddy_block, link);
}

static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
{
        struct drm_buddy_block *block;
        u64 start, size;

        block = amdgpu_vram_mgr_first_block(head);
        if (!block)
                return false;

        while (head != block->link.next) {
                start = amdgpu_vram_mgr_block_start(block);
                size = amdgpu_vram_mgr_block_size(block);

                block = list_entry(block->link.next, struct drm_buddy_block, link);
                if (start + size != amdgpu_vram_mgr_block_start(block))
                        return false;
        }

        return true;
}
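/**
 * DOC: mem_info_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * VRAM available on the device.
 * The file mem_info_vram_total returns the total size of VRAM in bytes.
 */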
static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
                                               struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
}

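/**
 * DOC: mem_info_vis_vram_total
 *
 * The amdgpu driver provides a sysfs API for reporting the total amount of
 * CPU-visible VRAM available on the device.
 * The file mem_info_vis_vram_total returns the size in bytes.
 */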
static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
                                                   struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
}

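/**
 * DOC: mem_info_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
 * currently in use.
 * The file mem_info_vram_used returns the used size in bytes.
 */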
static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
                                              struct device_attribute *attr,
                                              char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct ttm_resource_manager *man = &adev->mman.vram_mgr.manager;

        return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
}

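/**
 * DOC: mem_info_vis_vram_used
 *
 * The amdgpu driver provides a sysfs API for reporting the amount of
 * CPU-visible VRAM currently in use.
 * The file mem_info_vis_vram_used returns the used size in bytes.
 */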
static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
                                                  struct device_attribute *attr,
                                                  char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%llu\n",
                          amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
}

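/**
 * DOC: mem_info_vram_vendor
 *
 * The amdgpu driver provides a sysfs API for reporting the vendor of the
 * installed VRAM.
 * The file mem_info_vram_vendor returns the vendor name.
 */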
static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        switch (adev->gmc.vram_vendor) {
        case SAMSUNG:
                return sysfs_emit(buf, "samsung\n");
        case INFINEON:
                return sysfs_emit(buf, "infineon\n");
        case ELPIDA:
                return sysfs_emit(buf, "elpida\n");
        case ETRON:
                return sysfs_emit(buf, "etron\n");
        case NANYA:
                return sysfs_emit(buf, "nanya\n");
        case HYNIX:
                return sysfs_emit(buf, "hynix\n");
        case MOSEL:
                return sysfs_emit(buf, "mosel\n");
        case WINBOND:
                return sysfs_emit(buf, "winbond\n");
        case ESMT:
                return sysfs_emit(buf, "esmt\n");
        case MICRON:
                return sysfs_emit(buf, "micron\n");
        default:
                return sysfs_emit(buf, "unknown\n");
        }
}

static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
                   amdgpu_mem_info_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
                   amdgpu_mem_info_vis_vram_total_show, NULL);
static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
                   amdgpu_mem_info_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
                   amdgpu_mem_info_vis_vram_used_show, NULL);
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
                   amdgpu_mem_info_vram_vendor, NULL);

static struct attribute *amdgpu_vram_mgr_attributes[] = {
        &dev_attr_mem_info_vram_total.attr,
        &dev_attr_mem_info_vis_vram_total.attr,
        &dev_attr_mem_info_vram_used.attr,
        &dev_attr_mem_info_vis_vram_used.attr,
        &dev_attr_mem_info_vram_vendor.attr,
        NULL
};

const struct attribute_group amdgpu_vram_mgr_attr_group = {
        .attrs = amdgpu_vram_mgr_attributes
};

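/**
 * amdgpu_vram_mgr_vis_size - calculate visible block size
 *
 * @adev: amdgpu_device pointer
 * @block: DRM buddy block structure
 *
 * Calculate how many bytes of the DRM buddy block lie inside visible VRAM.
 */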
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
                                    struct drm_buddy_block *block)
{
        u64 start = amdgpu_vram_mgr_block_start(block);
        u64 end = start + amdgpu_vram_mgr_block_size(block);

        if (start >= adev->gmc.visible_vram_size)
                return 0;

        return (end > adev->gmc.visible_vram_size ?
                adev->gmc.visible_vram_size : end) - start;
}

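/**
 * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
 *
 * @bo: &amdgpu_bo buffer object (must be in VRAM)
 *
 * Returns:
 * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
 */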
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_resource *res = bo->tbo.resource;
        struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
        struct drm_buddy_block *block;
        u64 usage = 0;

        if (amdgpu_gmc_vram_full_visible(&adev->gmc))
                return amdgpu_bo_size(bo);

        if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
                return 0;

        list_for_each_entry(block, &vres->blocks, link)
                usage += amdgpu_vram_mgr_vis_size(adev, block);

        return usage;
}

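/*
 * Try to satisfy the pending reservations: carve each requested range out of
 * the buddy allocator and move it to the reserved list. Caller must hold
 * mgr->lock.
 */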
static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = to_amdgpu_device(mgr);
        struct drm_buddy *mm = &mgr->mm;
        struct amdgpu_vram_reservation *rsv, *temp;
        struct drm_buddy_block *block;
        uint64_t vis_usage;

        list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
                if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
                                           rsv->size, mm->chunk_size, &rsv->allocated,
                                           DRM_BUDDY_RANGE_ALLOCATION))
                        continue;

                block = amdgpu_vram_mgr_first_block(&rsv->allocated);
                if (!block)
                        continue;

                dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
                        rsv->start, rsv->size);

                vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
                atomic64_add(vis_usage, &mgr->vis_usage);
                spin_lock(&man->bdev->lru_lock);
                man->usage += rsv->size;
                spin_unlock(&man->bdev->lru_lock);
                list_move(&rsv->blocks, &mgr->reserved_pages);
        }
}

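/**
 * amdgpu_vram_mgr_reserve_range - reserve a range of VRAM
 *
 * @mgr: amdgpu_vram_mgr pointer
 * @start: start address of the range in VRAM
 * @size: size of the range in bytes
 *
 * Reserve the requested range from VRAM, or queue it until the space
 * becomes available. Returns 0 on success, -ENOMEM if the reservation
 * structure cannot be allocated.
 */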
int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
                                  uint64_t start, uint64_t size)
{
        struct amdgpu_vram_reservation *rsv;

        rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
        if (!rsv)
                return -ENOMEM;

        INIT_LIST_HEAD(&rsv->allocated);
        INIT_LIST_HEAD(&rsv->blocks);

        rsv->start = start;
        rsv->size = size;

        mutex_lock(&mgr->lock);
        list_add_tail(&rsv->blocks, &mgr->reservations_pending);
        amdgpu_vram_mgr_do_reserve(&mgr->manager);
        mutex_unlock(&mgr->lock);

        return 0;
}

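/**
 * amdgpu_vram_mgr_query_page_status - query the reservation status of a page
 *
 * @mgr: amdgpu_vram_mgr pointer
 * @start: start address of a page in VRAM
 *
 * Returns:
 *	-EBUSY: the page is still pending reservation
 *	0: the page has been reserved
 *	-ENOENT: the page is not part of any reservation
 */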
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
                                      uint64_t start)
{
        struct amdgpu_vram_reservation *rsv;
        int ret;

        mutex_lock(&mgr->lock);

        list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
                if (rsv->start <= start &&
                    (start < (rsv->start + rsv->size))) {
                        ret = -EBUSY;
                        goto out;
                }
        }

        list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
                if (rsv->start <= start &&
                    (start < (rsv->start + rsv->size))) {
                        ret = 0;
                        goto out;
                }
        }

        ret = -ENOENT;
out:
        mutex_unlock(&mgr->lock);
        return ret;
}

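/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @res: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */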
static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
                               struct ttm_buffer_object *tbo,
                               const struct ttm_place *place,
                               struct ttm_resource **res)
{
        u64 vis_usage = 0, max_bytes, cur_size, min_block_size;
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = to_amdgpu_device(mgr);
        struct amdgpu_vram_mgr_resource *vres;
        u64 size, remaining_size, lpfn, fpfn;
        struct drm_buddy *mm = &mgr->mm;
        struct drm_buddy_block *block;
        unsigned long pages_per_block;
        int r;

        lpfn = (u64)place->lpfn << PAGE_SHIFT;
        if (!lpfn)
                lpfn = man->size;

        fpfn = (u64)place->fpfn << PAGE_SHIFT;

        max_bytes = adev->gmc.mc_vram_size;
        if (tbo->type != ttm_bo_type_kernel)
                max_bytes -= AMDGPU_VM_RESERVED_VRAM;

        if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
                pages_per_block = ~0ul;
        } else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                pages_per_block = HPAGE_PMD_NR;
#else
                /* default to 2MB */
                pages_per_block = 2UL << (20UL - PAGE_SHIFT);
#endif
                pages_per_block = max_t(uint32_t, pages_per_block,
                                        tbo->page_alignment);
        }

        vres = kzalloc(sizeof(*vres), GFP_KERNEL);
        if (!vres)
                return -ENOMEM;

        ttm_resource_init(tbo, place, &vres->base);

        /* bail out quickly if there's likely not enough VRAM for this BO */
        if (ttm_resource_manager_usage(man) > max_bytes) {
                r = -ENOSPC;
                goto error_fini;
        }

        INIT_LIST_HEAD(&vres->blocks);

        if (place->flags & TTM_PL_FLAG_TOPDOWN)
                vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;

        if (fpfn || lpfn != man->size)
                /* Allocate blocks in the requested range */
                vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;

        remaining_size = (u64)vres->base.num_pages << PAGE_SHIFT;

        mutex_lock(&mgr->lock);
        while (remaining_size) {
                if (tbo->page_alignment)
                        min_block_size = (u64)tbo->page_alignment << PAGE_SHIFT;
                else
                        min_block_size = mgr->default_page_size;

                BUG_ON(min_block_size < mm->chunk_size);

                /* Limit the maximum allocation chunk to 2GiB */
                size = min(remaining_size, 2ULL << 30);

                if (size >= (u64)pages_per_block << PAGE_SHIFT)
                        min_block_size = (u64)pages_per_block << PAGE_SHIFT;

                cur_size = size;

                if (fpfn + size != (u64)place->lpfn << PAGE_SHIFT) {
                        /*
                         * Outside of an exact range allocation, adjust size
                         * and min_block_size according to the contiguous flag.
                         */
                        if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
                                size = roundup_pow_of_two(size);
                                min_block_size = size;
                        /*
                         * If size is rounded up to min_block_size here, the
                         * last block is trimmed back to the requested size
                         * after the allocation loop.
                         */
                        } else if (!IS_ALIGNED(size, min_block_size)) {
                                size = round_up(size, min_block_size);
                        }
                }

                r = drm_buddy_alloc_blocks(mm, fpfn,
                                           lpfn,
                                           size,
                                           min_block_size,
                                           &vres->blocks,
                                           vres->flags);
                if (unlikely(r))
                        goto error_free_blocks;

                if (size > remaining_size)
                        remaining_size = 0;
                else
                        remaining_size -= size;
        }
        mutex_unlock(&mgr->lock);

        if (cur_size != size) {
                struct drm_buddy_block *block;
                struct list_head *trim_list;
                u64 original_size;
                LIST_HEAD(temp);

                trim_list = &vres->blocks;
                original_size = (u64)vres->base.num_pages << PAGE_SHIFT;

                /*
                 * The allocation was rounded up; trim the tail back to the
                 * requested size.
                 */
                if (!list_is_singular(&vres->blocks)) {
                        block = list_last_entry(&vres->blocks, typeof(*block), link);
                        list_move_tail(&block->link, &temp);
                        trim_list = &temp;
                        /*
                         * Compute the trim target: the last block size minus
                         * the rounding overhead (aligned size - requested size).
                         */
                        original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size);
                }

                mutex_lock(&mgr->lock);
                drm_buddy_block_trim(mm,
                                     original_size,
                                     trim_list);
                mutex_unlock(&mgr->lock);

                if (!list_empty(&temp))
                        list_splice_tail(trim_list, &vres->blocks);
        }

        vres->base.start = 0;
        list_for_each_entry(block, &vres->blocks, link) {
                unsigned long start;

                start = amdgpu_vram_mgr_block_start(block) +
                        amdgpu_vram_mgr_block_size(block);
                start >>= PAGE_SHIFT;

                if (start > vres->base.num_pages)
                        start -= vres->base.num_pages;
                else
                        start = 0;
                vres->base.start = max(vres->base.start, start);

                vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
        }

        if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
                vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;

        if (adev->gmc.xgmi.connected_to_cpu)
                vres->base.bus.caching = ttm_cached;
        else
                vres->base.bus.caching = ttm_write_combined;

        atomic64_add(vis_usage, &mgr->vis_usage);
        *res = &vres->base;
        return 0;

error_free_blocks:
        drm_buddy_free_list(mm, &vres->blocks);
        mutex_unlock(&mgr->lock);
error_fini:
        ttm_resource_fini(man, &vres->base);
        kfree(vres);

        return r;
}

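/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @res: TTM memory object
 *
 * Free the allocated VRAM again.
 */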
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
                                struct ttm_resource *res)
{
        struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct amdgpu_device *adev = to_amdgpu_device(mgr);
        struct drm_buddy *mm = &mgr->mm;
        struct drm_buddy_block *block;
        uint64_t vis_usage = 0;

        mutex_lock(&mgr->lock);
        list_for_each_entry(block, &vres->blocks, link)
                vis_usage += amdgpu_vram_mgr_vis_size(adev, block);

        amdgpu_vram_mgr_do_reserve(man);

        drm_buddy_free_list(mm, &vres->blocks);
        mutex_unlock(&mgr->lock);

        atomic64_sub(vis_usage, &mgr->vis_usage);

        ttm_resource_fini(man, res);
        kfree(vres);
}

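/**
 * amdgpu_vram_mgr_alloc_sgt - allocate and fill an sg table
 *
 * @adev: amdgpu device pointer
 * @res: TTM memory object
 * @offset: byte offset from the base of the VRAM BO
 * @length: number of bytes to export in the sg table
 * @dev: the other device
 * @dir: dma direction
 * @sgt: resulting sg table
 *
 * Allocate and fill an sg table from a VRAM allocation.
 */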
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
                              struct ttm_resource *res,
                              u64 offset, u64 length,
                              struct device *dev,
                              enum dma_data_direction dir,
                              struct sg_table **sgt)
{
        struct amdgpu_res_cursor cursor;
        struct scatterlist *sg;
        int num_entries = 0;
        int i, r;

        *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
        if (!*sgt)
                return -ENOMEM;

        /* Determine the number of contiguous chunks to export */
        amdgpu_res_first(res, offset, length, &cursor);
        while (cursor.remaining) {
                num_entries++;
                amdgpu_res_next(&cursor, cursor.size);
        }

        r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
        if (r)
                goto error_free;

        /* Initialize scatterlist nodes of sg_table */
        for_each_sgtable_sg((*sgt), sg, i)
                sg->length = 0;

        /*
         * Walk the resource again and DMA-map each contiguous chunk into
         * one scatterlist entry. The cursor yields the start and size of
         * the current chunk and advances to the next one.
         */
        amdgpu_res_first(res, offset, length, &cursor);
        for_each_sgtable_sg((*sgt), sg, i) {
                phys_addr_t phys = cursor.start + adev->gmc.aper_base;
                size_t size = cursor.size;
                dma_addr_t addr;

                addr = dma_map_resource(dev, phys, size, dir,
                                        DMA_ATTR_SKIP_CPU_SYNC);
                r = dma_mapping_error(dev, addr);
                if (r)
                        goto error_unmap;

                sg_set_page(sg, NULL, size, 0);
                sg_dma_address(sg) = addr;
                sg_dma_len(sg) = size;

                amdgpu_res_next(&cursor, cursor.size);
        }

        return 0;

error_unmap:
        for_each_sgtable_sg((*sgt), sg, i) {
                if (!sg->length)
                        continue;

                dma_unmap_resource(dev, sg->dma_address,
                                   sg->length, dir,
                                   DMA_ATTR_SKIP_CPU_SYNC);
        }
        sg_free_table(*sgt);

error_free:
        kfree(*sgt);
        return r;
}

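/**
 * amdgpu_vram_mgr_free_sgt - free an sg table
 *
 * @dev: device pointer
 * @dir: data direction of the resource to unmap
 * @sgt: sg table to free
 *
 * Free a previously allocated sg table.
 */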
void amdgpu_vram_mgr_free_sgt(struct device *dev,
                              enum dma_data_direction dir,
                              struct sg_table *sgt)
{
        struct scatterlist *sg;
        int i;

        for_each_sgtable_sg(sgt, sg, i)
                dma_unmap_resource(dev, sg->dma_address,
                                   sg->length, dir,
                                   DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sgt);
        kfree(sgt);
}

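/**
 * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
 *
 * @mgr: amdgpu_vram_mgr pointer
 *
 * Returns how many bytes are used in the visible part of VRAM.
 */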
uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
{
        return atomic64_read(&mgr->vis_usage);
}

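/**
 * amdgpu_vram_mgr_debug - dump VRAM table
 *
 * @man: TTM memory type manager
 * @printer: DRM printer to use
 *
 * Dump the VRAM manager state using the given DRM printer.
 */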
static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
                                  struct drm_printer *printer)
{
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct drm_buddy *mm = &mgr->mm;
        struct drm_buddy_block *block;

        drm_printf(printer, " vis usage:%llu\n",
                   amdgpu_vram_mgr_vis_usage(mgr));

        mutex_lock(&mgr->lock);
        drm_printf(printer, "default_page_size: %lluKiB\n",
                   mgr->default_page_size >> 10);

        drm_buddy_print(mm, printer);

        drm_printf(printer, "reserved:\n");
        list_for_each_entry(block, &mgr->reserved_pages, link)
                drm_buddy_block_print(mm, block, printer);
        mutex_unlock(&mgr->lock);
}

static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
        .alloc = amdgpu_vram_mgr_new,
        .free = amdgpu_vram_mgr_del,
        .debug = amdgpu_vram_mgr_debug
};

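/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate and initialize the VRAM manager.
 */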
int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
        struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
        struct ttm_resource_manager *man = &mgr->manager;
        int err;

        ttm_resource_manager_init(man, &adev->mman.bdev,
                                  adev->gmc.real_vram_size);

        man->func = &amdgpu_vram_mgr_func;

        err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
        if (err)
                return err;

        mutex_init(&mgr->lock);
        INIT_LIST_HEAD(&mgr->reservations_pending);
        INIT_LIST_HEAD(&mgr->reserved_pages);
        mgr->default_page_size = PAGE_SIZE;

        ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
        ttm_resource_manager_set_used(man, true);
        return 0;
}

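/**
 * amdgpu_vram_mgr_fini - free and destroy the VRAM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Evict all resources, then free the pending and satisfied reservations and
 * tear down the buddy allocator. Bails out early if eviction fails.
 */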
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
{
        struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
        struct ttm_resource_manager *man = &mgr->manager;
        int ret;
        struct amdgpu_vram_reservation *rsv, *temp;

        ttm_resource_manager_set_used(man, false);

        ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
        if (ret)
                return;

        mutex_lock(&mgr->lock);
        list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
                kfree(rsv);

        list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
                drm_buddy_free_list(&mgr->mm, &rsv->blocks);
                kfree(rsv);
        }
        drm_buddy_fini(&mgr->mm);
        mutex_unlock(&mgr->lock);

        ttm_resource_manager_cleanup(man);
        ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
}