/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	struct drm_printer p = drm_debug_printer(TTM_PFX);
	struct ttm_resource_manager *man;
	int i, mem_type;

	drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
		   bo, bo->resource->num_pages, bo->base.size >> 10,
		   bo->base.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		mem_type = placement->placement[i].mem_type;
		drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
			   i, placement->placement[i].flags, mem_type);
		man = ttm_manager_type(bo->bdev, mem_type);
		ttm_resource_manager_debug(man, &p);
	}
}

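/**
 * ttm_bo_move_to_lru_tail - move one BO to the tail of its LRU list
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of the LRU list of its current resource,
 * making it the least preferred candidate for eviction. The caller must
 * hold the BO's reservation object.
 */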
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->resource)
		ttm_resource_move_to_lru_tail(bo->resource);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

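/**
 * ttm_bo_set_bulk_move - update the BO's bulk move object
 *
 * @bo: The buffer object.
 * @bulk: The new bulk move to use, or NULL to stop using one.
 *
 * Update which ttm_lru_bulk_move the BO participates in, removing its
 * resource from the old bulk move and adding it to the new one. A bulk
 * move lets many resources be moved on the LRU at once, which is much
 * cheaper than maintaining each LRU entry individually; drivers typically
 * move the whole bulk to the LRU tail again after command submission.
 */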
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->bulk_move == bulk)
		return;

	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	bo->bulk_move = bulk;
	if (bo->resource)
		ttm_resource_add_bulk_move(bo->resource, bo);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_set_bulk_move);

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_resource *mem, bool evict,
				  struct ttm_operation_ctx *ctx,
				  struct ttm_place *hop)
{
	struct ttm_resource_manager *old_man, *new_man;
	struct ttm_device *bdev = bo->bdev;
	int ret;

	old_man = ttm_manager_type(bdev, bo->resource->mem_type);
	new_man = ttm_manager_type(bdev, mem->mem_type);

	ttm_bo_unmap_virtual(bo);

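	/*
	 * Create and populate a ttm if required.
	 */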
	if (new_man->use_tt) {
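		/* Zero-init the new TTM structure if the old location
		 * should have used one as well.
		 */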
		ret = ttm_tt_create(bo, old_man->use_tt);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
			if (ret)
				goto out_err;
		}
	}

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (ret)
		goto out_err;

	ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
	if (ret) {
		if (ret == -EMULTIHOP)
			return ret;
		goto out_err;
	}

	ctx->bytes_moved += bo->base.size;
	return 0;

out_err:
	new_man = ttm_manager_type(bdev, bo->resource->mem_type);
	if (!new_man->use_tt)
		ttm_bo_tt_destroy(bo);

	return ret;
}

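/*
 * Release the backing store of a BO: notify the driver that the memory is
 * about to go away, destroy the TTM and free the resource. Called once the
 * BO is idle and about to be destroyed.
 */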
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->funcs->delete_mem_notify)
		bo->bdev->funcs->delete_mem_notify(bo);

	ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->base.resv == &bo->base._resv)
		return 0;

	BUG_ON(!dma_resv_trylock(&bo->base._resv));

	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
	dma_resv_unlock(&bo->base._resv);
	if (r)
		return r;

	if (bo->type != ttm_bo_type_sg) {
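		/* This works because the BO is about to be destroyed and
		 * nobody references it any more. The only tricky case is
		 * the trylock on the resv object while holding the
		 * lru_lock.
		 */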
		spin_lock(&bo->bdev->lru_lock);
		bo->base.resv = &bo->base._resv;
		spin_unlock(&bo->bdev->lru_lock);
	}

	return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct dma_resv *resv = &bo->base._resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
	dma_resv_iter_end(&cursor);
}

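/**
 * ttm_bo_cleanup_refs
 * If bo idle, remove from lru lists, and unref.
 * If not idle, block if possible.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop the lru lock and optionally the reservation lock before returning.
 *
 * @bo:                    The buffer object to clean-up
 * @interruptible:         Any sleeps should occur interruptibly.
 * @no_wait_gpu:           Never wait for gpu. Return -EBUSY instead.
 * @unlock_resv:           Unlock the reservation lock as well.
 */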
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible, bool no_wait_gpu,
			       bool unlock_resv)
{
	struct dma_resv *resv = &bo->base._resv;
	int ret;

	if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
		ret = 0;
	else
		ret = -EBUSY;

	if (ret && !no_wait_gpu) {
		long lret;

		if (unlock_resv)
			dma_resv_unlock(bo->base.resv);
		spin_unlock(&bo->bdev->lru_lock);

		lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
					     interruptible,
					     30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&bo->bdev->lru_lock);
		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
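			/*
			 * We raced, and lost: someone else holds the
			 * reservation now, and is probably busy in
			 * ttm_bo_cleanup_memtype_use.
			 *
			 * Even if that is not the case, since we finished
			 * waiting any delayed destruction would succeed, so
			 * just return success here.
			 */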
			spin_unlock(&bo->bdev->lru_lock);
			return 0;
		}
		ret = 0;
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		if (unlock_resv)
			dma_resv_unlock(bo->base.resv);
		spin_unlock(&bo->bdev->lru_lock);
		return ret;
	}

	list_del_init(&bo->ddestroy);
	spin_unlock(&bo->bdev->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	if (unlock_resv)
		dma_resv_unlock(bo->base.resv);

	ttm_bo_put(bo);

	return 0;
}

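/*
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */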
bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
{
	struct list_head removed;
	bool empty;

	INIT_LIST_HEAD(&removed);

	spin_lock(&bdev->lru_lock);
	while (!list_empty(&bdev->ddestroy)) {
		struct ttm_buffer_object *bo;

		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
				      ddestroy);
		list_move_tail(&bo->ddestroy, &removed);
		if (!ttm_bo_get_unless_zero(bo))
			continue;

		if (remove_all || bo->base.resv != &bo->base._resv) {
			spin_unlock(&bdev->lru_lock);
			dma_resv_lock(bo->base.resv, NULL);

			spin_lock(&bdev->lru_lock);
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);

		} else if (dma_resv_trylock(bo->base.resv)) {
			ttm_bo_cleanup_refs(bo, false, !remove_all, true);
		} else {
			spin_unlock(&bdev->lru_lock);
		}

		ttm_bo_put(bo);
		spin_lock(&bdev->lru_lock);
	}
	list_splice_tail(&removed, &bdev->ddestroy);
	empty = list_empty(&bdev->ddestroy);
	spin_unlock(&bdev->lru_lock);

	return empty;
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
		container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_device *bdev = bo->bdev;
	int ret;

	WARN_ON_ONCE(bo->pin_count);
	WARN_ON_ONCE(bo->bulk_move);

	if (!bo->deleted) {
		ret = ttm_bo_individualize_resv(bo);
		if (ret) {
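			/* Last resort, if we fail to allocate memory for the
			 * fences, block for the BO to become idle.
			 */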
			dma_resv_wait_timeout(bo->base.resv,
					      DMA_RESV_USAGE_BOOKKEEP, false,
					      30 * HZ);
		}

		if (bo->bdev->funcs->release_notify)
			bo->bdev->funcs->release_notify(bo);

		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
		ttm_mem_io_free(bdev, bo->resource);
	}

	if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
	    !dma_resv_trylock(bo->base.resv)) {
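		/* The BO is not idle, resurrect it for delayed destroy */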
		ttm_bo_flush_all_fences(bo);
		bo->deleted = true;

		spin_lock(&bo->bdev->lru_lock);

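		/*
		 * Make pinned bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */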
		if (bo->pin_count) {
			bo->pin_count = 0;
			ttm_resource_move_to_lru_tail(bo->resource);
		}

		kref_init(&bo->kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&bo->bdev->lru_lock);

		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		return;
	}

	spin_lock(&bo->bdev->lru_lock);
	list_del(&bo->ddestroy);
	spin_unlock(&bo->bdev->lru_lock);

	ttm_bo_cleanup_memtype_use(bo);
	dma_resv_unlock(bo->base.resv);

	atomic_dec(&ttm_glob.bo_count);
	bo->destroy(bo);
}

void ttm_bo_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);

int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
				     struct ttm_resource **mem,
				     struct ttm_operation_ctx *ctx,
				     struct ttm_place *hop)
{
	struct ttm_placement hop_placement;
	struct ttm_resource *hop_mem;
	int ret;

	hop_placement.num_placement = hop_placement.num_busy_placement = 1;
	hop_placement.placement = hop_placement.busy_placement = hop;

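	/* find space in the bounce domain */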
	ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
	if (ret)
		return ret;

	ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
	if (ret) {
		ttm_resource_free(bo, &hop_mem);
		return ret;
	}
	return 0;
}

static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource *evict_mem;
	struct ttm_placement placement;
	struct ttm_place hop;
	int ret = 0;

	memset(&hop, 0, sizeof(hop));

	dma_resv_assert_held(bo->base.resv);

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->funcs->evict_flags(bo, &placement);

	if (!placement.num_placement && !placement.num_busy_placement) {
		ret = ttm_bo_wait(bo, true, false);
		if (ret)
			return ret;

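		/*
		 * Since we've already synced, this frees backing store
		 * immediately.
		 */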
		return ttm_bo_pipeline_gutting(bo);
	}

	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

bounce:
	ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
	if (ret == -EMULTIHOP) {
		ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
		if (ret) {
			pr_err("Buffer eviction failed\n");
			ttm_resource_free(bo, &evict_mem);
			goto out;
		}
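		/* try and move to final place now. */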
		goto bounce;
	}
out:
	return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	dma_resv_assert_held(bo->base.resv);
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

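	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */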
	if (place->fpfn >= (bo->resource->start + bo->resource->num_pages) ||
	    (place->lpfn && place->lpfn <= bo->resource->start))
		return false;

	return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);

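/*
 * Check whether the target bo is allowed to be evicted or swapped out:
 *
 * a. If it shares the reservation object with ctx->resv, that lock is
 *    assumed to be held already, and eviction is only allowed when
 *    ctx->allow_res_evict is set.
 *
 * b. Otherwise, trylock it.
 */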
static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
					   struct ttm_operation_ctx *ctx,
					   const struct ttm_place *place,
					   bool *locked, bool *busy)
{
	bool ret = false;

	if (bo->base.resv == ctx->resv) {
		dma_resv_assert_held(bo->base.resv);
		if (ctx->allow_res_evict)
			ret = true;
		*locked = false;
		if (busy)
			*busy = false;
	} else {
		ret = dma_resv_trylock(bo->base.resv);
		*locked = ret;
		if (busy)
			*busy = !ret;
	}

	if (ret && place && (bo->resource->mem_type != place->mem_type ||
		!bo->bdev->funcs->eviction_valuable(bo, place))) {
		ret = false;
		if (*locked) {
			dma_resv_unlock(bo->base.resv);
			*locked = false;
		}
	}

	return ret;
}

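/**
 * ttm_mem_evict_wait_busy - wait for a busy BO to become available
 *
 * @busy_bo: BO which couldn't be locked with trylock
 * @ctx: operation context
 * @ticket: acquire ticket
 *
 * Try to lock a busy buffer object to avoid failing eviction.
 */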
static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
				   struct ttm_operation_ctx *ctx,
				   struct ww_acquire_ctx *ticket)
{
	int r;

	if (!busy_bo || !ticket)
		return -EBUSY;

	if (ctx->interruptible)
		r = dma_resv_lock_interruptible(busy_bo->base.resv,
						ticket);
	else
		r = dma_resv_lock(busy_bo->base.resv, ticket);

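	/*
	 * We only needed to wait for the BO to become idle enough to lock;
	 * drop the lock again and let the caller retry the allocation.
	 */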
	if (!r)
		dma_resv_unlock(busy_bo->base.resv);

	return r == -EDEADLK ? -EBUSY : r;
}

int ttm_mem_evict_first(struct ttm_device *bdev,
			struct ttm_resource_manager *man,
			const struct ttm_place *place,
			struct ttm_operation_ctx *ctx,
			struct ww_acquire_ctx *ticket)
{
	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;
	bool locked = false;
	int ret;

	spin_lock(&bdev->lru_lock);
	ttm_resource_manager_for_each_res(man, &cursor, res) {
		bool busy;

		if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
						    &locked, &busy)) {
			if (busy && !busy_bo && ticket !=
			    dma_resv_locking_ctx(res->bo->base.resv))
				busy_bo = res->bo;
			continue;
		}

		if (ttm_bo_get_unless_zero(res->bo)) {
			bo = res->bo;
			break;
		}
		if (locked)
			dma_resv_unlock(res->bo->base.resv);
	}

	if (!bo) {
		if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
			busy_bo = NULL;
		spin_unlock(&bdev->lru_lock);
		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
		if (busy_bo)
			ttm_bo_put(busy_bo);
		return ret;
	}

	if (bo->deleted) {
		ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
					  ctx->no_wait_gpu, locked);
		ttm_bo_put(bo);
		return ret;
	}

	spin_unlock(&bdev->lru_lock);

	ret = ttm_bo_evict(bo, ctx);
	if (locked)
		ttm_bo_unreserve(bo);
	else
		ttm_bo_move_to_lru_tail_unlocked(bo);

	ttm_bo_put(bo);
	return ret;
}

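/**
 * ttm_bo_pin - Pin the buffer object.
 * @bo: The buffer object to pin
 *
 * Make sure the buffer is not evicted any more during memory pressure.
 * @bo must be unpinned again by calling ttm_bo_unpin().
 */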
void ttm_bo_pin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	++bo->pin_count;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);

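/**
 * ttm_bo_unpin - Unpin the buffer object.
 * @bo: The buffer object to unpin
 *
 * Allows the buffer object to be evicted again during memory pressure.
 */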
void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	if (WARN_ON_ONCE(!bo->pin_count))
		return;

	spin_lock(&bo->bdev->lru_lock);
	--bo->pin_count;
	if (bo->resource)
		ttm_resource_add_bulk_move(bo->resource, bo);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);

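/*
 * Add the last move fence to the BO as kernel dependency and reserve a new
 * fence slot.
 */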
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_resource_manager *man,
				 struct ttm_resource *mem,
				 bool no_wait_gpu)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (!fence)
		return 0;

	if (no_wait_gpu) {
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
		dma_fence_put(fence);
		return ret;
	}

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	dma_fence_put(fence);
	return ret;
}

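/*
 * Repeatedly evict memory from the LRU for @place->mem_type until we create
 * enough space, or we've evicted everything and there isn't enough space.
 */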
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_resource **mem,
				  struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man;
	struct ww_acquire_ctx *ticket;
	int ret;

	man = ttm_manager_type(bdev, place->mem_type);
	ticket = dma_resv_locking_ctx(bo->base.resv);
	do {
		ret = ttm_resource_alloc(bo, place, mem);
		if (likely(!ret))
			break;
		if (unlikely(ret != -ENOSPC))
			return ret;
		ret = ttm_mem_evict_first(bdev, man, place, ctx,
					  ticket);
		if (unlikely(ret != 0))
			return ret;
	} while (1);

	return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
}

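/**
 * ttm_bo_mem_space - allocate space for a buffer object
 *
 * @bo: The buffer object.
 * @placement: Proposed placements.
 * @mem: The resulting resource on success.
 * @ctx: Operation context.
 *
 * First try the placements without triggering eviction, in the priority
 * order defined by the driver. If no free space is found that way, walk
 * the busy placements and evict LRU buffers until the allocation succeeds.
 */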
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **mem,
		     struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	bool type_found = false;
	int i, ret;

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, place->mem_type);
		if (!man || !ttm_resource_manager_used(man))
			continue;

		type_found = true;
		ret = ttm_resource_alloc(bo, place, mem);
		if (ret == -ENOSPC)
			continue;
		if (unlikely(ret))
			goto error;

		ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
		if (unlikely(ret)) {
			ttm_resource_free(bo, mem);
			if (ret == -EBUSY)
				continue;

			goto error;
		}
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];
		struct ttm_resource_manager *man;

		man = ttm_manager_type(bdev, place->mem_type);
		if (!man || !ttm_resource_manager_used(man))
			continue;

		type_found = true;
		ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
		if (likely(!ret))
			return 0;

		if (ret && ret != -EBUSY)
			goto error;
	}

	ret = -ENOMEM;
	if (!type_found) {
		pr_err("No compatible memory type found\n");
		ret = -EINVAL;
	}

error:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_resource *mem;
	struct ttm_place hop;
	int ret;

	dma_resv_assert_held(bo->base.resv);

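	/*
	 * Determine where to move the buffer.
	 *
	 * If the driver determines the move needs an extra step, it will
	 * return -EMULTIHOP; the buffer is then first moved to the
	 * temporary stop, and the driver is called again to make the
	 * next hop with the buffer.
	 */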
	ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
	if (ret)
		return ret;
bounce:
	ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
	if (ret == -EMULTIHOP) {
		ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
		if (ret)
			goto out;

		goto bounce;
	}
out:
	if (ret)
		ttm_resource_free(bo, &mem);
	return ret;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	int ret;

	dma_resv_assert_held(bo->base.resv);

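	/*
	 * Remove the backing store if no placement is given.
	 */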
	if (!placement->num_placement && !placement->num_busy_placement)
		return ttm_bo_pipeline_gutting(bo);

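	/*
	 * Check whether we need to move the buffer.
	 */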
	if (!ttm_resource_compat(bo->resource, placement)) {
		ret = ttm_bo_move_buffer(bo, placement, ctx);
		if (ret)
			return ret;
	}
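	/*
	 * We might need to add a TTM.
	 */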
	if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
		ret = ttm_tt_create(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_init_reserved(struct ttm_device *bdev,
			 struct ttm_buffer_object *bo,
			 size_t size,
			 enum ttm_bo_type type,
			 struct ttm_placement *placement,
			 uint32_t page_alignment,
			 struct ttm_operation_ctx *ctx,
			 struct sg_table *sg,
			 struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
	bool locked;
	int ret;

	bo->destroy = destroy;
	kref_init(&bo->kref);
	INIT_LIST_HEAD(&bo->ddestroy);
	bo->bdev = bdev;
	bo->type = type;
	bo->page_alignment = page_alignment;
	bo->pin_count = 0;
	bo->sg = sg;
	bo->bulk_move = NULL;
	if (resv) {
		bo->base.resv = resv;
		dma_resv_assert_held(bo->base.resv);
	} else {
		bo->base.resv = &bo->base._resv;
	}
	atomic_inc(&ttm_glob.bo_count);

	ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
	if (unlikely(ret)) {
		ttm_bo_put(bo);
		return ret;
	}

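	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */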
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg)
		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
					 bo->resource->num_pages);

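	/* A passed reservation object is expected to be locked by the
	 * caller already; for BOs using their embedded resv, take the
	 * lock now.
	 */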
	if (!resv) {
		locked = dma_resv_trylock(bo->base.resv);
		WARN_ON(!locked);
	}

	if (likely(!ret))
		ret = ttm_bo_validate(bo, placement, ctx);

	if (unlikely(ret)) {
		if (!resv)
			ttm_bo_unreserve(bo);

		ttm_bo_put(bo);
		return ret;
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

int ttm_bo_init(struct ttm_device *bdev,
		struct ttm_buffer_object *bo,
		size_t size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct sg_table *sg,
		struct dma_resv *resv,
		void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
				   page_alignment, &ctx, sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init);

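/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */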
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
	ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool interruptible, bool no_wait)
{
	long timeout = 15 * HZ;

	if (no_wait) {
		if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
			return 0;
		else
			return -EBUSY;
	}

	timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
					interruptible, timeout);
	if (timeout < 0)
		return timeout;

	if (timeout == 0)
		return -EBUSY;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
		   gfp_t gfp_flags)
{
	struct ttm_place place;
	bool locked;
	int ret;

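	/*
	 * While the bo may already reside in SYSTEM placement, set
	 * SYSTEM as new placement to cover also the move further below.
	 * The driver may use the fact that we're moving from SYSTEM
	 * as an indication that we're about to swap out.
	 */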
	memset(&place, 0, sizeof(place));
	place.mem_type = bo->resource->mem_type;
	if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
		return -EBUSY;

	if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
	    bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
	    bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
	    !ttm_bo_get_unless_zero(bo)) {
		if (locked)
			dma_resv_unlock(bo->base.resv);
		return -EBUSY;
	}

	if (bo->deleted) {
		ret = ttm_bo_cleanup_refs(bo, false, false, locked);
		ttm_bo_put(bo);
		return ret == -EBUSY ? -ENOSPC : ret;
	}

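	/* Swapout is called with the lru_lock held by the caller; drop it
	 * before doing the actual move.
	 */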
	spin_unlock(&bo->bdev->lru_lock);

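	/*
	 * Move to system cached
	 */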
	if (bo->resource->mem_type != TTM_PL_SYSTEM) {
		struct ttm_operation_ctx ctx = { false, false };
		struct ttm_resource *evict_mem;
		struct ttm_place hop;

		memset(&hop, 0, sizeof(hop));
		place.mem_type = TTM_PL_SYSTEM;
		ret = ttm_resource_alloc(bo, &place, &evict_mem);
		if (unlikely(ret))
			goto out;

		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
		if (unlikely(ret != 0)) {
			WARN(ret == -EMULTIHOP,
			     "Unexpected multihop in swapout - likely driver bug.\n");
			goto out;
		}
	}

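	/*
	 * Make sure BO is idle.
	 */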
	ret = ttm_bo_wait(bo, false, false);
	if (unlikely(ret != 0))
		goto out;

	ttm_bo_unmap_virtual(bo);

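	/*
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */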
	if (bo->bdev->funcs->swap_notify)
		bo->bdev->funcs->swap_notify(bo);

	if (ttm_tt_is_populated(bo->ttm))
		ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
out:

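	/*
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */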
	if (locked)
		dma_resv_unlock(bo->base.resv);
	ttm_bo_put(bo);
	return ret == -EBUSY ? -ENOSPC : ret;
}

void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
	if (bo->ttm == NULL)
		return;

	ttm_tt_unpopulate(bo->bdev, bo->ttm);
	ttm_tt_destroy(bo->bdev, bo->ttm);
	bo->ttm = NULL;
}