0001 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
0002 /**************************************************************************
0003  *
0004  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
0005  * All Rights Reserved.
0006  *
0007  * Permission is hereby granted, free of charge, to any person obtaining a
0008  * copy of this software and associated documentation files (the
0009  * "Software"), to deal in the Software without restriction, including
0010  * without limitation the rights to use, copy, modify, merge, publish,
0011  * distribute, sub license, and/or sell copies of the Software, and to
0012  * permit persons to whom the Software is furnished to do so, subject to
0013  * the following conditions:
0014  *
0015  * The above copyright notice and this permission notice (including the
0016  * next paragraph) shall be included in all copies or substantial portions
0017  * of the Software.
0018  *
0019  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0020  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0021  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
0022  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
0023  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
0024  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
0025  * USE OR OTHER DEALINGS IN THE SOFTWARE.
0026  *
0027  **************************************************************************/
0028 /*
0029  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
0030  */
0031 
0032 #define pr_fmt(fmt) "[TTM] " fmt
0033 
0034 #include <drm/ttm/ttm_bo_driver.h>
0035 #include <drm/ttm/ttm_placement.h>
0036 #include <linux/jiffies.h>
0037 #include <linux/slab.h>
0038 #include <linux/sched.h>
0039 #include <linux/mm.h>
0040 #include <linux/file.h>
0041 #include <linux/module.h>
0042 #include <linux/atomic.h>
0043 #include <linux/dma-resv.h>
0044 
0045 #include "ttm_module.h"
0046 
0047 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
0048                     struct ttm_placement *placement)
0049 {
0050     struct drm_printer p = drm_debug_printer(TTM_PFX);
0051     struct ttm_resource_manager *man;
0052     int i, mem_type;
0053 
0054     drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
0055            bo, bo->resource->num_pages, bo->base.size >> 10,
0056            bo->base.size >> 20);
0057     for (i = 0; i < placement->num_placement; i++) {
0058         mem_type = placement->placement[i].mem_type;
0059         drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
0060                i, placement->placement[i].flags, mem_type);
0061         man = ttm_manager_type(bo->bdev, mem_type);
0062         ttm_resource_manager_debug(man, &p);
0063     }
0064 }
0065 
0066 /**
0067  * ttm_bo_move_to_lru_tail
0068  *
0069  * @bo: The buffer object.
0070  *
0071  * Move this BO to the tail of all lru lists used to lookup and reserve an
0072  * object. This function must be called with struct ttm_device::lru_lock
0073  * held, and is used to make a BO less likely to be considered for eviction.
0074  */
0075 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
0076 {
0077     dma_resv_assert_held(bo->base.resv);
0078 
0079     if (bo->resource)
0080         ttm_resource_move_to_lru_tail(bo->resource);
0081 }
0082 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
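/*
 * Usage sketch (illustrative only, not called from this file): callers
 * typically bump a BO's LRU position right after command submission, with
 * both the reservation object and the device's LRU lock held:
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	spin_lock(&bo->bdev->lru_lock);
 *	ttm_bo_move_to_lru_tail(bo);
 *	spin_unlock(&bo->bdev->lru_lock);
 *	dma_resv_unlock(bo->base.resv);
 */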
0083 
0084 /**
0085  * ttm_bo_set_bulk_move - update the BO's bulk move object
0086  *
0087  * @bo: The buffer object.
0088  *
0089  * Update the BO's bulk move object, making sure that resources are added/removed
0090  * as well. A bulk move allows moving many resources on the LRU at once,
0091  * resulting in much less overhead of maintaining the LRU.
0092  * The only requirement is that the resources stay together on the LRU and are
0093  * never separated. This is enforced by setting the bulk_move structure on a BO.
0094  * ttm_lru_bulk_move_tail() should be used to move all resources to the tail of
0095  * their LRU list.
0096  */
0097 void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
0098               struct ttm_lru_bulk_move *bulk)
0099 {
0100     dma_resv_assert_held(bo->base.resv);
0101 
0102     if (bo->bulk_move == bulk)
0103         return;
0104 
0105     spin_lock(&bo->bdev->lru_lock);
0106     if (bo->resource)
0107         ttm_resource_del_bulk_move(bo->resource, bo);
0108     bo->bulk_move = bulk;
0109     if (bo->resource)
0110         ttm_resource_add_bulk_move(bo->resource, bo);
0111     spin_unlock(&bo->bdev->lru_lock);
0112 }
0113 EXPORT_SYMBOL(ttm_bo_set_bulk_move);
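/*
 * Usage sketch (illustrative only): a driver that submits many BOs per
 * VM/context can group their resources in one bulk move object so that
 * bumping them on the LRU is one operation instead of per-BO work.
 * for_each_vm_bo() is a hypothetical driver-side iterator and bdev the
 * driver's ttm_device; every BO is assumed to be reserved here:
 *
 *	struct ttm_lru_bulk_move bulk;
 *
 *	ttm_lru_bulk_move_init(&bulk);
 *	for_each_vm_bo(vm, bo)
 *		ttm_bo_set_bulk_move(bo, &bulk);
 *
 *	After each submission the whole group is moved to the LRU tail:
 *
 *	spin_lock(&bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&bulk);
 *	spin_unlock(&bdev->lru_lock);
 */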
0114 
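/*
 * Move the BO's backing store to @mem: create and populate a ttm if the new
 * placement needs one, reserve a fence slot and hand the actual copy off to
 * the driver's move() callback. On -EMULTIHOP the driver has filled in @hop
 * and the caller is expected to bounce through that placement first.
 */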
0115 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
0116                   struct ttm_resource *mem, bool evict,
0117                   struct ttm_operation_ctx *ctx,
0118                   struct ttm_place *hop)
0119 {
0120     struct ttm_resource_manager *old_man, *new_man;
0121     struct ttm_device *bdev = bo->bdev;
0122     int ret;
0123 
0124     old_man = ttm_manager_type(bdev, bo->resource->mem_type);
0125     new_man = ttm_manager_type(bdev, mem->mem_type);
0126 
0127     ttm_bo_unmap_virtual(bo);
0128 
0129     /*
0130      * Create and bind a ttm if required.
0131      */
0132 
0133     if (new_man->use_tt) {
0134         /* Zero init the new TTM structure if the old location should
0135          * have used one as well.
0136          */
0137         ret = ttm_tt_create(bo, old_man->use_tt);
0138         if (ret)
0139             goto out_err;
0140 
0141         if (mem->mem_type != TTM_PL_SYSTEM) {
0142             ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
0143             if (ret)
0144                 goto out_err;
0145         }
0146     }
0147 
0148     ret = dma_resv_reserve_fences(bo->base.resv, 1);
0149     if (ret)
0150         goto out_err;
0151 
0152     ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
0153     if (ret) {
0154         if (ret == -EMULTIHOP)
0155             return ret;
0156         goto out_err;
0157     }
0158 
0159     ctx->bytes_moved += bo->base.size;
0160     return 0;
0161 
0162 out_err:
0163     new_man = ttm_manager_type(bdev, bo->resource->mem_type);
0164     if (!new_man->use_tt)
0165         ttm_bo_tt_destroy(bo);
0166 
0167     return ret;
0168 }
0169 
0170 /*
0171  * Called with bo::reserved held.
0172  * Will release GPU memory type usage on destruction.
0173  * This is the place to put in driver specific hooks to release
0174  * driver private resources.
0175  * The bo::reserved lock is released by the caller afterwards.
0176  */
0177 
0178 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
0179 {
0180     if (bo->bdev->funcs->delete_mem_notify)
0181         bo->bdev->funcs->delete_mem_notify(bo);
0182 
0183     ttm_bo_tt_destroy(bo);
0184     ttm_resource_free(bo, &bo->resource);
0185 }
0186 
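/*
 * Give the BO its own embedded reservation object by copying all fences from
 * the possibly shared resv it currently uses. This is done before destruction
 * so that waiting for the BO to become idle does not depend on other BOs
 * sharing the same reservation object.
 */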
0187 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
0188 {
0189     int r;
0190 
0191     if (bo->base.resv == &bo->base._resv)
0192         return 0;
0193 
0194     BUG_ON(!dma_resv_trylock(&bo->base._resv));
0195 
0196     r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
0197     dma_resv_unlock(&bo->base._resv);
0198     if (r)
0199         return r;
0200 
0201     if (bo->type != ttm_bo_type_sg) {
0202         /* This works because the BO is about to be destroyed and nobody
0203          * references it anymore. The only tricky case is the trylock on
0204          * the resv object while holding the lru_lock.
0205          */
0206         spin_lock(&bo->bdev->lru_lock);
0207         bo->base.resv = &bo->base._resv;
0208         spin_unlock(&bo->bdev->lru_lock);
0209     }
0210 
0211     return r;
0212 }
0213 
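/*
 * Make sure all fences on the individualized reservation object will signal
 * eventually by enabling software signaling where needed, so that delayed
 * destruction can make progress.
 */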
0214 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
0215 {
0216     struct dma_resv *resv = &bo->base._resv;
0217     struct dma_resv_iter cursor;
0218     struct dma_fence *fence;
0219 
0220     dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
0221     dma_resv_for_each_fence_unlocked(&cursor, fence) {
0222         if (!fence->ops->signaled)
0223             dma_fence_enable_sw_signaling(fence);
0224     }
0225     dma_resv_iter_end(&cursor);
0226 }
0227 
0228 /**
0229  * ttm_bo_cleanup_refs
0230  * If the bo is idle, remove it from the lru lists and unref it.
0231  * If not idle, block if possible.
0232  *
0233  * Must be called with lru_lock and reservation held; this function
0234  * will drop the lru_lock and optionally the reservation lock before returning.
0235  *
0236  * @bo:                    The buffer object to clean-up
0237  * @interruptible:         Any sleeps should occur interruptibly.
0238  * @no_wait_gpu:           Never wait for gpu. Return -EBUSY instead.
0239  * @unlock_resv:           Unlock the reservation lock as well.
0240  */
0241 
0242 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
0243                    bool interruptible, bool no_wait_gpu,
0244                    bool unlock_resv)
0245 {
0246     struct dma_resv *resv = &bo->base._resv;
0247     int ret;
0248 
0249     if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
0250         ret = 0;
0251     else
0252         ret = -EBUSY;
0253 
0254     if (ret && !no_wait_gpu) {
0255         long lret;
0256 
0257         if (unlock_resv)
0258             dma_resv_unlock(bo->base.resv);
0259         spin_unlock(&bo->bdev->lru_lock);
0260 
0261         lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
0262                          interruptible,
0263                          30 * HZ);
0264 
0265         if (lret < 0)
0266             return lret;
0267         else if (lret == 0)
0268             return -EBUSY;
0269 
0270         spin_lock(&bo->bdev->lru_lock);
0271         if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
0272             /*
0273              * We raced and lost; someone else holds the reservation now,
0274              * and is probably busy in ttm_bo_cleanup_memtype_use.
0275              *
0276              * Even if that's not the case, any delayed destruction would
0277              * succeed now that we have finished waiting, so just return
0278              * success here.
0279              */
0280             spin_unlock(&bo->bdev->lru_lock);
0281             return 0;
0282         }
0283         ret = 0;
0284     }
0285 
0286     if (ret || unlikely(list_empty(&bo->ddestroy))) {
0287         if (unlock_resv)
0288             dma_resv_unlock(bo->base.resv);
0289         spin_unlock(&bo->bdev->lru_lock);
0290         return ret;
0291     }
0292 
0293     list_del_init(&bo->ddestroy);
0294     spin_unlock(&bo->bdev->lru_lock);
0295     ttm_bo_cleanup_memtype_use(bo);
0296 
0297     if (unlock_resv)
0298         dma_resv_unlock(bo->base.resv);
0299 
0300     ttm_bo_put(bo);
0301 
0302     return 0;
0303 }
0304 
0305 /*
0306  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
0307  * encountered buffers.
0308  */
0309 bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
0310 {
0311     struct list_head removed;
0312     bool empty;
0313 
0314     INIT_LIST_HEAD(&removed);
0315 
0316     spin_lock(&bdev->lru_lock);
0317     while (!list_empty(&bdev->ddestroy)) {
0318         struct ttm_buffer_object *bo;
0319 
0320         bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
0321                       ddestroy);
0322         list_move_tail(&bo->ddestroy, &removed);
0323         if (!ttm_bo_get_unless_zero(bo))
0324             continue;
0325 
0326         if (remove_all || bo->base.resv != &bo->base._resv) {
0327             spin_unlock(&bdev->lru_lock);
0328             dma_resv_lock(bo->base.resv, NULL);
0329 
0330             spin_lock(&bdev->lru_lock);
0331             ttm_bo_cleanup_refs(bo, false, !remove_all, true);
0332 
0333         } else if (dma_resv_trylock(bo->base.resv)) {
0334             ttm_bo_cleanup_refs(bo, false, !remove_all, true);
0335         } else {
0336             spin_unlock(&bdev->lru_lock);
0337         }
0338 
0339         ttm_bo_put(bo);
0340         spin_lock(&bdev->lru_lock);
0341     }
0342     list_splice_tail(&removed, &bdev->ddestroy);
0343     empty = list_empty(&bdev->ddestroy);
0344     spin_unlock(&bdev->lru_lock);
0345 
0346     return empty;
0347 }
0348 
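/*
 * Final kref release handler. If the BO is still busy it is individualized,
 * its fences are flushed and it is put on the delayed-destroy list to be
 * reaped later by ttm_bo_delayed_delete(); otherwise its backing store is
 * released immediately and the driver's destroy() callback frees the object.
 */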
0349 static void ttm_bo_release(struct kref *kref)
0350 {
0351     struct ttm_buffer_object *bo =
0352         container_of(kref, struct ttm_buffer_object, kref);
0353     struct ttm_device *bdev = bo->bdev;
0354     int ret;
0355 
0356     WARN_ON_ONCE(bo->pin_count);
0357     WARN_ON_ONCE(bo->bulk_move);
0358 
0359     if (!bo->deleted) {
0360         ret = ttm_bo_individualize_resv(bo);
0361         if (ret) {
0362             /* Last resort: if we fail to allocate memory for the
0363              * fences, block until the BO becomes idle
0364              */
0365             dma_resv_wait_timeout(bo->base.resv,
0366                           DMA_RESV_USAGE_BOOKKEEP, false,
0367                           30 * HZ);
0368         }
0369 
0370         if (bo->bdev->funcs->release_notify)
0371             bo->bdev->funcs->release_notify(bo);
0372 
0373         drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
0374         ttm_mem_io_free(bdev, bo->resource);
0375     }
0376 
0377     if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
0378         !dma_resv_trylock(bo->base.resv)) {
0379         /* The BO is not idle, resurrect it for delayed destroy */
0380         ttm_bo_flush_all_fences(bo);
0381         bo->deleted = true;
0382 
0383         spin_lock(&bo->bdev->lru_lock);
0384 
0385         /*
0386          * Make pinned bos immediately available to
0387          * shrinkers, now that they are queued for
0388          * destruction.
0389          *
0390          * FIXME: QXL is triggering this. Can be removed when the
0391          * driver is fixed.
0392          */
0393         if (bo->pin_count) {
0394             bo->pin_count = 0;
0395             ttm_resource_move_to_lru_tail(bo->resource);
0396         }
0397 
0398         kref_init(&bo->kref);
0399         list_add_tail(&bo->ddestroy, &bdev->ddestroy);
0400         spin_unlock(&bo->bdev->lru_lock);
0401 
0402         schedule_delayed_work(&bdev->wq,
0403                       ((HZ / 100) < 1) ? 1 : HZ / 100);
0404         return;
0405     }
0406 
0407     spin_lock(&bo->bdev->lru_lock);
0408     list_del(&bo->ddestroy);
0409     spin_unlock(&bo->bdev->lru_lock);
0410 
0411     ttm_bo_cleanup_memtype_use(bo);
0412     dma_resv_unlock(bo->base.resv);
0413 
0414     atomic_dec(&ttm_glob.bo_count);
0415     bo->destroy(bo);
0416 }
0417 
0418 void ttm_bo_put(struct ttm_buffer_object *bo)
0419 {
0420     kref_put(&bo->kref, ttm_bo_release);
0421 }
0422 EXPORT_SYMBOL(ttm_bo_put);
0423 
0424 int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
0425 {
0426     return cancel_delayed_work_sync(&bdev->wq);
0427 }
0428 EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
0429 
0430 void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
0431 {
0432     if (resched)
0433         schedule_delayed_work(&bdev->wq,
0434                       ((HZ / 100) < 1) ? 1 : HZ / 100);
0435 }
0436 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
0437 
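/*
 * Handle the -EMULTIHOP case: allocate space in the temporary placement the
 * driver asked for in @hop and move the buffer there, so the caller can then
 * retry the move to the final placement.
 */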
0438 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
0439                      struct ttm_resource **mem,
0440                      struct ttm_operation_ctx *ctx,
0441                      struct ttm_place *hop)
0442 {
0443     struct ttm_placement hop_placement;
0444     struct ttm_resource *hop_mem;
0445     int ret;
0446 
0447     hop_placement.num_placement = hop_placement.num_busy_placement = 1;
0448     hop_placement.placement = hop_placement.busy_placement = hop;
0449 
0450     /* find space in the bounce domain */
0451     ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
0452     if (ret)
0453         return ret;
0454     /* move to the bounce domain */
0455     ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
0456     if (ret) {
0457         ttm_resource_free(bo, &hop_mem);
0458         return ret;
0459     }
0460     return 0;
0461 }
0462 
0463 static int ttm_bo_evict(struct ttm_buffer_object *bo,
0464             struct ttm_operation_ctx *ctx)
0465 {
0466     struct ttm_device *bdev = bo->bdev;
0467     struct ttm_resource *evict_mem;
0468     struct ttm_placement placement;
0469     struct ttm_place hop;
0470     int ret = 0;
0471 
0472     memset(&hop, 0, sizeof(hop));
0473 
0474     dma_resv_assert_held(bo->base.resv);
0475 
0476     placement.num_placement = 0;
0477     placement.num_busy_placement = 0;
0478     bdev->funcs->evict_flags(bo, &placement);
0479 
0480     if (!placement.num_placement && !placement.num_busy_placement) {
0481         ret = ttm_bo_wait(bo, true, false);
0482         if (ret)
0483             return ret;
0484 
0485         /*
0486          * Since we've already synced, this frees backing store
0487          * immediately.
0488          */
0489         return ttm_bo_pipeline_gutting(bo);
0490     }
0491 
0492     ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
0493     if (ret) {
0494         if (ret != -ERESTARTSYS) {
0495             pr_err("Failed to find memory space for buffer 0x%p eviction\n",
0496                    bo);
0497             ttm_bo_mem_space_debug(bo, &placement);
0498         }
0499         goto out;
0500     }
0501 
0502 bounce:
0503     ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
0504     if (ret == -EMULTIHOP) {
0505         ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
0506         if (ret) {
0507             pr_err("Buffer eviction failed\n");
0508             ttm_resource_free(bo, &evict_mem);
0509             goto out;
0510         }
0511         /* try and move to final place now. */
0512         goto bounce;
0513     }
0514 out:
0515     return ret;
0516 }
0517 
0518 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
0519                   const struct ttm_place *place)
0520 {
0521     dma_resv_assert_held(bo->base.resv);
0522     if (bo->resource->mem_type == TTM_PL_SYSTEM)
0523         return true;
0524 
0525     /* Don't evict this BO if it's outside of the
0526      * requested placement range
0527      */
0528     if (place->fpfn >= (bo->resource->start + bo->resource->num_pages) ||
0529         (place->lpfn && place->lpfn <= bo->resource->start))
0530         return false;
0531 
0532     return true;
0533 }
0534 EXPORT_SYMBOL(ttm_bo_eviction_valuable);
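/*
 * Usage sketch (illustrative only): drivers may wrap this default in their
 * own ttm_device_funcs::eviction_valuable callback to veto evictions that
 * would not actually help. foo_bo_is_busy_for_scanout() is a hypothetical
 * driver-side check:
 *
 *	static bool foo_eviction_valuable(struct ttm_buffer_object *bo,
 *					  const struct ttm_place *place)
 *	{
 *		if (foo_bo_is_busy_for_scanout(bo))
 *			return false;
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 */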
0535 
0536 /*
0537  * Check whether the target bo may be evicted or swapped out, including cases:
0538  *
0539  * a. if it shares the same reservation object with ctx->resv, assume the
0540  * reservation object is already locked, so don't lock it again and return
0541  * true directly when either the operation allows reserved eviction
0542  * (allow_res_evict) or the target bo is already on the delayed free list;
0543  *
0544  * b. Otherwise, trylock it.
0545  */
0546 static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
0547                        struct ttm_operation_ctx *ctx,
0548                        const struct ttm_place *place,
0549                        bool *locked, bool *busy)
0550 {
0551     bool ret = false;
0552 
0553     if (bo->base.resv == ctx->resv) {
0554         dma_resv_assert_held(bo->base.resv);
0555         if (ctx->allow_res_evict)
0556             ret = true;
0557         *locked = false;
0558         if (busy)
0559             *busy = false;
0560     } else {
0561         ret = dma_resv_trylock(bo->base.resv);
0562         *locked = ret;
0563         if (busy)
0564             *busy = !ret;
0565     }
0566 
0567     if (ret && place && (bo->resource->mem_type != place->mem_type ||
0568         !bo->bdev->funcs->eviction_valuable(bo, place))) {
0569         ret = false;
0570         if (*locked) {
0571             dma_resv_unlock(bo->base.resv);
0572             *locked = false;
0573         }
0574     }
0575 
0576     return ret;
0577 }
0578 
0579 /**
0580  * ttm_mem_evict_wait_busy - wait for a busy BO to become available
0581  *
0582  * @busy_bo: BO which couldn't be locked with trylock
0583  * @ctx: operation context
0584  * @ticket: acquire ticket
0585  *
0586  * Try to lock a busy buffer object to avoid failing eviction.
0587  */
0588 static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
0589                    struct ttm_operation_ctx *ctx,
0590                    struct ww_acquire_ctx *ticket)
0591 {
0592     int r;
0593 
0594     if (!busy_bo || !ticket)
0595         return -EBUSY;
0596 
0597     if (ctx->interruptible)
0598         r = dma_resv_lock_interruptible(busy_bo->base.resv,
0599                               ticket);
0600     else
0601         r = dma_resv_lock(busy_bo->base.resv, ticket);
0602 
0603     /*
0604      * TODO: It would be better to keep the BO locked until allocation is at
0605      * least tried one more time, but that would mean a much larger rework
0606      * of TTM.
0607      */
0608     if (!r)
0609         dma_resv_unlock(busy_bo->base.resv);
0610 
0611     return r == -EDEADLK ? -EBUSY : r;
0612 }
0613 
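/*
 * Walk the LRU of @man and evict the first BO that may be evicted for
 * @place. BOs that could not be trylocked are remembered so that, as a last
 * resort, we can wait for one of them to become available instead of failing
 * the eviction outright.
 */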
0614 int ttm_mem_evict_first(struct ttm_device *bdev,
0615             struct ttm_resource_manager *man,
0616             const struct ttm_place *place,
0617             struct ttm_operation_ctx *ctx,
0618             struct ww_acquire_ctx *ticket)
0619 {
0620     struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
0621     struct ttm_resource_cursor cursor;
0622     struct ttm_resource *res;
0623     bool locked = false;
0624     int ret;
0625 
0626     spin_lock(&bdev->lru_lock);
0627     ttm_resource_manager_for_each_res(man, &cursor, res) {
0628         bool busy;
0629 
0630         if (!ttm_bo_evict_swapout_allowable(res->bo, ctx, place,
0631                             &locked, &busy)) {
0632             if (busy && !busy_bo && ticket !=
0633                 dma_resv_locking_ctx(res->bo->base.resv))
0634                 busy_bo = res->bo;
0635             continue;
0636         }
0637 
0638         if (ttm_bo_get_unless_zero(res->bo)) {
0639             bo = res->bo;
0640             break;
0641         }
0642         if (locked)
0643             dma_resv_unlock(res->bo->base.resv);
0644     }
0645 
0646     if (!bo) {
0647         if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
0648             busy_bo = NULL;
0649         spin_unlock(&bdev->lru_lock);
0650         ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
0651         if (busy_bo)
0652             ttm_bo_put(busy_bo);
0653         return ret;
0654     }
0655 
0656     if (bo->deleted) {
0657         ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
0658                       ctx->no_wait_gpu, locked);
0659         ttm_bo_put(bo);
0660         return ret;
0661     }
0662 
0663     spin_unlock(&bdev->lru_lock);
0664 
0665     ret = ttm_bo_evict(bo, ctx);
0666     if (locked)
0667         ttm_bo_unreserve(bo);
0668     else
0669         ttm_bo_move_to_lru_tail_unlocked(bo);
0670 
0671     ttm_bo_put(bo);
0672     return ret;
0673 }
0674 
0675 /**
0676  * ttm_bo_pin - Pin the buffer object.
0677  * @bo: The buffer object to pin
0678  *
0679  * Make sure the buffer is not evicted any more during memory pressure.
0680  * @bo must be unpinned again by calling ttm_bo_unpin().
0681  */
0682 void ttm_bo_pin(struct ttm_buffer_object *bo)
0683 {
0684     dma_resv_assert_held(bo->base.resv);
0685     WARN_ON_ONCE(!kref_read(&bo->kref));
0686     spin_lock(&bo->bdev->lru_lock);
0687     if (bo->resource)
0688         ttm_resource_del_bulk_move(bo->resource, bo);
0689     ++bo->pin_count;
0690     spin_unlock(&bo->bdev->lru_lock);
0691 }
0692 EXPORT_SYMBOL(ttm_bo_pin);
0693 
0694 /**
0695  * ttm_bo_unpin - Unpin the buffer object.
0696  * @bo: The buffer object to unpin
0697  *
0698  * Allows the buffer object to be evicted again during memory pressure.
0699  */
0700 void ttm_bo_unpin(struct ttm_buffer_object *bo)
0701 {
0702     dma_resv_assert_held(bo->base.resv);
0703     WARN_ON_ONCE(!kref_read(&bo->kref));
0704     if (WARN_ON_ONCE(!bo->pin_count))
0705         return;
0706 
0707     spin_lock(&bo->bdev->lru_lock);
0708     --bo->pin_count;
0709     if (bo->resource)
0710         ttm_resource_add_bulk_move(bo->resource, bo);
0711     spin_unlock(&bo->bdev->lru_lock);
0712 }
0713 EXPORT_SYMBOL(ttm_bo_unpin);
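/*
 * Usage sketch (illustrative only): pinning is done with the reservation
 * held and must be balanced by an unpin, e.g. around a scanout or DMA window
 * during which the buffer must not move:
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_pin(bo);
 *	dma_resv_unlock(bo->base.resv);
 *
 *	... hardware uses the buffer at a fixed location ...
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_unpin(bo);
 *	dma_resv_unlock(bo->base.resv);
 */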
0714 
0715 /*
0716  * Add the last move fence to the BO as kernel dependency and reserve a new
0717  * fence slot.
0718  */
0719 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
0720                  struct ttm_resource_manager *man,
0721                  struct ttm_resource *mem,
0722                  bool no_wait_gpu)
0723 {
0724     struct dma_fence *fence;
0725     int ret;
0726 
0727     spin_lock(&man->move_lock);
0728     fence = dma_fence_get(man->move);
0729     spin_unlock(&man->move_lock);
0730 
0731     if (!fence)
0732         return 0;
0733 
0734     if (no_wait_gpu) {
0735         ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
0736         dma_fence_put(fence);
0737         return ret;
0738     }
0739 
0740     dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
0741 
0742     ret = dma_resv_reserve_fences(bo->base.resv, 1);
0743     dma_fence_put(fence);
0744     return ret;
0745 }
0746 
0747 /*
0748  * Repeatedly evict memory from the LRU for @place->mem_type until we create
0749  * enough space, or we've evicted everything and there isn't enough space.
0750  */
0751 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
0752                   const struct ttm_place *place,
0753                   struct ttm_resource **mem,
0754                   struct ttm_operation_ctx *ctx)
0755 {
0756     struct ttm_device *bdev = bo->bdev;
0757     struct ttm_resource_manager *man;
0758     struct ww_acquire_ctx *ticket;
0759     int ret;
0760 
0761     man = ttm_manager_type(bdev, place->mem_type);
0762     ticket = dma_resv_locking_ctx(bo->base.resv);
0763     do {
0764         ret = ttm_resource_alloc(bo, place, mem);
0765         if (likely(!ret))
0766             break;
0767         if (unlikely(ret != -ENOSPC))
0768             return ret;
0769         ret = ttm_mem_evict_first(bdev, man, place, ctx,
0770                       ticket);
0771         if (unlikely(ret != 0))
0772             return ret;
0773     } while (1);
0774 
0775     return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
0776 }
0777 
0778 /*
0779  * Creates space for memory region @mem according to its type.
0780  *
0781  * This function first searches for free space in compatible memory types in
0782  * the priority order defined by the driver.  If free space isn't found, then
0783  * ttm_bo_mem_force_space is attempted in priority order to evict and find
0784  * space.
0785  */
0786 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
0787             struct ttm_placement *placement,
0788             struct ttm_resource **mem,
0789             struct ttm_operation_ctx *ctx)
0790 {
0791     struct ttm_device *bdev = bo->bdev;
0792     bool type_found = false;
0793     int i, ret;
0794 
0795     ret = dma_resv_reserve_fences(bo->base.resv, 1);
0796     if (unlikely(ret))
0797         return ret;
0798 
0799     for (i = 0; i < placement->num_placement; ++i) {
0800         const struct ttm_place *place = &placement->placement[i];
0801         struct ttm_resource_manager *man;
0802 
0803         man = ttm_manager_type(bdev, place->mem_type);
0804         if (!man || !ttm_resource_manager_used(man))
0805             continue;
0806 
0807         type_found = true;
0808         ret = ttm_resource_alloc(bo, place, mem);
0809         if (ret == -ENOSPC)
0810             continue;
0811         if (unlikely(ret))
0812             goto error;
0813 
0814         ret = ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
0815         if (unlikely(ret)) {
0816             ttm_resource_free(bo, mem);
0817             if (ret == -EBUSY)
0818                 continue;
0819 
0820             goto error;
0821         }
0822         return 0;
0823     }
0824 
0825     for (i = 0; i < placement->num_busy_placement; ++i) {
0826         const struct ttm_place *place = &placement->busy_placement[i];
0827         struct ttm_resource_manager *man;
0828 
0829         man = ttm_manager_type(bdev, place->mem_type);
0830         if (!man || !ttm_resource_manager_used(man))
0831             continue;
0832 
0833         type_found = true;
0834         ret = ttm_bo_mem_force_space(bo, place, mem, ctx);
0835         if (likely(!ret))
0836             return 0;
0837 
0838         if (ret && ret != -EBUSY)
0839             goto error;
0840     }
0841 
0842     ret = -ENOMEM;
0843     if (!type_found) {
0844         pr_err("No compatible memory type found\n");
0845         ret = -EINVAL;
0846     }
0847 
0848 error:
0849     return ret;
0850 }
0851 EXPORT_SYMBOL(ttm_bo_mem_space);
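/*
 * Usage sketch (illustrative only): callers describe where a BO may live
 * with an array of ttm_place entries; busy_placement is the fallback set
 * tried with eviction when the preferred set is full. The exact placements
 * (here TTM_PL_VRAM and TTM_PL_TT) are up to the driver:
 *
 *	static const struct ttm_place places[] = {
 *		{ .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_VRAM, .flags = 0 },
 *		{ .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_TT, .flags = 0 },
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = ARRAY_SIZE(places),
 *		.placement = places,
 *		.num_busy_placement = 1,
 *		.busy_placement = &places[1],
 *	};
 */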
0852 
0853 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
0854                   struct ttm_placement *placement,
0855                   struct ttm_operation_ctx *ctx)
0856 {
0857     struct ttm_resource *mem;
0858     struct ttm_place hop;
0859     int ret;
0860 
0861     dma_resv_assert_held(bo->base.resv);
0862 
0863     /*
0864      * Determine where to move the buffer.
0865      *
0866      * If the driver determines the move is going to need
0867      * an extra step, it will return -EMULTIHOP; the buffer
0868      * will then be moved to the temporary stop and the
0869      * driver will be called again to make
0870      * the second hop.
0871      */
0872     ret = ttm_bo_mem_space(bo, placement, &mem, ctx);
0873     if (ret)
0874         return ret;
0875 bounce:
0876     ret = ttm_bo_handle_move_mem(bo, mem, false, ctx, &hop);
0877     if (ret == -EMULTIHOP) {
0878         ret = ttm_bo_bounce_temp_buffer(bo, &mem, ctx, &hop);
0879         if (ret)
0880             goto out;
0881         /* try and move to final place now. */
0882         goto bounce;
0883     }
0884 out:
0885     if (ret)
0886         ttm_resource_free(bo, &mem);
0887     return ret;
0888 }
0889 
0890 int ttm_bo_validate(struct ttm_buffer_object *bo,
0891             struct ttm_placement *placement,
0892             struct ttm_operation_ctx *ctx)
0893 {
0894     int ret;
0895 
0896     dma_resv_assert_held(bo->base.resv);
0897 
0898     /*
0899      * Remove the backing store if no placement is given.
0900      */
0901     if (!placement->num_placement && !placement->num_busy_placement)
0902         return ttm_bo_pipeline_gutting(bo);
0903 
0904     /*
0905      * Check whether we need to move buffer.
0906      */
0907     if (!ttm_resource_compat(bo->resource, placement)) {
0908         ret = ttm_bo_move_buffer(bo, placement, ctx);
0909         if (ret)
0910             return ret;
0911     }
0912     /*
0913      * We might need to add a TTM.
0914      */
0915     if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
0916         ret = ttm_tt_create(bo, true);
0917         if (ret)
0918             return ret;
0919     }
0920     return 0;
0921 }
0922 EXPORT_SYMBOL(ttm_bo_validate);
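/*
 * Usage sketch (illustrative only): validation is how a driver actually
 * materializes a BO in one of the requested placements, typically right
 * before command submission and with the reservation held. "placement" is
 * a driver-provided struct ttm_placement:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true,
 *					 .no_wait_gpu = false };
 *	int ret;
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	dma_resv_unlock(bo->base.resv);
 */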
0923 
0924 int ttm_bo_init_reserved(struct ttm_device *bdev,
0925              struct ttm_buffer_object *bo,
0926              size_t size,
0927              enum ttm_bo_type type,
0928              struct ttm_placement *placement,
0929              uint32_t page_alignment,
0930              struct ttm_operation_ctx *ctx,
0931              struct sg_table *sg,
0932              struct dma_resv *resv,
0933              void (*destroy) (struct ttm_buffer_object *))
0934 {
0935     static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
0936     bool locked;
0937     int ret;
0938 
0939     bo->destroy = destroy;
0940     kref_init(&bo->kref);
0941     INIT_LIST_HEAD(&bo->ddestroy);
0942     bo->bdev = bdev;
0943     bo->type = type;
0944     bo->page_alignment = page_alignment;
0945     bo->pin_count = 0;
0946     bo->sg = sg;
0947     bo->bulk_move = NULL;
0948     if (resv) {
0949         bo->base.resv = resv;
0950         dma_resv_assert_held(bo->base.resv);
0951     } else {
0952         bo->base.resv = &bo->base._resv;
0953     }
0954     atomic_inc(&ttm_glob.bo_count);
0955 
0956     ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
0957     if (unlikely(ret)) {
0958         ttm_bo_put(bo);
0959         return ret;
0960     }
0961 
0962     /*
0963      * For ttm_bo_type_device buffers, allocate
0964      * address space from the device.
0965      */
0966     if (bo->type == ttm_bo_type_device ||
0967         bo->type == ttm_bo_type_sg)
0968         ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
0969                      bo->resource->num_pages);
0970 
0971     /* passed reservation objects should already be locked,
0972      * since otherwise lockdep will be angered in radeon.
0973      */
0974     if (!resv) {
0975         locked = dma_resv_trylock(bo->base.resv);
0976         WARN_ON(!locked);
0977     }
0978 
0979     if (likely(!ret))
0980         ret = ttm_bo_validate(bo, placement, ctx);
0981 
0982     if (unlikely(ret)) {
0983         if (!resv)
0984             ttm_bo_unreserve(bo);
0985 
0986         ttm_bo_put(bo);
0987         return ret;
0988     }
0989 
0990     return ret;
0991 }
0992 EXPORT_SYMBOL(ttm_bo_init_reserved);
0993 
0994 int ttm_bo_init(struct ttm_device *bdev,
0995         struct ttm_buffer_object *bo,
0996         size_t size,
0997         enum ttm_bo_type type,
0998         struct ttm_placement *placement,
0999         uint32_t page_alignment,
1000         bool interruptible,
1001         struct sg_table *sg,
1002         struct dma_resv *resv,
1003         void (*destroy) (struct ttm_buffer_object *))
1004 {
1005     struct ttm_operation_ctx ctx = { interruptible, false };
1006     int ret;
1007 
1008     ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
1009                    page_alignment, &ctx, sg, resv, destroy);
1010     if (ret)
1011         return ret;
1012 
1013     if (!resv)
1014         ttm_bo_unreserve(bo);
1015 
1016     return 0;
1017 }
1018 EXPORT_SYMBOL(ttm_bo_init);
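/*
 * Usage sketch (illustrative only): a driver usually embeds the
 * ttm_buffer_object in its own BO structure and frees that wrapper from the
 * destroy() callback. "struct foo_bo" is hypothetical, size and placement
 * come from the driver:
 *
 *	struct foo_bo {
 *		struct ttm_buffer_object tbo;
 *	};
 *
 *	static void foo_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		kfree(container_of(tbo, struct foo_bo, tbo));
 *	}
 *
 *	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	ret = ttm_bo_init(bdev, &bo->tbo, size, ttm_bo_type_device,
 *			  &placement, 0, true, NULL, NULL, foo_bo_destroy);
 */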
1019 
1020 /*
1021  * buffer object vm functions.
1022  */
1023 
1024 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1025 {
1026     struct ttm_device *bdev = bo->bdev;
1027 
1028     drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
1029     ttm_mem_io_free(bdev, bo->resource);
1030 }
1031 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1032 
1033 int ttm_bo_wait(struct ttm_buffer_object *bo,
1034         bool interruptible, bool no_wait)
1035 {
1036     long timeout = 15 * HZ;
1037 
1038     if (no_wait) {
1039         if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
1040             return 0;
1041         else
1042             return -EBUSY;
1043     }
1044 
1045     timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
1046                     interruptible, timeout);
1047     if (timeout < 0)
1048         return timeout;
1049 
1050     if (timeout == 0)
1051         return -EBUSY;
1052 
1053     return 0;
1054 }
1055 EXPORT_SYMBOL(ttm_bo_wait);
1056 
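/*
 * ttm_bo_swapout - try to swap out one buffer object
 *
 * Expected to be called from TTM's swapout path (see ttm_device_swapout())
 * with the LRU lock held. Moves the BO to cached system memory if needed,
 * waits for it to become idle and then swaps its pages out to shmem.
 * Returns -ENOSPC rather than -EBUSY when the BO couldn't be swapped, so the
 * caller moves on to the next candidate.
 */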
1057 int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
1058            gfp_t gfp_flags)
1059 {
1060     struct ttm_place place;
1061     bool locked;
1062     int ret;
1063 
1064     /*
1065      * While the bo may already reside in SYSTEM placement, set
1066      * SYSTEM as new placement to cover also the move further below.
1067      * The driver may use the fact that we're moving from SYSTEM
1068      * as an indication that we're about to swap out.
1069      */
1070     memset(&place, 0, sizeof(place));
1071     place.mem_type = bo->resource->mem_type;
1072     if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
1073         return -EBUSY;
1074 
1075     if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
1076         bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
1077         bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
1078         !ttm_bo_get_unless_zero(bo)) {
1079         if (locked)
1080             dma_resv_unlock(bo->base.resv);
1081         return -EBUSY;
1082     }
1083 
1084     if (bo->deleted) {
1085         ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1086         ttm_bo_put(bo);
1087         return ret == -EBUSY ? -ENOSPC : ret;
1088     }
1089 
1090     /* TODO: Cleanup the locking */
1091     spin_unlock(&bo->bdev->lru_lock);
1092 
1093     /*
1094      * Move to system cached
1095      */
1096     if (bo->resource->mem_type != TTM_PL_SYSTEM) {
1097         struct ttm_operation_ctx ctx = { false, false };
1098         struct ttm_resource *evict_mem;
1099         struct ttm_place hop;
1100 
1101         memset(&hop, 0, sizeof(hop));
1102         place.mem_type = TTM_PL_SYSTEM;
1103         ret = ttm_resource_alloc(bo, &place, &evict_mem);
1104         if (unlikely(ret))
1105             goto out;
1106 
1107         ret = ttm_bo_handle_move_mem(bo, evict_mem, true, &ctx, &hop);
1108         if (unlikely(ret != 0)) {
1109             WARN(ret == -EMULTIHOP, "Unexpected multihop in swapout - likely driver bug.\n");
1110             goto out;
1111         }
1112     }
1113 
1114     /*
1115      * Make sure BO is idle.
1116      */
1117     ret = ttm_bo_wait(bo, false, false);
1118     if (unlikely(ret != 0))
1119         goto out;
1120 
1121     ttm_bo_unmap_virtual(bo);
1122 
1123     /*
1124      * Swap out. Buffer will be swapped in again as soon as
1125      * anyone tries to access a ttm page.
1126      */
1127     if (bo->bdev->funcs->swap_notify)
1128         bo->bdev->funcs->swap_notify(bo);
1129 
1130     if (ttm_tt_is_populated(bo->ttm))
1131         ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
1132 out:
1133 
1134     /*
1135      * Unreserve without putting on LRU to avoid swapping out an
1136      * already swapped buffer.
1137      */
1138     if (locked)
1139         dma_resv_unlock(bo->base.resv);
1140     ttm_bo_put(bo);
1141     return ret == -EBUSY ? -ENOSPC : ret;
1142 }
1143 
1144 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
1145 {
1146     if (bo->ttm == NULL)
1147         return;
1148 
1149     ttm_tt_unpopulate(bo->bdev, bo->ttm);
1150     ttm_tt_destroy(bo->bdev, bo->ttm);
1151     bo->ttm = NULL;
1152 }