0001 // SPDX-License-Identifier: MIT
0002 /*
0003  * Copyright © 2021 Intel Corporation
0004  */
0005 
0006 #include <linux/shmem_fs.h>
0007 
0008 #include <drm/ttm/ttm_bo_driver.h>
0009 #include <drm/ttm/ttm_placement.h>
0010 #include <drm/drm_buddy.h>
0011 
0012 #include "i915_drv.h"
0013 #include "i915_ttm_buddy_manager.h"
0014 #include "intel_memory_region.h"
0015 #include "intel_region_ttm.h"
0016 
0017 #include "gem/i915_gem_mman.h"
0018 #include "gem/i915_gem_object.h"
0019 #include "gem/i915_gem_region.h"
0020 #include "gem/i915_gem_ttm.h"
0021 #include "gem/i915_gem_ttm_move.h"
0022 #include "gem/i915_gem_ttm_pm.h"
0023 #include "gt/intel_gpu_commands.h"
0024 
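/*
 * TTM LRU priorities assigned in i915_ttm_adjust_lru(). Lower values are
 * considered for eviction first: purgeable objects go first, then objects
 * without backing pages, then populated objects, with objects that still
 * need CPU access kept around the longest.
 */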
0025 #define I915_TTM_PRIO_PURGE     0
0026 #define I915_TTM_PRIO_NO_PAGES  1
0027 #define I915_TTM_PRIO_HAS_PAGES 2
0028 #define I915_TTM_PRIO_NEEDS_CPU_ACCESS 3
0029 
0030 /*
0031  * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
0032  */
0033 #define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN
0034 
0035 /**
0036  * struct i915_ttm_tt - TTM page vector with additional private information
0037  * @ttm: The base TTM page vector.
0038  * @dev: The struct device used for dma mapping and unmapping.
0039  * @cached_rsgt: The cached scatter-gather table.
0040  * @is_shmem: Set if using shmem.
0041  * @filp: The shmem file, if using shmem backend.
0042  *
0043  * Note that DMA may be going on right up to the point where the page-
0044  * vector is unpopulated in delayed destroy. Hence keep the
0045  * scatter-gather table mapped and cached up to that point. This is
0046  * different from the cached gem object io scatter-gather table which
0047  * doesn't have an associated dma mapping.
0048  */
0049 struct i915_ttm_tt {
0050     struct ttm_tt ttm;
0051     struct device *dev;
0052     struct i915_refct_sgt cached_rsgt;
0053 
0054     bool is_shmem;
0055     struct file *filp;
0056 };
0057 
0058 static const struct ttm_place sys_placement_flags = {
0059     .fpfn = 0,
0060     .lpfn = 0,
0061     .mem_type = I915_PL_SYSTEM,
0062     .flags = 0,
0063 };
0064 
0065 static struct ttm_placement i915_sys_placement = {
0066     .num_placement = 1,
0067     .placement = &sys_placement_flags,
0068     .num_busy_placement = 1,
0069     .busy_placement = &sys_placement_flags,
0070 };
0071 
0072 /**
0073  * i915_ttm_sys_placement - Return the struct ttm_placement to be
0074  * used for an object in system memory.
0075  *
0076  * Rather than making the struct extern, use this
0077  * function.
0078  *
0079  * Return: A pointer to a static variable for sys placement.
0080  */
0081 struct ttm_placement *i915_ttm_sys_placement(void)
0082 {
0083     return &i915_sys_placement;
0084 }
0085 
0086 static int i915_ttm_err_to_gem(int err)
0087 {
0088     /* Fastpath */
0089     if (likely(!err))
0090         return 0;
0091 
0092     switch (err) {
0093     case -EBUSY:
0094         /*
0095          * TTM likes to convert -EDEADLK to -EBUSY, and wants us to
0096          * restart the operation, since we don't record the contending
0097          * lock. We use -EAGAIN to restart.
0098          */
0099         return -EAGAIN;
0100     case -ENOSPC:
0101         /*
0102          * Memory type / region is full, and we can't evict.
0103          * Except possibly system, which returns -ENOMEM.
0104          */
0105         return -ENXIO;
0106     default:
0107         break;
0108     }
0109 
0110     return err;
0111 }
0112 
0113 static enum ttm_caching
0114 i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
0115 {
0116     /*
0117      * Objects that are only allowed in system memory get cached CPU
0118      * mappings, as do lmem-only buffers evicted to system for swapping.
0119      * All other objects get WC mappings for now, even when in system.
0120      */
0121     if (obj->mm.n_placements <= 1)
0122         return ttm_cached;
0123 
0124     return ttm_write_combined;
0125 }
0126 
0127 static void
0128 i915_ttm_place_from_region(const struct intel_memory_region *mr,
0129                struct ttm_place *place,
0130                resource_size_t offset,
0131                resource_size_t size,
0132                unsigned int flags)
0133 {
0134     memset(place, 0, sizeof(*place));
0135     place->mem_type = intel_region_to_ttm_type(mr);
0136 
0137     if (mr->type == INTEL_MEMORY_SYSTEM)
0138         return;
0139 
0140     if (flags & I915_BO_ALLOC_CONTIGUOUS)
0141         place->flags |= TTM_PL_FLAG_CONTIGUOUS;
0142     if (offset != I915_BO_INVALID_OFFSET) {
0143         place->fpfn = offset >> PAGE_SHIFT;
0144         place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
0145     } else if (mr->io_size && mr->io_size < mr->total) {
0146         if (flags & I915_BO_ALLOC_GPU_ONLY) {
0147             place->flags |= TTM_PL_FLAG_TOPDOWN;
0148         } else {
0149             place->fpfn = 0;
0150             place->lpfn = mr->io_size >> PAGE_SHIFT;
0151         }
0152     }
0153 }
0154 
0155 static void
0156 i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
0157                 struct ttm_place *requested,
0158                 struct ttm_place *busy,
0159                 struct ttm_placement *placement)
0160 {
0161     unsigned int num_allowed = obj->mm.n_placements;
0162     unsigned int flags = obj->flags;
0163     unsigned int i;
0164 
0165     placement->num_placement = 1;
0166     i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
0167                    obj->mm.region, requested, obj->bo_offset,
0168                    obj->base.size, flags);
0169 
0170     /* Cache this on object? */
0171     placement->num_busy_placement = num_allowed;
0172     for (i = 0; i < placement->num_busy_placement; ++i)
0173         i915_ttm_place_from_region(obj->mm.placements[i], busy + i,
0174                        obj->bo_offset, obj->base.size, flags);
0175 
0176     if (num_allowed == 0) {
0177         *busy = *requested;
0178         placement->num_busy_placement = 1;
0179     }
0180 
0181     placement->placement = requested;
0182     placement->busy_placement = busy;
0183 }
0184 
0185 static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
0186                       struct ttm_tt *ttm,
0187                       struct ttm_operation_ctx *ctx)
0188 {
0189     struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
0190     struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
0191     struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
0192     const unsigned int max_segment = i915_sg_segment_size();
0193     const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT;
0194     struct file *filp = i915_tt->filp;
0195     struct sgt_iter sgt_iter;
0196     struct sg_table *st;
0197     struct page *page;
0198     unsigned long i;
0199     int err;
0200 
0201     if (!filp) {
0202         struct address_space *mapping;
0203         gfp_t mask;
0204 
0205         filp = shmem_file_setup("i915-shmem-tt", size, VM_NORESERVE);
0206         if (IS_ERR(filp))
0207             return PTR_ERR(filp);
0208 
0209         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
0210 
0211         mapping = filp->f_mapping;
0212         mapping_set_gfp_mask(mapping, mask);
0213         GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
0214 
0215         i915_tt->filp = filp;
0216     }
0217 
0218     st = &i915_tt->cached_rsgt.table;
0219     err = shmem_sg_alloc_table(i915, st, size, mr, filp->f_mapping,
0220                    max_segment);
0221     if (err)
0222         return err;
0223 
0224     err = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL,
0225                   DMA_ATTR_SKIP_CPU_SYNC);
0226     if (err)
0227         goto err_free_st;
0228 
0229     i = 0;
0230     for_each_sgt_page(page, sgt_iter, st)
0231         ttm->pages[i++] = page;
0232 
0233     if (ttm->page_flags & TTM_TT_FLAG_SWAPPED)
0234         ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
0235 
0236     return 0;
0237 
0238 err_free_st:
0239     shmem_sg_free_table(st, filp->f_mapping, false, false);
0240 
0241     return err;
0242 }
0243 
0244 static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm)
0245 {
0246     struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
0247     bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED;
0248     struct sg_table *st = &i915_tt->cached_rsgt.table;
0249 
0250     shmem_sg_free_table(st, file_inode(i915_tt->filp)->i_mapping,
0251                 backup, backup);
0252 }
0253 
0254 static void i915_ttm_tt_release(struct kref *ref)
0255 {
0256     struct i915_ttm_tt *i915_tt =
0257         container_of(ref, typeof(*i915_tt), cached_rsgt.kref);
0258     struct sg_table *st = &i915_tt->cached_rsgt.table;
0259 
0260     GEM_WARN_ON(st->sgl);
0261 
0262     kfree(i915_tt);
0263 }
0264 
0265 static const struct i915_refct_sgt_ops tt_rsgt_ops = {
0266     .release = i915_ttm_tt_release
0267 };
0268 
0269 static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
0270                      uint32_t page_flags)
0271 {
0272     struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
0273                              bdev);
0274     struct ttm_resource_manager *man =
0275         ttm_manager_type(bo->bdev, bo->resource->mem_type);
0276     struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
0277     unsigned long ccs_pages = 0;
0278     enum ttm_caching caching;
0279     struct i915_ttm_tt *i915_tt;
0280     int ret;
0281 
0282     if (!obj)
0283         return NULL;
0284 
0285     i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
0286     if (!i915_tt)
0287         return NULL;
0288 
0289     if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
0290         man->use_tt)
0291         page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
0292 
0293     caching = i915_ttm_select_tt_caching(obj);
0294     if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) {
0295         page_flags |= TTM_TT_FLAG_EXTERNAL |
0296                   TTM_TT_FLAG_EXTERNAL_MAPPABLE;
0297         i915_tt->is_shmem = true;
0298     }
0299 
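    /*
     * Objects carrying compression metadata (flat CCS) need extra pages
     * appended to the ttm_tt to back that metadata.
     */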
0300     if (i915_gem_object_needs_ccs_pages(obj))
0301         ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size,
0302                               NUM_BYTES_PER_CCS_BYTE),
0303                      PAGE_SIZE);
0304 
0305     ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching, ccs_pages);
0306     if (ret)
0307         goto err_free;
0308 
0309     __i915_refct_sgt_init(&i915_tt->cached_rsgt, bo->base.size,
0310                   &tt_rsgt_ops);
0311 
0312     i915_tt->dev = obj->base.dev->dev;
0313 
0314     return &i915_tt->ttm;
0315 
0316 err_free:
0317     kfree(i915_tt);
0318     return NULL;
0319 }
0320 
0321 static int i915_ttm_tt_populate(struct ttm_device *bdev,
0322                 struct ttm_tt *ttm,
0323                 struct ttm_operation_ctx *ctx)
0324 {
0325     struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
0326 
0327     if (i915_tt->is_shmem)
0328         return i915_ttm_tt_shmem_populate(bdev, ttm, ctx);
0329 
0330     return ttm_pool_alloc(&bdev->pool, ttm, ctx);
0331 }
0332 
0333 static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
0334 {
0335     struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
0336     struct sg_table *st = &i915_tt->cached_rsgt.table;
0337 
0338     if (st->sgl)
0339         dma_unmap_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
0340 
0341     if (i915_tt->is_shmem) {
0342         i915_ttm_tt_shmem_unpopulate(ttm);
0343     } else {
0344         sg_free_table(st);
0345         ttm_pool_free(&bdev->pool, ttm);
0346     }
0347 }
0348 
0349 static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
0350 {
0351     struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
0352 
0353     if (i915_tt->filp)
0354         fput(i915_tt->filp);
0355 
0356     ttm_tt_fini(ttm);
0357     i915_refct_sgt_put(&i915_tt->cached_rsgt);
0358 }
0359 
0360 static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
0361                        const struct ttm_place *place)
0362 {
0363     struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
0364     struct ttm_resource *res = bo->resource;
0365 
0366     if (!obj)
0367         return false;
0368 
0369     /*
0370      * EXTERNAL objects should never be swapped out by TTM; instead we need
0371      * to handle that ourselves. TTM will already skip such objects for us,
0372      * but we would like to avoid grabbing locks for no good reason.
0373      */
0374     if (bo->ttm && bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
0375         return false;
0376 
0377     /* Will do for now. Our pinned objects are still on TTM's LRU lists */
0378     if (!i915_gem_object_evictable(obj))
0379         return false;
0380 
0381     switch (res->mem_type) {
0382     case I915_PL_LMEM0: {
0383         struct ttm_resource_manager *man =
0384             ttm_manager_type(bo->bdev, res->mem_type);
0385         struct i915_ttm_buddy_resource *bman_res =
0386             to_ttm_buddy_resource(res);
0387         struct drm_buddy *mm = bman_res->mm;
0388         struct drm_buddy_block *block;
0389 
0390         if (!place->fpfn && !place->lpfn)
0391             return true;
0392 
0393         GEM_BUG_ON(!place->lpfn);
0394 
0395         /*
0396          * If we just want something mappable then we can quickly check
0397          * if the current victim resource is using any of the CPU
0398          * visible portion.
0399          */
0400         if (!place->fpfn &&
0401             place->lpfn == i915_ttm_buddy_man_visible_size(man))
0402             return bman_res->used_visible_size > 0;
0403 
0404         /* Real range allocation */
0405         list_for_each_entry(block, &bman_res->blocks, link) {
0406             unsigned long fpfn =
0407                 drm_buddy_block_offset(block) >> PAGE_SHIFT;
0408             unsigned long lpfn = fpfn +
0409                 (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
0410 
0411             if (place->fpfn < lpfn && place->lpfn > fpfn)
0412                 return true;
0413         }
0414         return false;
0415     } default:
0416         break;
0417     }
0418 
0419     return true;
0420 }
0421 
0422 static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
0423                  struct ttm_placement *placement)
0424 {
0425     *placement = i915_sys_placement;
0426 }
0427 
0428 /**
0429  * i915_ttm_free_cached_io_rsgt - Free object cached LMEM information
0430  * @obj: The GEM object
0431  * This function frees any LMEM-related information that is cached on
0432  * the object, for example the radix tree for fast page lookup and the
0433  * cached refcounted sg-table.
0434  */
0435 void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj)
0436 {
0437     struct radix_tree_iter iter;
0438     void __rcu **slot;
0439 
0440     if (!obj->ttm.cached_io_rsgt)
0441         return;
0442 
0443     rcu_read_lock();
0444     radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
0445         radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
0446     rcu_read_unlock();
0447 
0448     i915_refct_sgt_put(obj->ttm.cached_io_rsgt);
0449     obj->ttm.cached_io_rsgt = NULL;
0450 }
0451 
0452 /**
0453  * i915_ttm_purge - Clear an object of its memory
0454  * @obj: The object
0455  *
0456  * This function is called to clear an object of its memory when it is
0457  * marked as not needed anymore.
0458  *
0459  * Return: 0 on success, negative error code on failure.
0460  */
0461 int i915_ttm_purge(struct drm_i915_gem_object *obj)
0462 {
0463     struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
0464     struct i915_ttm_tt *i915_tt =
0465         container_of(bo->ttm, typeof(*i915_tt), ttm);
0466     struct ttm_operation_ctx ctx = {
0467         .interruptible = true,
0468         .no_wait_gpu = false,
0469     };
0470     struct ttm_placement place = {};
0471     int ret;
0472 
0473     if (obj->mm.madv == __I915_MADV_PURGED)
0474         return 0;
0475 
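    /*
     * Validating against an empty placement tells TTM to discard the
     * object's current backing store.
     */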
0476     ret = ttm_bo_validate(bo, &place, &ctx);
0477     if (ret)
0478         return ret;
0479 
0480     if (bo->ttm && i915_tt->filp) {
0481         /*
0482          * The below fput() (which eventually calls shmem_truncate) might
0483          * be delayed by a worker, so when called directly to purge the
0484          * pages (like by the shrinker) we should try to be more
0485          * aggressive and release the pages immediately.
0486          */
0487         shmem_truncate_range(file_inode(i915_tt->filp),
0488                      0, (loff_t)-1);
0489         fput(fetch_and_zero(&i915_tt->filp));
0490     }
0491 
0492     obj->write_domain = 0;
0493     obj->read_domains = 0;
0494     i915_ttm_adjust_gem_after_move(obj);
0495     i915_ttm_free_cached_io_rsgt(obj);
0496     obj->mm.madv = __I915_MADV_PURGED;
0497 
0498     return 0;
0499 }
0500 
0501 static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
0502 {
0503     struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
0504     struct i915_ttm_tt *i915_tt =
0505         container_of(bo->ttm, typeof(*i915_tt), ttm);
0506     struct ttm_operation_ctx ctx = {
0507         .interruptible = true,
0508         .no_wait_gpu = flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT,
0509     };
0510     struct ttm_placement place = {};
0511     int ret;
0512 
0513     if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
0514         return 0;
0515 
0516     GEM_BUG_ON(!i915_tt->is_shmem);
0517 
0518     if (!i915_tt->filp)
0519         return 0;
0520 
0521     ret = ttm_bo_wait_ctx(bo, &ctx);
0522     if (ret)
0523         return ret;
0524 
0525     switch (obj->mm.madv) {
0526     case I915_MADV_DONTNEED:
0527         return i915_ttm_purge(obj);
0528     case __I915_MADV_PURGED:
0529         return 0;
0530     }
0531 
0532     if (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED)
0533         return 0;
0534 
0535     bo->ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
0536     ret = ttm_bo_validate(bo, &place, &ctx);
0537     if (ret) {
0538         bo->ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
0539         return ret;
0540     }
0541 
0542     if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
0543         __shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);
0544 
0545     return 0;
0546 }
0547 
0548 static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
0549 {
0550     struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
0551 
0552     if (likely(obj)) {
0553         __i915_gem_object_pages_fini(obj);
0554         i915_ttm_free_cached_io_rsgt(obj);
0555     }
0556 }
0557 
0558 static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm)
0559 {
0560     struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
0561     struct sg_table *st;
0562     int ret;
0563 
0564     if (i915_tt->cached_rsgt.table.sgl)
0565         return i915_refct_sgt_get(&i915_tt->cached_rsgt);
0566 
0567     st = &i915_tt->cached_rsgt.table;
0568     ret = sg_alloc_table_from_pages_segment(st,
0569             ttm->pages, ttm->num_pages,
0570             0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
0571             i915_sg_segment_size(), GFP_KERNEL);
0572     if (ret) {
0573         st->sgl = NULL;
0574         return ERR_PTR(ret);
0575     }
0576 
0577     ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
0578     if (ret) {
0579         sg_free_table(st);
0580         return ERR_PTR(ret);
0581     }
0582 
0583     return i915_refct_sgt_get(&i915_tt->cached_rsgt);
0584 }
0585 
0586 /**
0587  * i915_ttm_resource_get_st - Get a refcounted sg-table pointing to the
0588  * resource memory
0589  * @obj: The GEM object used for sg-table caching
0590  * @res: The struct ttm_resource for which an sg-table is requested.
0591  *
0592  * This function returns a refcounted sg-table representing the memory
0593  * pointed to by @res. If @res is the object's current resource it may also
0594  * cache the sg_table on the object or attempt to access an already cached
0595  * sg-table. The refcounted sg-table needs to be put when no-longer in use.
0596  *
0597  * Return: A valid pointer to a struct i915_refct_sgt or error pointer on
0598  * failure.
0599  */
0600 struct i915_refct_sgt *
0601 i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
0602              struct ttm_resource *res)
0603 {
0604     struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
0605     u32 page_alignment;
0606 
0607     if (!i915_ttm_gtt_binds_lmem(res))
0608         return i915_ttm_tt_get_st(bo->ttm);
0609 
0610     page_alignment = bo->page_alignment << PAGE_SHIFT;
0611     if (!page_alignment)
0612         page_alignment = obj->mm.region->min_page_size;
0613 
0614     /*
0615      * If CPU mapping differs, we need to add the ttm_tt pages to
0616      * the resulting st. Might make sense for GGTT.
0617      */
0618     GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(res));
0619     if (bo->resource == res) {
0620         if (!obj->ttm.cached_io_rsgt) {
0621             struct i915_refct_sgt *rsgt;
0622 
0623             rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
0624                                  res,
0625                                  page_alignment);
0626             if (IS_ERR(rsgt))
0627                 return rsgt;
0628 
0629             obj->ttm.cached_io_rsgt = rsgt;
0630         }
0631         return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);
0632     }
0633 
0634     return intel_region_ttm_resource_to_rsgt(obj->mm.region, res,
0635                          page_alignment);
0636 }
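
/*
 * Illustrative sketch of the expected calling pattern (it mirrors the use
 * in __i915_ttm_get_pages() below and is not an additional driver path):
 * the returned refcounted sg-table must be put when no longer needed.
 *
 *        rsgt = i915_ttm_resource_get_st(obj, bo->resource);
 *        if (IS_ERR(rsgt))
 *                return PTR_ERR(rsgt);
 *        ...access rsgt->table...
 *        i915_refct_sgt_put(rsgt);
 */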
0637 
0638 static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
0639 {
0640     struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
0641     int err;
0642 
0643     WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
0644 
0645     err = i915_ttm_move_notify(bo);
0646     if (err)
0647         return err;
0648 
0649     return i915_ttm_purge(obj);
0650 }
0651 
0652 static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
0653 {
0654     struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
0655     int ret;
0656 
0657     if (!obj)
0658         return;
0659 
0660     ret = i915_ttm_move_notify(bo);
0661     GEM_WARN_ON(ret);
0662     GEM_WARN_ON(obj->ttm.cached_io_rsgt);
0663     if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
0664         i915_ttm_purge(obj);
0665 }
0666 
0667 /**
0668  * i915_ttm_resource_mappable - Return true if the ttm resource is CPU
0669  * accessible.
0670  * @res: The TTM resource to check.
0671  *
0672  * This is interesting on small-BAR systems where we may encounter lmem objects
0673  * that can't be accessed via the CPU.
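 *
 * Return: true if the whole resource is CPU accessible, false otherwise.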
0674  */
0675 bool i915_ttm_resource_mappable(struct ttm_resource *res)
0676 {
0677     struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
0678 
0679     if (!i915_ttm_cpu_maps_iomem(res))
0680         return true;
0681 
0682     return bman_res->used_visible_size == bman_res->base.num_pages;
0683 }
0684 
0685 static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
0686 {
0687     struct drm_i915_gem_object *obj = i915_ttm_to_gem(mem->bo);
0688     bool unknown_state;
0689 
0690     if (!obj)
0691         return -EINVAL;
0692 
0693     if (!kref_get_unless_zero(&obj->base.refcount))
0694         return -EINVAL;
0695 
0696     assert_object_held(obj);
0697 
0698     unknown_state = i915_gem_object_has_unknown_state(obj);
0699     i915_gem_object_put(obj);
0700     if (unknown_state)
0701         return -EINVAL;
0702 
0703     if (!i915_ttm_cpu_maps_iomem(mem))
0704         return 0;
0705 
0706     if (!i915_ttm_resource_mappable(mem))
0707         return -EINVAL;
0708 
0709     mem->bus.caching = ttm_write_combined;
0710     mem->bus.is_iomem = true;
0711 
0712     return 0;
0713 }
0714 
0715 static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
0716                      unsigned long page_offset)
0717 {
0718     struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
0719     struct scatterlist *sg;
0720     unsigned long base;
0721     unsigned int ofs;
0722 
0723     GEM_BUG_ON(!obj);
0724     GEM_WARN_ON(bo->ttm);
0725 
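    /*
     * Translate the region-local DMA address of the page into an absolute
     * pfn within the region's CPU iomap.
     */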
0726     base = obj->mm.region->iomap.base - obj->mm.region->region.start;
0727     sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);
0728 
0729     return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
0730 }
0731 
0732 /*
0733  * All callbacks need to take care not to downcast a struct ttm_buffer_object
0734  * without checking its subclass, since it might be a TTM ghost object.
0735  */
0736 static struct ttm_device_funcs i915_ttm_bo_driver = {
0737     .ttm_tt_create = i915_ttm_tt_create,
0738     .ttm_tt_populate = i915_ttm_tt_populate,
0739     .ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
0740     .ttm_tt_destroy = i915_ttm_tt_destroy,
0741     .eviction_valuable = i915_ttm_eviction_valuable,
0742     .evict_flags = i915_ttm_evict_flags,
0743     .move = i915_ttm_move,
0744     .swap_notify = i915_ttm_swap_notify,
0745     .delete_mem_notify = i915_ttm_delete_mem_notify,
0746     .io_mem_reserve = i915_ttm_io_mem_reserve,
0747     .io_mem_pfn = i915_ttm_io_mem_pfn,
0748 };
0749 
0750 /**
0751  * i915_ttm_driver - Return a pointer to the TTM device funcs
0752  *
0753  * Return: Pointer to statically allocated TTM device funcs.
0754  */
0755 struct ttm_device_funcs *i915_ttm_driver(void)
0756 {
0757     return &i915_ttm_bo_driver;
0758 }
0759 
0760 static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
0761                 struct ttm_placement *placement)
0762 {
0763     struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
0764     struct ttm_operation_ctx ctx = {
0765         .interruptible = true,
0766         .no_wait_gpu = false,
0767     };
0768     int real_num_busy;
0769     int ret;
0770 
0771     /* First try only the requested placement. No eviction. */
0772     real_num_busy = fetch_and_zero(&placement->num_busy_placement);
0773     ret = ttm_bo_validate(bo, placement, &ctx);
0774     if (ret) {
0775         ret = i915_ttm_err_to_gem(ret);
0776         /*
0777          * Anything that wants to restart the operation gets to
0778          * do that.
0779          */
0780         if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS ||
0781             ret == -EAGAIN)
0782             return ret;
0783 
0784         /*
0785          * If the initial attempt fails, allow all accepted placements,
0786          * evicting if necessary.
0787          */
0788         placement->num_busy_placement = real_num_busy;
0789         ret = ttm_bo_validate(bo, placement, &ctx);
0790         if (ret)
0791             return i915_ttm_err_to_gem(ret);
0792     }
0793 
0794     if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
0795         ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
0796         if (ret)
0797             return ret;
0798 
0799         i915_ttm_adjust_domains_after_move(obj);
0800         i915_ttm_adjust_gem_after_move(obj);
0801     }
0802 
0803     if (!i915_gem_object_has_pages(obj)) {
0804         struct i915_refct_sgt *rsgt =
0805             i915_ttm_resource_get_st(obj, bo->resource);
0806 
0807         if (IS_ERR(rsgt))
0808             return PTR_ERR(rsgt);
0809 
0810         GEM_BUG_ON(obj->mm.rsgt);
0811         obj->mm.rsgt = rsgt;
0812         __i915_gem_object_set_pages(obj, &rsgt->table,
0813                         i915_sg_dma_sizes(rsgt->table.sgl));
0814     }
0815 
0816     GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages));
0817     i915_ttm_adjust_lru(obj);
0818     return ret;
0819 }
0820 
0821 static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
0822 {
0823     struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
0824     struct ttm_placement placement;
0825 
0826     GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
0827 
0828     /* Move to the requested placement. */
0829     i915_ttm_placement_from_obj(obj, &requested, busy, &placement);
0830 
0831     return __i915_ttm_get_pages(obj, &placement);
0832 }
0833 
0834 /**
0835  * DOC: Migration vs eviction
0836  *
0837  * GEM migration may not be the same as TTM migration / eviction. If
0838  * the TTM core decides to evict an object it may be evicted to a
0839  * TTM memory type that is not in the object's allowable GEM regions, or
0840  * in fact theoretically to a TTM memory type that doesn't correspond to
0841  * a GEM memory region. In that case the object's GEM region is not
0842  * updated, and the data is migrated back to the GEM region at
0843  * get_pages time. TTM may however set up CPU ptes to the object even
0844  * when it is evicted.
0845  * GEM forced migration using the i915_ttm_migrate() op is allowed even
0846  * to regions that are not in the object's list of allowable placements.
0847  */
0848 static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
0849                   struct intel_memory_region *mr,
0850                   unsigned int flags)
0851 {
0852     struct ttm_place requested;
0853     struct ttm_placement placement;
0854     int ret;
0855 
0856     i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
0857                    obj->base.size, flags);
0858     placement.num_placement = 1;
0859     placement.num_busy_placement = 1;
0860     placement.placement = &requested;
0861     placement.busy_placement = &requested;
0862 
0863     ret = __i915_ttm_get_pages(obj, &placement);
0864     if (ret)
0865         return ret;
0866 
0867     /*
0868      * Reinitialize the region bindings. This is primarily
0869      * required for objects where the new region is not in
0870      * its allowable placements.
0871      */
0872     if (obj->mm.region != mr) {
0873         i915_gem_object_release_memory_region(obj);
0874         i915_gem_object_init_memory_region(obj, mr);
0875     }
0876 
0877     return 0;
0878 }
0879 
0880 static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
0881                 struct intel_memory_region *mr)
0882 {
0883     return __i915_ttm_migrate(obj, mr, obj->flags);
0884 }
0885 
0886 static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
0887                    struct sg_table *st)
0888 {
0889     /*
0890      * We're currently not called from a shrinker, so put_pages()
0891      * typically means the object is about to be destroyed, or called
0892      * from move_notify(). So just avoid doing much for now.
0893      * If the object is not destroyed next, the TTM eviction logic
0894      * and shrinkers will move it out if needed.
0895      */
0896 
0897     if (obj->mm.rsgt)
0898         i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt));
0899 }
0900 
0901 /**
0902  * i915_ttm_adjust_lru - Adjust an object's position on relevant LRU lists.
0903  * @obj: The object
0904  */
0905 void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
0906 {
0907     struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
0908     struct i915_ttm_tt *i915_tt =
0909         container_of(bo->ttm, typeof(*i915_tt), ttm);
0910     bool shrinkable =
0911         bo->ttm && i915_tt->filp && ttm_tt_is_populated(bo->ttm);
0912 
0913     /*
0914      * Don't manipulate the TTM LRUs while in TTM bo destruction.
0915      * We're called through i915_ttm_delete_mem_notify().
0916      */
0917     if (!kref_read(&bo->kref))
0918         return;
0919 
0920     /*
0921      * We skip managing the shrinker LRU in set_pages() and just manage
0922      * everything here. This does at least solve the issue with having
0923      * temporary shmem mappings (like with evicted lmem) not being visible to
0924      * the shrinker. Only our shmem objects are shrinkable, everything else
0925      * we keep as unshrinkable.
0926      *
0927      * To make sure everything plays nice we keep an extra shrink pin in TTM
0928      * if the underlying pages are not currently shrinkable. Once we release
0929      * our pin, like when the pages are moved to shmem, the pages will then
0930      * be added to the shrinker LRU, assuming the caller isn't also holding
0931      * a pin.
0932      *
0933      * TODO: consider maybe also bumping the shrinker list here when we have
0934      * already unpinned it, which should give us something more like an LRU.
0935      *
0936      * TODO: There is a small window of opportunity for this function to
0937      * get called from eviction after we've dropped the last GEM refcount,
0938      * but before the TTM deleted flag is set on the object. Avoid
0939      * adjusting the shrinker list in such cases, since the object is
0940      * not available to the shrinker anyway due to its zero refcount.
0941      * To fix this properly we should move to a TTM shrinker LRU list for
0942      * these objects.
0943      */
0944     if (kref_get_unless_zero(&obj->base.refcount)) {
0945         if (shrinkable != obj->mm.ttm_shrinkable) {
0946             if (shrinkable) {
0947                 if (obj->mm.madv == I915_MADV_WILLNEED)
0948                     __i915_gem_object_make_shrinkable(obj);
0949                 else
0950                     __i915_gem_object_make_purgeable(obj);
0951             } else {
0952                 i915_gem_object_make_unshrinkable(obj);
0953             }
0954 
0955             obj->mm.ttm_shrinkable = shrinkable;
0956         }
0957         i915_gem_object_put(obj);
0958     }
0959 
0960     /*
0961      * Put on the correct LRU list depending on the MADV status
0962      */
0963     spin_lock(&bo->bdev->lru_lock);
0964     if (shrinkable) {
0965         /* Try to keep shmem_tt from being considered for shrinking. */
0966         bo->priority = TTM_MAX_BO_PRIORITY - 1;
0967     } else if (obj->mm.madv != I915_MADV_WILLNEED) {
0968         bo->priority = I915_TTM_PRIO_PURGE;
0969     } else if (!i915_gem_object_has_pages(obj)) {
0970         bo->priority = I915_TTM_PRIO_NO_PAGES;
0971     } else {
0972         struct ttm_resource_manager *man =
0973             ttm_manager_type(bo->bdev, bo->resource->mem_type);
0974 
0975         /*
0976          * If we need to place an LMEM resource which doesn't need CPU
0977          * access then we should try not to victimize mappable objects
0978          * first, since we likely end up stealing more of the mappable
0979          * portion. And likewise when we try to find space for a mappable
0980          * object, we know not to ever victimize objects that don't
0981          * occupy any mappable pages.
0982          */
0983         if (i915_ttm_cpu_maps_iomem(bo->resource) &&
0984             i915_ttm_buddy_man_visible_size(man) < man->size &&
0985             !(obj->flags & I915_BO_ALLOC_GPU_ONLY))
0986             bo->priority = I915_TTM_PRIO_NEEDS_CPU_ACCESS;
0987         else
0988             bo->priority = I915_TTM_PRIO_HAS_PAGES;
0989     }
0990 
0991     ttm_bo_move_to_lru_tail(bo);
0992     spin_unlock(&bo->bdev->lru_lock);
0993 }
0994 
0995 /*
0996  * TTM-backed gem object destruction requires some clarification.
0997  * Basically we have two possibilities here. We can either rely on the
0998  * i915 delayed destruction and put the TTM object when the object
0999  * is idle. This would be detected by TTM which would bypass the
1000  * TTM delayed destroy handling. The other approach is to put the TTM
1001  * object early and rely on the TTM destroyed handling, and then free
1002  * the leftover parts of the GEM object once TTM's destroyed list handling is
1003  * complete. For now, we rely on the latter for two reasons:
1004  * a) TTM can evict an object even when it's on the delayed destroy list,
1005  * which in theory allows for complete eviction.
1006  * b) There is work going on in TTM to allow freeing an object even when
1007  * it's not idle, and using the TTM destroyed list handling could help us
1008  * benefit from that.
1009  */
1010 static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
1011 {
1012     GEM_BUG_ON(!obj->ttm.created);
1013 
1014     ttm_bo_put(i915_gem_to_ttm(obj));
1015 }
1016 
1017 static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
1018 {
1019     struct vm_area_struct *area = vmf->vma;
1020     struct ttm_buffer_object *bo = area->vm_private_data;
1021     struct drm_device *dev = bo->base.dev;
1022     struct drm_i915_gem_object *obj;
1023     vm_fault_t ret;
1024     int idx;
1025 
1026     obj = i915_ttm_to_gem(bo);
1027     if (!obj)
1028         return VM_FAULT_SIGBUS;
1029 
1030     /* Sanity check that we allow writing into this object */
1031     if (unlikely(i915_gem_object_is_readonly(obj) &&
1032              area->vm_flags & VM_WRITE))
1033         return VM_FAULT_SIGBUS;
1034 
1035     ret = ttm_bo_vm_reserve(bo, vmf);
1036     if (ret)
1037         return ret;
1038 
1039     if (obj->mm.madv != I915_MADV_WILLNEED) {
1040         dma_resv_unlock(bo->base.resv);
1041         return VM_FAULT_SIGBUS;
1042     }
1043 
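    /*
     * On small-BAR systems the faulted resource may not be CPU visible.
     * Try to migrate the object to a placement we can map before
     * inserting any PTEs.
     */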
1044     if (!i915_ttm_resource_mappable(bo->resource)) {
1045         int err = -ENODEV;
1046         int i;
1047 
1048         for (i = 0; i < obj->mm.n_placements; i++) {
1049             struct intel_memory_region *mr = obj->mm.placements[i];
1050             unsigned int flags;
1051 
1052             if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
1053                 continue;
1054 
1055             flags = obj->flags;
1056             flags &= ~I915_BO_ALLOC_GPU_ONLY;
1057             err = __i915_ttm_migrate(obj, mr, flags);
1058             if (!err)
1059                 break;
1060         }
1061 
1062         if (err) {
1063             drm_dbg(dev, "Unable to make resource CPU accessible\n");
1064             dma_resv_unlock(bo->base.resv);
1065             return VM_FAULT_SIGBUS;
1066         }
1067     }
1068 
1069     if (drm_dev_enter(dev, &idx)) {
1070         ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
1071                            TTM_BO_VM_NUM_PREFAULT);
1072         drm_dev_exit(idx);
1073     } else {
1074         ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
1075     }
1076     if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
1077         return ret;
1078 
1079     i915_ttm_adjust_lru(obj);
1080 
1081     dma_resv_unlock(bo->base.resv);
1082     return ret;
1083 }
1084 
1085 static int
1086 vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
1087           void *buf, int len, int write)
1088 {
1089     struct drm_i915_gem_object *obj =
1090         i915_ttm_to_gem(area->vm_private_data);
1091 
1092     if (i915_gem_object_is_readonly(obj) && write)
1093         return -EACCES;
1094 
1095     return ttm_bo_vm_access(area, addr, buf, len, write);
1096 }
1097 
1098 static void ttm_vm_open(struct vm_area_struct *vma)
1099 {
1100     struct drm_i915_gem_object *obj =
1101         i915_ttm_to_gem(vma->vm_private_data);
1102 
1103     GEM_BUG_ON(!obj);
1104     i915_gem_object_get(obj);
1105 }
1106 
1107 static void ttm_vm_close(struct vm_area_struct *vma)
1108 {
1109     struct drm_i915_gem_object *obj =
1110         i915_ttm_to_gem(vma->vm_private_data);
1111 
1112     GEM_BUG_ON(!obj);
1113     i915_gem_object_put(obj);
1114 }
1115 
1116 static const struct vm_operations_struct vm_ops_ttm = {
1117     .fault = vm_fault_ttm,
1118     .access = vm_access_ttm,
1119     .open = ttm_vm_open,
1120     .close = ttm_vm_close,
1121 };
1122 
1123 static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
1124 {
1125     /* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
1126     GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));
1127 
1128     return drm_vma_node_offset_addr(&obj->base.vma_node);
1129 }
1130 
1131 static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
1132 {
1133     ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
1134 }
1135 
1136 static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
1137     .name = "i915_gem_object_ttm",
1138     .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
1139          I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
1140 
1141     .get_pages = i915_ttm_get_pages,
1142     .put_pages = i915_ttm_put_pages,
1143     .truncate = i915_ttm_truncate,
1144     .shrink = i915_ttm_shrink,
1145 
1146     .adjust_lru = i915_ttm_adjust_lru,
1147     .delayed_free = i915_ttm_delayed_free,
1148     .migrate = i915_ttm_migrate,
1149 
1150     .mmap_offset = i915_ttm_mmap_offset,
1151     .unmap_virtual = i915_ttm_unmap_virtual,
1152     .mmap_ops = &vm_ops_ttm,
1153 };
1154 
1155 void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
1156 {
1157     struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
1158 
1159     i915_gem_object_release_memory_region(obj);
1160     mutex_destroy(&obj->ttm.get_io_page.lock);
1161 
1162     if (obj->ttm.created) {
1163         /*
1164          * We freely manage the shrinker LRU outside of the mm.pages life
1165          * cycle. As a result, when destroying the object we should be
1166          * extra paranoid and ensure we remove it from the LRU before
1167          * we free the object.
1168          *
1169          * Touching the ttm_shrinkable outside of the object lock here
1170          * should be safe now that the last GEM object ref was dropped.
1171          */
1172         if (obj->mm.ttm_shrinkable)
1173             i915_gem_object_make_unshrinkable(obj);
1174 
1175         i915_ttm_backup_free(obj);
1176 
1177         /* This releases all gem object bindings to the backend. */
1178         __i915_gem_free_object(obj);
1179 
1180         call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
1181     } else {
1182         __i915_gem_object_fini(obj);
1183     }
1184 }
1185 
1186 /**
1187  * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
1188  * @mem: The initial memory region for the object.
1189  * @obj: The gem object.
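 * @offset: Fixed offset of the object within the region, or
 * I915_BO_INVALID_OFFSET if no fixed offset is required.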
1190  * @size: Object size in bytes.
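 * @page_size: Forced minimum page size for the backing store in bytes, or 0
 * to use the region default. Forcing a page size is kernel-internal only.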
1191  * @flags: gem object flags.
1192  *
1193  * Return: 0 on success, negative error code on failure.
1194  */
1195 int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
1196                    struct drm_i915_gem_object *obj,
1197                    resource_size_t offset,
1198                    resource_size_t size,
1199                    resource_size_t page_size,
1200                    unsigned int flags)
1201 {
1202     static struct lock_class_key lock_class;
1203     struct drm_i915_private *i915 = mem->i915;
1204     struct ttm_operation_ctx ctx = {
1205         .interruptible = true,
1206         .no_wait_gpu = false,
1207     };
1208     enum ttm_bo_type bo_type;
1209     int ret;
1210 
1211     drm_gem_private_object_init(&i915->drm, &obj->base, size);
1212     i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
1213 
1214     obj->bo_offset = offset;
1215 
1216     /* Don't put on a region list until we're either locked or fully initialized. */
1217     obj->mm.region = mem;
1218     INIT_LIST_HEAD(&obj->mm.region_link);
1219 
1220     INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
1221     mutex_init(&obj->ttm.get_io_page.lock);
1222     bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
1223         ttm_bo_type_kernel;
1224 
1225     obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
1226 
1227     /* Forcing the page size is kernel internal only */
1228     GEM_BUG_ON(page_size && obj->mm.n_placements);
1229 
1230     /*
1231      * Keep an extra shrink pin to prevent the object from being made
1232      * shrinkable too early. If the ttm_tt is ever allocated in shmem, we
1233      * drop the pin. The TTM backend manages the shrinker LRU itself,
1234      * outside of the normal mm.pages life cycle.
1235      */
1236     i915_gem_object_make_unshrinkable(obj);
1237 
1238     /*
1239      * If this function fails, it will call the destructor, but
1240      * our caller still owns the object. So no freeing in the
1241      * destructor until obj->ttm.created is true.
1242      * Similarly, in delayed_destroy, we can't call ttm_bo_put()
1243      * until successful initialization.
1244      */
1245     ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
1246                    bo_type, &i915_sys_placement,
1247                    page_size >> PAGE_SHIFT,
1248                    &ctx, NULL, NULL, i915_ttm_bo_destroy);
1249     if (ret)
1250         return i915_ttm_err_to_gem(ret);
1251 
1252     obj->ttm.created = true;
1253     i915_gem_object_release_memory_region(obj);
1254     i915_gem_object_init_memory_region(obj, mem);
1255     i915_ttm_adjust_domains_after_move(obj);
1256     i915_ttm_adjust_gem_after_move(obj);
1257     i915_gem_object_unlock(obj);
1258 
1259     return 0;
1260 }
1261 
1262 static const struct intel_memory_region_ops ttm_system_region_ops = {
1263     .init_object = __i915_gem_ttm_object_init,
1264     .release = intel_region_ttm_fini,
1265 };
1266 
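/**
 * i915_gem_ttm_system_setup - Create a TTM-backed system memory region
 * @i915: The i915 device.
 * @type: The memory region type.
 * @instance: The memory region instance.
 *
 * Return: A pointer to the new memory region, or an error pointer on failure.
 */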
1267 struct intel_memory_region *
1268 i915_gem_ttm_system_setup(struct drm_i915_private *i915,
1269               u16 type, u16 instance)
1270 {
1271     struct intel_memory_region *mr;
1272 
1273     mr = intel_memory_region_create(i915, 0,
1274                     totalram_pages() << PAGE_SHIFT,
1275                     PAGE_SIZE, 0, 0,
1276                     type, instance,
1277                     &ttm_system_region_ops);
1278     if (IS_ERR(mr))
1279         return mr;
1280 
1281     intel_memory_region_set_name(mr, "system-ttm");
1282     return mr;
1283 }