// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/shmem_fs.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_buddy.h>

#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"
#include "gt/intel_gpu_commands.h"

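/*
 * TTM LRU priorities for i915 objects, assigned in i915_ttm_adjust_lru().
 * TTM considers lower priorities first when looking for eviction victims,
 * so purgeable objects go first and objects that may need the CPU-visible
 * part of LMEM are kept around the longest.
 */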
#define I915_TTM_PRIO_PURGE 0
#define I915_TTM_PRIO_NO_PAGES 1
#define I915_TTM_PRIO_HAS_PAGES 2
#define I915_TTM_PRIO_NEEDS_CPU_ACCESS 3

/*
 * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
 */
#define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN

/**
 * struct i915_ttm_tt - TTM page vector with additional private information
 * @ttm: The base TTM page vector.
 * @dev: The struct device used for dma mapping and unmapping.
 * @cached_rsgt: The cached scatter-gather table.
 * @is_shmem: Set if using shmem.
 * @filp: The shmem file, if using shmem backend.
 *
 * Note that DMA may be going on right up to the point where the page-
 * vector is unpopulated in delayed destroy. Hence keep the
 * scatter-gather table mapped and cached up to that point. This is
 * different from the cached gem object io scatter-gather table which
 * is destroyed when the memory region or the TTM resource backing the
 * object is freed.
 */
struct i915_ttm_tt {
        struct ttm_tt ttm;
        struct device *dev;
        struct i915_refct_sgt cached_rsgt;

        bool is_shmem;
        struct file *filp;
};

static const struct ttm_place sys_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .mem_type = I915_PL_SYSTEM,
        .flags = 0,
};

static struct ttm_placement i915_sys_placement = {
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags,
};

/**
 * i915_ttm_sys_placement - Return the struct ttm_placement to be
 * used for an object in system memory.
 *
 * Rather than making the struct extern, use this function.
 *
 * Return: A pointer to a static variable for sys placement.
 */
struct ttm_placement *i915_ttm_sys_placement(void)
{
        return &i915_sys_placement;
}

static int i915_ttm_err_to_gem(int err)
{
        /* Fastpath */
        if (likely(!err))
                return 0;

        switch (err) {
        case -EBUSY:
                /*
                 * TTM likes to convert -EDEADLK to -EBUSY, and wants us to
                 * restart the operation, since we don't record the contending
                 * lock. We use -EAGAIN to restart.
                 */
                return -EAGAIN;
        case -ENOSPC:
                /*
                 * Memory type / region is full, and we can't evict.
                 * Except possibly system, that returns -ENOMEM;
                 */
                return -ENXIO;
        default:
                break;
        }

        return err;
}

static enum ttm_caching
i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
{
        /*
         * Objects only allowed in system get cached cpu-mappings, or when
         * evicting lmem-only buffers to system for swapping. Other objects get
         * WC mapping for now. Even if in system.
         */
        if (obj->mm.n_placements <= 1)
                return ttm_cached;

        return ttm_write_combined;
}

static void
i915_ttm_place_from_region(const struct intel_memory_region *mr,
                           struct ttm_place *place,
                           resource_size_t offset,
                           resource_size_t size,
                           unsigned int flags)
{
        memset(place, 0, sizeof(*place));
        place->mem_type = intel_region_to_ttm_type(mr);

        if (mr->type == INTEL_MEMORY_SYSTEM)
                return;

        if (flags & I915_BO_ALLOC_CONTIGUOUS)
                place->flags |= TTM_PL_FLAG_CONTIGUOUS;
        if (offset != I915_BO_INVALID_OFFSET) {
                place->fpfn = offset >> PAGE_SHIFT;
                place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
        } else if (mr->io_size && mr->io_size < mr->total) {
                if (flags & I915_BO_ALLOC_GPU_ONLY) {
                        place->flags |= TTM_PL_FLAG_TOPDOWN;
                } else {
                        place->fpfn = 0;
                        place->lpfn = mr->io_size >> PAGE_SHIFT;
                }
        }
}

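/*
 * Build the TTM placement lists for an object: the requested (preferred)
 * placement comes from the first allowed region, while the busy list holds
 * all allowed regions so that TTM can fall back, evicting if necessary,
 * when the preferred placement is contended.
 */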
static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
                            struct ttm_place *requested,
                            struct ttm_place *busy,
                            struct ttm_placement *placement)
{
        unsigned int num_allowed = obj->mm.n_placements;
        unsigned int flags = obj->flags;
        unsigned int i;

        placement->num_placement = 1;
        i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
                                   obj->mm.region, requested, obj->bo_offset,
                                   obj->base.size, flags);

        /* Cache this on object? */
        placement->num_busy_placement = num_allowed;
        for (i = 0; i < placement->num_busy_placement; ++i)
                i915_ttm_place_from_region(obj->mm.placements[i], busy + i,
                                           obj->bo_offset, obj->base.size, flags);

        if (num_allowed == 0) {
                *busy = *requested;
                placement->num_busy_placement = 1;
        }

        placement->placement = requested;
        placement->busy_placement = busy;
}

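/*
 * Populate a page vector from shmem. The shmem file backs the pages, which
 * allows them to be swapped out by the core shrinker; the resulting
 * sg-table is dma-mapped and cached in the TT for reuse.
 */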
static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
                                      struct ttm_tt *ttm,
                                      struct ttm_operation_ctx *ctx)
{
        struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
        struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
        const unsigned int max_segment = i915_sg_segment_size();
        const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT;
        struct file *filp = i915_tt->filp;
        struct sgt_iter sgt_iter;
        struct sg_table *st;
        struct page *page;
        unsigned long i;
        int err;

        if (!filp) {
                struct address_space *mapping;
                gfp_t mask;

                filp = shmem_file_setup("i915-shmem-tt", size, VM_NORESERVE);
                if (IS_ERR(filp))
                        return PTR_ERR(filp);

                mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;

                mapping = filp->f_mapping;
                mapping_set_gfp_mask(mapping, mask);
                GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

                i915_tt->filp = filp;
        }

        st = &i915_tt->cached_rsgt.table;
        err = shmem_sg_alloc_table(i915, st, size, mr, filp->f_mapping,
                                   max_segment);
        if (err)
                return err;

        err = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL,
                              DMA_ATTR_SKIP_CPU_SYNC);
        if (err)
                goto err_free_st;

        i = 0;
        for_each_sgt_page(page, sgt_iter, st)
                ttm->pages[i++] = page;

        if (ttm->page_flags & TTM_TT_FLAG_SWAPPED)
                ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

        return 0;

err_free_st:
        shmem_sg_free_table(st, filp->f_mapping, false, false);

        return err;
}

static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm)
{
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
        bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED;
        struct sg_table *st = &i915_tt->cached_rsgt.table;

        shmem_sg_free_table(st, file_inode(i915_tt->filp)->i_mapping,
                            backup, backup);
}

static void i915_ttm_tt_release(struct kref *ref)
{
        struct i915_ttm_tt *i915_tt =
                container_of(ref, typeof(*i915_tt), cached_rsgt.kref);
        struct sg_table *st = &i915_tt->cached_rsgt.table;

        GEM_WARN_ON(st->sgl);

        kfree(i915_tt);
}

static const struct i915_refct_sgt_ops tt_rsgt_ops = {
        .release = i915_ttm_tt_release
};

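/*
 * Create the TTM page vector for an object. Cached, shrinkable objects are
 * marked external and backed by shmem; everything else is allocated from
 * the TTM pool. CCS-capable objects get extra pages appended for
 * compression metadata.
 */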
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
                                         uint32_t page_flags)
{
        struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
                                                     bdev);
        struct ttm_resource_manager *man =
                ttm_manager_type(bo->bdev, bo->resource->mem_type);
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        unsigned long ccs_pages = 0;
        enum ttm_caching caching;
        struct i915_ttm_tt *i915_tt;
        int ret;

        if (!obj)
                return NULL;

        i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
        if (!i915_tt)
                return NULL;

        if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
            man->use_tt)
                page_flags |= TTM_TT_FLAG_ZERO_ALLOC;

        caching = i915_ttm_select_tt_caching(obj);
        if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) {
                page_flags |= TTM_TT_FLAG_EXTERNAL |
                              TTM_TT_FLAG_EXTERNAL_MAPPABLE;
                i915_tt->is_shmem = true;
        }

        if (i915_gem_object_needs_ccs_pages(obj))
                ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size,
                                                      NUM_BYTES_PER_CCS_BYTE),
                                         PAGE_SIZE);

        ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching, ccs_pages);
        if (ret)
                goto err_free;

        __i915_refct_sgt_init(&i915_tt->cached_rsgt, bo->base.size,
                              &tt_rsgt_ops);

        i915_tt->dev = obj->base.dev->dev;

        return &i915_tt->ttm;

err_free:
        kfree(i915_tt);
        return NULL;
}

static int i915_ttm_tt_populate(struct ttm_device *bdev,
                                struct ttm_tt *ttm,
                                struct ttm_operation_ctx *ctx)
{
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

        if (i915_tt->is_shmem)
                return i915_ttm_tt_shmem_populate(bdev, ttm, ctx);

        return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
        struct sg_table *st = &i915_tt->cached_rsgt.table;

        if (st->sgl)
                dma_unmap_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);

        if (i915_tt->is_shmem) {
                i915_ttm_tt_shmem_unpopulate(ttm);
        } else {
                sg_free_table(st);
                ttm_pool_free(&bdev->pool, ttm);
        }
}

static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

        if (i915_tt->filp)
                fput(i915_tt->filp);

        ttm_tt_fini(ttm);
        i915_refct_sgt_put(&i915_tt->cached_rsgt);
}

static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
                                       const struct ttm_place *place)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        struct ttm_resource *res = bo->resource;

        if (!obj)
                return false;

        /*
         * EXTERNAL objects should never be swapped out by TTM, instead we need
         * to handle that ourselves. TTM will already skip such objects for us,
         * but we would like to avoid grabbing locks for no good reason.
         */
        if (bo->ttm && bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
                return false;

        /* Will do for now. Our pinned objects are still on TTM's LRU lists */
        if (!i915_gem_object_evictable(obj))
                return false;

        switch (res->mem_type) {
        case I915_PL_LMEM0: {
                struct ttm_resource_manager *man =
                        ttm_manager_type(bo->bdev, res->mem_type);
                struct i915_ttm_buddy_resource *bman_res =
                        to_ttm_buddy_resource(res);
                struct drm_buddy *mm = bman_res->mm;
                struct drm_buddy_block *block;

                if (!place->fpfn && !place->lpfn)
                        return true;

                GEM_BUG_ON(!place->lpfn);

                /*
                 * If we just want something mappable then we can quickly check
                 * if the current victim resource is using any of the CPU
                 * visible portion.
                 */
                if (!place->fpfn &&
                    place->lpfn == i915_ttm_buddy_man_visible_size(man))
                        return bman_res->used_visible_size > 0;

                /* Real range allocation */
                list_for_each_entry(block, &bman_res->blocks, link) {
                        unsigned long fpfn =
                                drm_buddy_block_offset(block) >> PAGE_SHIFT;
                        unsigned long lpfn = fpfn +
                                (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);

                        if (place->fpfn < lpfn && place->lpfn > fpfn)
                                return true;
                }
                return false;
        }
        default:
                break;
        }

        return true;
}

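/* On eviction, always move the object to system memory. */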
static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
                                 struct ttm_placement *placement)
{
        *placement = i915_sys_placement;
}

/**
 * i915_ttm_free_cached_io_rsgt - Free object cached LMEM information
 * @obj: The GEM object
 *
 * This function frees any LMEM-related information that is cached on
 * the object. For example the radix tree for fast page lookup and the
 * cached refcounted sg-table.
 */
void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        if (!obj->ttm.cached_io_rsgt)
                return;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
                radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
        rcu_read_unlock();

        i915_refct_sgt_put(obj->ttm.cached_io_rsgt);
        obj->ttm.cached_io_rsgt = NULL;
}

/**
 * i915_ttm_purge - Clear an object of its memory
 * @obj: The object
 *
 * This function is called to clear an object of its memory when it is
 * marked as not needed anymore.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_purge(struct drm_i915_gem_object *obj)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        struct i915_ttm_tt *i915_tt =
                container_of(bo->ttm, typeof(*i915_tt), ttm);
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
        };
        struct ttm_placement place = {};
        int ret;

        if (obj->mm.madv == __I915_MADV_PURGED)
                return 0;

        ret = ttm_bo_validate(bo, &place, &ctx);
        if (ret)
                return ret;

        if (bo->ttm && i915_tt->filp) {
                /*
                 * The below fput (which eventually calls shmem_truncate) might
                 * be delayed by worker, so when directly called to purge the
                 * pages (like by the shrinker) we should try to be more
                 * aggressive and release the pages immediately.
                 */
                shmem_truncate_range(file_inode(i915_tt->filp),
                                     0, (loff_t)-1);
                fput(fetch_and_zero(&i915_tt->filp));
        }

        obj->write_domain = 0;
        obj->read_domains = 0;
        i915_ttm_adjust_gem_after_move(obj);
        i915_ttm_free_cached_io_rsgt(obj);
        obj->mm.madv = __I915_MADV_PURGED;

        return 0;
}

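/*
 * Shrinker callback: for a shmem-backed, system-resident object, either
 * purge it (if marked DONTNEED) or mark the TT swapped and drop the pages
 * back to shmem, optionally starting writeback.
 */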
static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        struct i915_ttm_tt *i915_tt =
                container_of(bo->ttm, typeof(*i915_tt), ttm);
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT,
        };
        struct ttm_placement place = {};
        int ret;

        if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
                return 0;

        GEM_BUG_ON(!i915_tt->is_shmem);

        if (!i915_tt->filp)
                return 0;

        ret = ttm_bo_wait_ctx(bo, &ctx);
        if (ret)
                return ret;

        switch (obj->mm.madv) {
        case I915_MADV_DONTNEED:
                return i915_ttm_purge(obj);
        case __I915_MADV_PURGED:
                return 0;
        }

        if (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED)
                return 0;

        bo->ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
        ret = ttm_bo_validate(bo, &place, &ctx);
        if (ret) {
                bo->ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
                return ret;
        }

        if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
                __shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);

        return 0;
}

static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

        if (likely(obj)) {
                __i915_gem_object_pages_fini(obj);
                i915_ttm_free_cached_io_rsgt(obj);
        }
}

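/*
 * Return a refcounted sg-table for a system-memory page vector, building
 * and dma-mapping the table on first use.
 */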
static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
        struct sg_table *st;
        int ret;

        if (i915_tt->cached_rsgt.table.sgl)
                return i915_refct_sgt_get(&i915_tt->cached_rsgt);

        st = &i915_tt->cached_rsgt.table;
        ret = sg_alloc_table_from_pages_segment(st,
                        ttm->pages, ttm->num_pages,
                        0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
                        i915_sg_segment_size(), GFP_KERNEL);
        if (ret) {
                st->sgl = NULL;
                return ERR_PTR(ret);
        }

        ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
        if (ret) {
                sg_free_table(st);
                return ERR_PTR(ret);
        }

        return i915_refct_sgt_get(&i915_tt->cached_rsgt);
}

/**
 * i915_ttm_resource_get_st - Get a refcounted sg-table pointing to the
 * resource memory
 * @obj: The GEM object used for sg-table caching
 * @res: The struct ttm_resource for which an sg-table is requested.
 *
 * This function returns a refcounted sg-table representing the memory
 * pointed to by @res. If @res is the object's current resource it may also
 * cache the sg_table on the object or attempt to access an already cached
 * sg-table. The refcounted sg-table needs to be put when no longer in use.
 *
 * Return: A valid pointer to a struct i915_refct_sgt or error pointer on
 * failure.
 */
struct i915_refct_sgt *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
                         struct ttm_resource *res)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        u32 page_alignment;

        if (!i915_ttm_gtt_binds_lmem(res))
                return i915_ttm_tt_get_st(bo->ttm);

        page_alignment = bo->page_alignment << PAGE_SHIFT;
        if (!page_alignment)
                page_alignment = obj->mm.region->min_page_size;

        /*
         * If CPU mapping differs, we need to add the ttm_tt pages to
         * the resulting st. Might make sense for GGTT.
         */
        GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(res));
        if (bo->resource == res) {
                if (!obj->ttm.cached_io_rsgt) {
                        struct i915_refct_sgt *rsgt;

                        rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
                                                                 res,
                                                                 page_alignment);
                        if (IS_ERR(rsgt))
                                return rsgt;

                        obj->ttm.cached_io_rsgt = rsgt;
                }
                return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);
        }

        return intel_region_ttm_resource_to_rsgt(obj->mm.region, res,
                                                 page_alignment);
}

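/*
 * Truncate the object's backing store entirely: notify of the pending move
 * away from the old backing store, then purge.
 */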
static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        int err;

        WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);

        err = i915_ttm_move_notify(bo);
        if (err)
                return err;

        return i915_ttm_purge(obj);
}

static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        int ret;

        if (!obj)
                return;

        ret = i915_ttm_move_notify(bo);
        GEM_WARN_ON(ret);
        GEM_WARN_ON(obj->ttm.cached_io_rsgt);
        if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
                i915_ttm_purge(obj);
}

/**
 * i915_ttm_resource_mappable - Return true if the ttm resource is CPU
 * accessible.
 * @res: The TTM resource to check.
 *
 * This is interesting on small-BAR systems where we may encounter lmem
 * objects that can't be accessed via the CPU.
 */
bool i915_ttm_resource_mappable(struct ttm_resource *res)
{
        struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);

        if (!i915_ttm_cpu_maps_iomem(res))
                return true;

        return bman_res->used_visible_size == bman_res->base.num_pages;
}

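/*
 * Called by TTM before setting up an io mapping for the resource: reject
 * objects in an unknown state and LMEM resources that are not CPU
 * accessible.
 */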
static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(mem->bo);
        bool unknown_state;

        if (!obj)
                return -EINVAL;

        if (!kref_get_unless_zero(&obj->base.refcount))
                return -EINVAL;

        assert_object_held(obj);

        unknown_state = i915_gem_object_has_unknown_state(obj);
        i915_gem_object_put(obj);
        if (unknown_state)
                return -EINVAL;

        if (!i915_ttm_cpu_maps_iomem(mem))
                return 0;

        if (!i915_ttm_resource_mappable(mem))
                return -EINVAL;

        mem->bus.caching = ttm_write_combined;
        mem->bus.is_iomem = true;

        return 0;
}

static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                                         unsigned long page_offset)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        struct scatterlist *sg;
        unsigned long base;
        unsigned int ofs;

        GEM_BUG_ON(!obj);
        GEM_WARN_ON(bo->ttm);

        base = obj->mm.region->iomap.base - obj->mm.region->region.start;
        sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset,
                                      &ofs, true);

        return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}

/*
 * Note that a struct ttm_buffer_object reaching these callbacks may be a
 * TTM ghost object, in which case i915_ttm_to_gem() returns NULL and the
 * callbacks must cope with that.
 */
static struct ttm_device_funcs i915_ttm_bo_driver = {
        .ttm_tt_create = i915_ttm_tt_create,
        .ttm_tt_populate = i915_ttm_tt_populate,
        .ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
        .ttm_tt_destroy = i915_ttm_tt_destroy,
        .eviction_valuable = i915_ttm_eviction_valuable,
        .evict_flags = i915_ttm_evict_flags,
        .move = i915_ttm_move,
        .swap_notify = i915_ttm_swap_notify,
        .delete_mem_notify = i915_ttm_delete_mem_notify,
        .io_mem_reserve = i915_ttm_io_mem_reserve,
        .io_mem_pfn = i915_ttm_io_mem_pfn,
};

/**
 * i915_ttm_driver - Return a pointer to the TTM device funcs
 *
 * Return: Pointer to statically allocated TTM device funcs.
 */
struct ttm_device_funcs *i915_ttm_driver(void)
{
        return &i915_ttm_bo_driver;
}

static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
                                struct ttm_placement *placement)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
        };
        int real_num_busy;
        int ret;

        /* First try only the requested placement. No eviction. */
        real_num_busy = fetch_and_zero(&placement->num_busy_placement);
        ret = ttm_bo_validate(bo, placement, &ctx);
        if (ret) {
                ret = i915_ttm_err_to_gem(ret);
                /*
                 * Anything that wants to restart the operation gets to
                 * do that.
                 */
                if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS ||
                    ret == -EAGAIN)
                        return ret;

                /*
                 * If the initial attempt fails, allow all accepted placements,
                 * evicting if necessary.
                 */
                placement->num_busy_placement = real_num_busy;
                ret = ttm_bo_validate(bo, placement, &ctx);
                if (ret)
                        return i915_ttm_err_to_gem(ret);
        }

        if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
                ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
                if (ret)
                        return ret;

                i915_ttm_adjust_domains_after_move(obj);
                i915_ttm_adjust_gem_after_move(obj);
        }

        if (!i915_gem_object_has_pages(obj)) {
                struct i915_refct_sgt *rsgt =
                        i915_ttm_resource_get_st(obj, bo->resource);

                if (IS_ERR(rsgt))
                        return PTR_ERR(rsgt);

                GEM_BUG_ON(obj->mm.rsgt);
                obj->mm.rsgt = rsgt;
                __i915_gem_object_set_pages(obj, &rsgt->table,
                                            i915_sg_dma_sizes(rsgt->table.sgl));
        }

        GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages));
        i915_ttm_adjust_lru(obj);
        return ret;
}

static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
        struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
        struct ttm_placement placement;

        GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);

        /* Move to the requested placement. */
        i915_ttm_placement_from_obj(obj, &requested, busy, &placement);

        return __i915_ttm_get_pages(obj, &placement);
}

/*
 * Migrate the object to @mr by validating it into a placement built only
 * from that region. The passed-in @flags may differ from obj->flags, for
 * example when the caller clears I915_BO_ALLOC_GPU_ONLY to allow a
 * CPU-accessible placement. If the object afterwards resides in a region
 * different from its current one, the region bindings are re-initialized.
 */
static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
                              struct intel_memory_region *mr,
                              unsigned int flags)
{
        struct ttm_place requested;
        struct ttm_placement placement;
        int ret;

        i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
                                   obj->base.size, flags);
        placement.num_placement = 1;
        placement.num_busy_placement = 1;
        placement.placement = &requested;
        placement.busy_placement = &requested;

        ret = __i915_ttm_get_pages(obj, &placement);
        if (ret)
                return ret;

        /*
         * Reinitialize the region bindings. This is primarily
         * required for objects where the new region is not in
         * its allowable placements.
         */
        if (obj->mm.region != mr) {
                i915_gem_object_release_memory_region(obj);
                i915_gem_object_init_memory_region(obj, mr);
        }

        return 0;
}

static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
                            struct intel_memory_region *mr)
{
        return __i915_ttm_migrate(obj, mr, obj->flags);
}

static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
                               struct sg_table *st)
{
        /*
         * We're currently not called from a shrinker, so put_pages()
         * typically means the object is about to be destroyed, or called
         * from move_notify(). So just avoid doing much for now.
         * If the object is not destroyed next, the TTM eviction logic
         * and shrinkers will move it out if needed.
         */
        if (obj->mm.rsgt)
                i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt));
}

/**
 * i915_ttm_adjust_lru - Adjust an object's position on relevant LRU lists.
 * @obj: The object
 */
void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        struct i915_ttm_tt *i915_tt =
                container_of(bo->ttm, typeof(*i915_tt), ttm);
        bool shrinkable =
                bo->ttm && i915_tt->filp && ttm_tt_is_populated(bo->ttm);

        /*
         * Don't manipulate the TTM LRUs while in TTM bo destruction.
         * We're called through i915_ttm_delete_mem_notify().
         */
        if (!kref_read(&bo->kref))
                return;

        /*
         * We skip managing the shrinker LRU in set_pages() and just manage
         * everything here. This does at least solve the issue with having
         * temporary shmem mappings (like with evicted lmem) not being visible
         * to the shrinker. Only our shmem objects are shrinkable, everything
         * else we keep as unshrinkable.
         *
         * To make sure everything plays nice we keep an extra shrink pin in
         * TTM if the underlying pages are not currently shrinkable. Once we
         * release our pin, like when the pages are moved to shmem, the pages
         * will then be added to the shrinker LRU, assuming the caller isn't
         * also holding a pin.
         *
         * TODO: consider maybe also bumping the shrinker list here when we
         * have already unpinned it, which should give us something more like
         * an LRU.
         *
         * TODO: There is a small window of opportunity for this function to
         * get called from eviction after we've dropped the last GEM refcount,
         * but before the TTM deleted flag is set on the object. Avoid
         * adjusting the shrinker list in such cases, since the object is
         * not available to the shrinker anyway.
         */
        if (kref_get_unless_zero(&obj->base.refcount)) {
                if (shrinkable != obj->mm.ttm_shrinkable) {
                        if (shrinkable) {
                                if (obj->mm.madv == I915_MADV_WILLNEED)
                                        __i915_gem_object_make_shrinkable(obj);
                                else
                                        __i915_gem_object_make_purgeable(obj);
                        } else {
                                i915_gem_object_make_unshrinkable(obj);
                        }

                        obj->mm.ttm_shrinkable = shrinkable;
                }
                i915_gem_object_put(obj);
        }

        /*
         * Put on the correct LRU list depending on the MADV status
         */
        spin_lock(&bo->bdev->lru_lock);
        if (shrinkable) {
                /* Try to keep shmem_tt from being considered for shrinking. */
                bo->priority = TTM_MAX_BO_PRIORITY - 1;
        } else if (obj->mm.madv != I915_MADV_WILLNEED) {
                bo->priority = I915_TTM_PRIO_PURGE;
        } else if (!i915_gem_object_has_pages(obj)) {
                bo->priority = I915_TTM_PRIO_NO_PAGES;
        } else {
                struct ttm_resource_manager *man =
                        ttm_manager_type(bo->bdev, bo->resource->mem_type);

                /*
                 * If we need to place an LMEM resource which doesn't need CPU
                 * access then we should try not to victimize mappable objects
                 * first, as we likely end up stealing more of the mappable
                 * portion. And likewise when we try to find space for a
                 * mappable object, we know not to ever victimize objects that
                 * don't occupy any mappable pages.
                 */
                if (i915_ttm_cpu_maps_iomem(bo->resource) &&
                    i915_ttm_buddy_man_visible_size(man) < man->size &&
                    !(obj->flags & I915_BO_ALLOC_GPU_ONLY))
                        bo->priority = I915_TTM_PRIO_NEEDS_CPU_ACCESS;
                else
                        bo->priority = I915_TTM_PRIO_HAS_PAGES;
        }

        ttm_bo_move_to_lru_tail(bo);
        spin_unlock(&bo->bdev->lru_lock);
}

/*
 * TTM-backed gem object destruction requires some clarification.
 * Basically we have two possibilities here. We can either rely on the
 * i915 delayed destruction and put the TTM object when the object
 * is idle. This would be detected by TTM which would bypass the
 * TTM delayed destroy handling. The other approach is to put the TTM
 * object early and rely on the TTM destroyed handling, and then free
 * the leftover parts of the GEM object once TTM's destroyed list handling
 * is complete. For now, we rely on the latter for two reasons:
 * a) TTM can evict an object even when it's on the delayed destroy list,
 * which in theory allows for complete eviction.
 * b) There is work going on in TTM to allow freeing an object even when
 * it's not idle, and using the TTM destroyed list handling could help us
 * benefit from that.
 */
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!obj->ttm.created);

        ttm_bo_put(i915_gem_to_ttm(obj));
}

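/*
 * CPU fault handler. If the object currently lives in non-mappable LMEM,
 * try to migrate it to a CPU-accessible placement before inserting PTEs.
 */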
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
        struct vm_area_struct *area = vmf->vma;
        struct ttm_buffer_object *bo = area->vm_private_data;
        struct drm_device *dev = bo->base.dev;
        struct drm_i915_gem_object *obj;
        vm_fault_t ret;
        int idx;

        obj = i915_ttm_to_gem(bo);
        if (!obj)
                return VM_FAULT_SIGBUS;

        /* Sanity check that we allow writing into this object */
        if (unlikely(i915_gem_object_is_readonly(obj) &&
                     area->vm_flags & VM_WRITE))
                return VM_FAULT_SIGBUS;

        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        if (obj->mm.madv != I915_MADV_WILLNEED) {
                dma_resv_unlock(bo->base.resv);
                return VM_FAULT_SIGBUS;
        }

        if (!i915_ttm_resource_mappable(bo->resource)) {
                int err = -ENODEV;
                int i;

                for (i = 0; i < obj->mm.n_placements; i++) {
                        struct intel_memory_region *mr = obj->mm.placements[i];
                        unsigned int flags;

                        if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
                                continue;

                        flags = obj->flags;
                        flags &= ~I915_BO_ALLOC_GPU_ONLY;
                        err = __i915_ttm_migrate(obj, mr, flags);
                        if (!err)
                                break;
                }

                if (err) {
                        drm_dbg(dev, "Unable to make resource CPU accessible\n");
                        dma_resv_unlock(bo->base.resv);
                        return VM_FAULT_SIGBUS;
                }
        }

        if (drm_dev_enter(dev, &idx)) {
                ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                                               TTM_BO_VM_NUM_PREFAULT);
                drm_dev_exit(idx);
        } else {
                ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
        }
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;

        i915_ttm_adjust_lru(obj);

        dma_resv_unlock(bo->base.resv);
        return ret;
}

static int
vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
              void *buf, int len, int write)
{
        struct drm_i915_gem_object *obj =
                i915_ttm_to_gem(area->vm_private_data);

        if (i915_gem_object_is_readonly(obj) && write)
                return -EACCES;

        return ttm_bo_vm_access(area, addr, buf, len, write);
}

static void ttm_vm_open(struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj =
                i915_ttm_to_gem(vma->vm_private_data);

        GEM_BUG_ON(!obj);
        i915_gem_object_get(obj);
}

static void ttm_vm_close(struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj =
                i915_ttm_to_gem(vma->vm_private_data);

        GEM_BUG_ON(!obj);
        i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_ttm = {
        .fault = vm_fault_ttm,
        .access = vm_access_ttm,
        .open = ttm_vm_open,
        .close = ttm_vm_close,
};

static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
{
        /* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
        GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));

        return drm_vma_node_offset_addr(&obj->base.vma_node);
}

static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
{
        ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
}

static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
        .name = "i915_gem_object_ttm",
        .flags = I915_GEM_OBJECT_IS_SHRINKABLE |
                 I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,

        .get_pages = i915_ttm_get_pages,
        .put_pages = i915_ttm_put_pages,
        .truncate = i915_ttm_truncate,
        .shrink = i915_ttm_shrink,

        .adjust_lru = i915_ttm_adjust_lru,
        .delayed_free = i915_ttm_delayed_free,
        .migrate = i915_ttm_migrate,

        .mmap_offset = i915_ttm_mmap_offset,
        .unmap_virtual = i915_ttm_unmap_virtual,
        .mmap_ops = &vm_ops_ttm,
};

void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

        i915_gem_object_release_memory_region(obj);
        mutex_destroy(&obj->ttm.get_io_page.lock);

        if (obj->ttm.created) {
                /*
                 * We freely manage the shrinker LRU outside of the mm.pages
                 * life cycle. As a result when destroying the object we
                 * should be extra paranoid and ensure we remove it from the
                 * LRU, before we free the object.
                 *
                 * Touching the ttm_shrinkable outside of the object lock here
                 * should be safe now that the last GEM object ref was dropped.
                 */
                if (obj->mm.ttm_shrinkable)
                        i915_gem_object_make_unshrinkable(obj);

                i915_ttm_backup_free(obj);

                /* This releases all gem object bindings to the backend. */
                __i915_gem_free_object(obj);

                call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
        } else {
                __i915_gem_object_fini(obj);
        }
}

/**
 * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
 * @mem: The initial memory region for the object.
 * @obj: The gem object.
 * @offset: Where to start the object's allocation within the region, or
 * I915_BO_INVALID_OFFSET for no fixed offset.
 * @size: Object size in bytes.
 * @page_size: The requested GTT page size, kernel internal only.
 * @flags: gem object flags.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
                               struct drm_i915_gem_object *obj,
                               resource_size_t offset,
                               resource_size_t size,
                               resource_size_t page_size,
                               unsigned int flags)
{
        static struct lock_class_key lock_class;
        struct drm_i915_private *i915 = mem->i915;
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
        };
        enum ttm_bo_type bo_type;
        int ret;

        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);

        obj->bo_offset = offset;

        /* Don't put on a region list until we're either locked or fully initialized. */
        obj->mm.region = mem;
        INIT_LIST_HEAD(&obj->mm.region_link);

        INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->ttm.get_io_page.lock);
        bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
                ttm_bo_type_kernel;

        obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);

        /* Forcing the page size is kernel internal only */
        GEM_BUG_ON(page_size && obj->mm.n_placements);

        /*
         * Keep an extra shrink pin to prevent the object from being made
         * shrinkable too early. If the ttm_tt is ever allocated in shmem, we
         * drop the pin. The TTM backend manages the shrinker LRU itself,
         * outside of the normal mm.pages life cycle.
         */
        i915_gem_object_make_unshrinkable(obj);

        /*
         * If this function fails, it will call the destructor, but
         * our caller still owns the object. So no freeing in the
         * destructor until obj->ttm.created is true.
         * Similarly, in delayed_destroy, we can't call ttm_bo_put()
         * until successful initialization.
         */
        ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
                                   bo_type, &i915_sys_placement,
                                   page_size >> PAGE_SHIFT,
                                   &ctx, NULL, NULL, i915_ttm_bo_destroy);
        if (ret)
                return i915_ttm_err_to_gem(ret);

        obj->ttm.created = true;
        i915_gem_object_release_memory_region(obj);
        i915_gem_object_init_memory_region(obj, mem);
        i915_ttm_adjust_domains_after_move(obj);
        i915_ttm_adjust_gem_after_move(obj);
        i915_gem_object_unlock(obj);

        return 0;
}

static const struct intel_memory_region_ops ttm_system_region_ops = {
        .init_object = __i915_gem_ttm_object_init,
        .release = intel_region_ttm_fini,
};

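/*
 * Set up the TTM-backed system memory region, sized after total RAM.
 */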
struct intel_memory_region *
i915_gem_ttm_system_setup(struct drm_i915_private *i915,
                          u16 type, u16 instance)
{
        struct intel_memory_region *mr;

        mr = intel_memory_region_create(i915, 0,
                                        totalram_pages() << PAGE_SHIFT,
                                        PAGE_SIZE, 0, 0,
                                        type, instance,
                                        &ttm_system_region_ops);
        if (IS_ERR(mr))
                return mr;

        intel_memory_region_set_name(mr, "system-ttm");
        return mr;
}