/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/sched/mm.h>

#include <drm/drm_cache.h>

#include "display/intel_frontbuffer.h"
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_dmabuf.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_gem_ttm.h"
#include "i915_memcpy.h"
#include "i915_trace.h"

static struct kmem_cache *slab_objects;

static const struct drm_gem_object_funcs i915_gem_object_funcs;

struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
	struct drm_i915_gem_object *obj;

	obj = kmem_cache_zalloc(slab_objects, GFP_KERNEL);
	if (!obj)
		return NULL;
	obj->base.funcs = &i915_gem_object_funcs;

	return obj;
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	return kmem_cache_free(slab_objects, obj);
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key, unsigned flags)
{
	/*
	 * A GEM object is embedded both in a struct ttm_buffer_object and
	 * in a drm_i915_gem_object, so make sure the two base objects alias.
	 */
	BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
		     offsetof(typeof(*obj), __do_not_access.base));

	spin_lock_init(&obj->vma.lock);
	INIT_LIST_HEAD(&obj->vma.list);

	INIT_LIST_HEAD(&obj->mm.link);

	INIT_LIST_HEAD(&obj->lut_list);
	spin_lock_init(&obj->lut_lock);

	spin_lock_init(&obj->mmo.lock);
	obj->mmo.offsets = RB_ROOT;

	init_rcu_head(&obj->rcu);

	obj->ops = ops;
	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
	obj->flags = flags;

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);
	INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_dma_page.lock);
}

/**
 * __i915_gem_object_fini - Finish the common cleanup of a GEM object
 * @obj: The gem object to clean up
 *
 * Tears down the locks and bookkeeping set up by i915_gem_object_init()
 * and the underlying drm_gem object initialisation. Intended as a helper
 * for backends that free the object in separate steps.
 */
void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
{
	mutex_destroy(&obj->mm.get_page.lock);
	mutex_destroy(&obj->mm.get_dma_page.lock);
	dma_resv_fini(&obj->base._resv);
}

/**
 * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
 * for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	obj->cache_level = cache_level;

	if (cache_level != I915_CACHE_NONE)
		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
				       I915_BO_CACHE_COHERENT_FOR_WRITE);
	else if (HAS_LLC(i915))
		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	obj->cache_dirty =
		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
		!IS_DGFX(i915);
}
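
/*
 * Illustrative sketch (not part of this file): a backend typically picks
 * the cache level at object creation time and marks up coherency right
 * away, e.g.
 *
 *	i915_gem_object_set_cache_coherency(obj, HAS_LLC(i915) ?
 *					    I915_CACHE_LLC : I915_CACHE_NONE);
 */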

bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/*
	 * This check is purely about security: we don't care about
	 * non-userspace objects being able to bypass the LLC.
	 */
	if (!(obj->flags & I915_BO_ALLOC_USER))
		return false;

	/*
	 * JSL/EHL expose a 'Bypass LLC' MOCS entry, which lets userspace
	 * sidestep the caching behaviour the kernel selected for the object.
	 * Since the heavy flush applied when first acquiring the pages is
	 * skipped if the object is considered coherent with the GPU,
	 * userspace could otherwise read stale, never-zeroed data straight
	 * from memory. Treat these platforms as able to bypass the LLC so
	 * callers know an explicit flush is still required.
	 */
	return IS_JSL_EHL(i915);
}

static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle bookmark = {};
	struct i915_mmap_offset *mmo, *mn;
	struct i915_lut_handle *lut, *ln;
	LIST_HEAD(close);

	spin_lock(&obj->lut_lock);
	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;

		if (ctx && ctx->file_priv == fpriv) {
			i915_gem_context_get(ctx);
			list_move(&lut->obj_link, &close);
		}

		/* Break up long-held locks, resuming the walk from a bookmark */
		if (&ln->obj_link != &obj->lut_list) {
			list_add_tail(&bookmark.obj_link, &ln->obj_link);
			if (cond_resched_lock(&obj->lut_lock))
				list_safe_reset_next(&bookmark, ln, obj_link);
			__list_del_entry(&bookmark.obj_link);
		}
	}
	spin_unlock(&obj->lut_lock);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
		drm_vma_node_revoke(&mmo->vma_node, file);
	spin_unlock(&obj->mmo.lock);

	list_for_each_entry_safe(lut, ln, &close, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		/*
		 * The process may hold multiple handles to the same vma
		 * within the same fd namespace, so the LUT entry may
		 * already have been removed by a sibling handle.
		 */
		mutex_lock(&ctx->lut_mutex);
		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		if (vma) {
			GEM_BUG_ON(vma->obj != obj);
			GEM_BUG_ON(!atomic_read(&vma->open_count));
			i915_vma_close(vma);
		}
		mutex_unlock(&ctx->lut_mutex);

		i915_gem_context_put(lut->ctx);
		i915_lut_handle_free(lut);
		i915_gem_object_put(obj);
	}
}

void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	i915_gem_object_free(obj);

	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
	atomic_dec(&i915->mm.free_count);
}

static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
	/* Skip serialisation and waking the device if known to be not used. */
	if (obj->userfault_count)
		i915_gem_object_release_mmap_gtt(obj);

	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
		struct i915_mmap_offset *mmo, *mn;

		i915_gem_object_release_mmap_offset(obj);

		rbtree_postorder_for_each_entry_safe(mmo, mn,
						     &obj->mmo.offsets,
						     offset) {
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
		}
		obj->mmo.offsets = RB_ROOT;
	}
}

/**
 * __i915_gem_object_pages_fini - Clean up the page-related state of a GEM object
 * @obj: The gem object to clean up
 *
 * Destroys any remaining vmas, revokes mmaps and releases the object's
 * backing pages. Intended for backends that tear down the object in
 * separate steps; must be called while the object is idle and with the
 * object lock held (shared).
 */
void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
	assert_object_held_shared(obj);

	if (!list_empty(&obj->vma.list)) {
		struct i915_vma *vma;

		spin_lock(&obj->vma.lock);
		while ((vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
			GEM_BUG_ON(vma->obj != obj);
			spin_unlock(&obj->vma.lock);

			i915_vma_destroy(vma);

			spin_lock(&obj->vma.lock);
		}
		spin_unlock(&obj->vma.lock);
	}

	__i915_gem_object_free_mmaps(obj);

	atomic_set(&obj->mm.pages_pin_count, 0);
	__i915_gem_object_put_pages(obj);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));
}

void __i915_gem_free_object(struct drm_i915_gem_object *obj)
{
	trace_i915_gem_object_destroy(obj);

	GEM_BUG_ON(!list_empty(&obj->lut_list));

	bitmap_free(obj->bit_17);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	drm_gem_free_mmap_offset(&obj->base);

	if (obj->ops->release)
		obj->ops->release(obj);

	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);

	if (obj->shares_resv_from)
		i915_vm_resv_put(obj->shares_resv_from);

	__i915_gem_object_fini(obj);
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	llist_for_each_entry_safe(obj, on, freed, freed) {
		might_sleep();
		if (obj->ops->delayed_free) {
			obj->ops->delayed_free(obj);
			continue;
		}

		__i915_gem_object_pages_fini(obj);
		__i915_gem_free_object(obj);

		/* But keep the pointer alive for RCU-protected lookups */
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
		cond_resched();
	}
}

void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->mm.free_list);

	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);

	i915_gem_flush_free_objects(i915);
}

static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));

	/*
	 * Track the number of objects with a pending deferred free; the
	 * counter is dropped again from __i915_gem_free_object_rcu().
	 */
	atomic_inc(&i915->mm.free_count);

	/*
	 * The actual teardown (unbinding vmas, releasing the backing store)
	 * may sleep and take further locks, so it cannot be done here, where
	 * the final reference may be dropped from lock-sensitive contexts.
	 * Queue the object on the free list and let a worker perform the
	 * real free instead.
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		queue_work(i915->wq, &i915->mm.free_work);
}

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_flush(front, origin);
		intel_frontbuffer_put(front);
	}
}

void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_invalidate(front, origin);
		intel_frontbuffer_put(front);
	}
}

static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	void *src_map;
	void *src_ptr;

	src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));

	src_ptr = src_map + offset_in_page(offset);
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_virt_range(src_ptr, size);
	memcpy(dst, src_ptr, size);

	kunmap_atomic(src_map);
}

static void
i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	void __iomem *src_map;
	void __iomem *src_ptr;
	dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);

	src_map = io_mapping_map_wc(&obj->mm.region->iomap,
				    dma - obj->mm.region->region.start,
				    PAGE_SIZE);

	src_ptr = src_map + offset_in_page(offset);
	if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
		memcpy_fromio(dst, src_ptr, size);

	io_mapping_unmap(src_map);
}

/**
 * i915_gem_object_read_from_page - read data from the page of a GEM object
 * @obj: GEM object to read from
 * @offset: offset within the object
 * @dst: buffer to store the read data
 * @size: size to read
 *
 * Reads data from @obj at the specified offset. The requested region to read
 * from can't cross a page boundary. The caller must ensure that @obj pages
 * are pinned and that @obj is synced wrt. any related writes.
 *
 * Return: %0 on success or -ENODEV if the type of @obj's backing store is
 * not supported.
 */
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	GEM_BUG_ON(offset >= obj->base.size);
	GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
	else if (i915_gem_object_has_iomem(obj))
		i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
	else
		return -ENODEV;

	return 0;
}
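
/*
 * Illustrative sketch (not part of this file): reading a dword from the
 * start of an object. The read must not cross a page boundary and the
 * pages must already be pinned, e.g. via i915_gem_object_pin_pages_unlocked():
 *
 *	u32 dword;
 *	int err;
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *
 *	err = i915_gem_object_read_from_page(obj, 0, &dword, sizeof(dword));
 *	i915_gem_object_unpin_pages(obj);
 */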

/**
 * i915_gem_object_evictable - Whether the object can likely be evicted
 * @obj: The object to check
 *
 * Checks whether all page-pin references are attributable to vmas on the
 * object, in which case unbinding those vmas would make the pages
 * evictable. Without the object lock held the result is only advisory,
 * since vmas may be pinned and unpinned concurrently.
 *
 * Return: true if the object is likely evictable once its vmas are
 * unbound, false otherwise.
 */
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = atomic_read(&obj->mm.pages_pin_count);

	if (!pin_count)
		return true;

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma)) {
			spin_unlock(&obj->vma.lock);
			return false;
		}
		if (atomic_read(&vma->pages_count))
			pin_count--;
	}
	spin_unlock(&obj->vma.lock);
	GEM_WARN_ON(pin_count < 0);

	return pin_count == 0;
}

/**
 * i915_gem_object_migratable - Whether the object may reside in more than
 * one memory region
 * @obj: Pointer to the object.
 *
 * Return: Whether the object is allowed to be resident in regions other
 * than its current one.
 */
bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

	if (!mr)
		return false;

	return obj->mm.n_placements > 1;
}

/**
 * i915_gem_object_has_struct_page - Whether the object is page-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the page backing could change under the caller.
 *
 * Return: True if page-backed, false otherwise.
 */
bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
}

/**
 * i915_gem_object_has_iomem - Whether the object is iomem-backed
 * @obj: The object to query.
 *
 * This function should only be called while the object is locked or pinned,
 * otherwise the iomem backing could change under the caller.
 *
 * Return: True if iomem-backed, false otherwise.
 */
bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_IOMEM;
}

/**
 * i915_gem_object_can_migrate - Whether an object likely can be migrated
 * @obj: The object to check
 * @id: The region intended to migrate to
 *
 * Check whether the object backend supports migration to the given region.
 * Note that pinning may affect the ability to migrate as returned by this
 * function.
 *
 * This function is primarily intended as a helper for checking the
 * possibility to migrate objects and might be slightly less permissive
 * than i915_gem_object_migrate() when it comes to objects with the
 * I915_BO_ALLOC_USER flag set.
 *
 * Return: true if migration is possible, false otherwise.
 */
bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int num_allowed = obj->mm.n_placements;
	struct intel_memory_region *mr;
	unsigned int i;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);

	mr = i915->mm.regions[id];
	if (!mr)
		return false;

	if (!IS_ALIGNED(obj->base.size, mr->min_page_size))
		return false;

	if (obj->mm.region == mr)
		return true;

	if (!i915_gem_object_evictable(obj))
		return false;

	if (!obj->ops->migrate)
		return false;

	if (!(obj->flags & I915_BO_ALLOC_USER))
		return true;

	if (num_allowed == 0)
		return false;

	for (i = 0; i < num_allowed; ++i) {
		if (mr == obj->mm.placements[i])
			return true;
	}

	return false;
}

/**
 * i915_gem_object_migrate - Migrate an object to the desired region id
 * @obj: The object to migrate.
 * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may not be
 * successful in evicting other objects to make room for this object.
 * @id: The region id to migrate to.
 *
 * Attempt to migrate the object to the desired memory region. The object
 * backend must support migration and the object may not be pinned (neither
 * explicitly pinned pages nor pinned vmas). The object must be locked.
 *
 * On successful completion the object will have pages pointing to memory in
 * the new region, but an async migration task may not have completed yet;
 * wait for it with i915_gem_object_wait_migration().
 *
 * Note: the @ww parameter is currently unused by this function itself, but
 * callers are expected to provide one when available.
 *
 * Return: 0 on success, negative error code on failure; in particular
 * -EINVAL if migration to @id is not possible for this object.
 */
int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mr;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
	assert_object_held(obj);

	mr = i915->mm.regions[id];
	GEM_BUG_ON(!mr);

	if (!i915_gem_object_can_migrate(obj, id))
		return -EINVAL;

	if (!obj->ops->migrate) {
		if (GEM_WARN_ON(obj->mm.region != mr))
			return -EINVAL;
		return 0;
	}

	return obj->ops->migrate(obj, mr);
}
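
/*
 * Illustrative sketch (not part of this file): migrating an object to
 * system memory under a ww transaction, using the usual for_i915_gem_ww()
 * retry loop to handle -EDEADLK:
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_gem_object_lock(obj, &ww);
 *		if (err)
 *			continue;
 *
 *		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
 *	}
 */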

/**
 * i915_gem_object_placement_possible - Check whether the object can be
 * placed in a certain memory type
 * @obj: Pointer to the object
 * @type: The memory type to check
 *
 * Return: True if the object can be placed in @type. False otherwise.
 */
bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type)
{
	unsigned int i;

	if (!obj->mm.n_placements) {
		switch (type) {
		case INTEL_MEMORY_LOCAL:
			return i915_gem_object_has_iomem(obj);
		case INTEL_MEMORY_SYSTEM:
			return i915_gem_object_has_pages(obj);
		default:
			/* Objects without a placement list live in one of the above */
			GEM_BUG_ON(1);
			return false;
		}
	}

	for (i = 0; i < obj->mm.n_placements; i++) {
		if (obj->mm.placements[i]->type == type)
			return true;
	}

	return false;
}

/**
 * i915_gem_object_needs_ccs_pages - Check whether the object requires extra
 * pages when placed in system-memory, in order to save and later restore the
 * flat-CCS aux state when the object is moved between local-memory and
 * system-memory
 * @obj: Pointer to the object
 *
 * Return: True if the object needs extra ccs pages. False otherwise.
 */
bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
{
	bool lmem_placement = false;
	int i;

	if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
		return false;

	for (i = 0; i < obj->mm.n_placements; i++) {
		/* Compression is not allowed for objects with a smem placement */
		if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
			return false;
		if (!lmem_placement &&
		    obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
			lmem_placement = true;
	}

	return lmem_placement;
}

void i915_gem_init__objects(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

void i915_objects_module_exit(void)
{
	kmem_cache_destroy(slab_objects);
}

int __init i915_objects_module_init(void)
{
	slab_objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!slab_objects)
		return -ENOMEM;

	return 0;
}

static const struct drm_gem_object_funcs i915_gem_object_funcs = {
	.free = i915_gem_free_object,
	.close = i915_gem_close_object,
	.export = i915_gem_prime_export,
};

/**
 * i915_gem_object_get_moving_fence - Get the object's moving fence if any
 * @obj: The object whose moving fence to get.
 * @fence: The resulting fence
 *
 * A non-signaled moving fence means that there is an async operation
 * pending on the object that needs to be waited on before setting up
 * any GPU- or CPU PTEs to the object's pages.
 *
 * Return: Negative error code or 0 for success.
 */
int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence)
{
	return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
				      fence);
}

/**
 * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
 * @obj: The object whose moving fence to wait for.
 * @intr: Whether to wait interruptible.
 *
 * A moving fence means that there is an async operation pending on the
 * object that needs to be waited on before setting up any GPU- or CPU
 * PTEs to the object's pages.
 *
 * Return: 0 on success, -ERESTARTSYS if the wait was interrupted,
 * -ETIME if the wait timed out, or -EIO if the object's backing store
 * ended up in an unknown state after the move.
 */
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr)
{
	long ret;

	assert_object_held(obj);

	ret = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
				    intr, MAX_SCHEDULE_TIMEOUT);
	if (!ret)
		ret = -ETIME;
	else if (ret > 0 && i915_gem_object_has_unknown_state(obj))
		ret = -EIO;

	return ret < 0 ? ret : 0;
}
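
/*
 * Illustrative sketch (not part of this file): waiting for a pending move
 * before touching the object's pages, with the object lock held:
 *
 *	err = i915_gem_object_lock_interruptible(obj, NULL);
 *	if (err)
 *		return err;
 *
 *	err = i915_gem_object_wait_moving_fence(obj, true);
 *	i915_gem_object_unlock(obj);
 *	if (err)
 *		return err;
 */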

/**
 * i915_gem_object_has_unknown_state - Return true if the object backing pages
 * are in an unknown state. This means that userspace must NEVER be allowed to
 * touch the pages, with either the GPU or CPU.
 * @obj: The object to query.
 *
 * Only valid to be called after ensuring that all kernel fences have signalled
 * (in particular the fence for moving/clearing the object).
 */
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj)
{
	/*
	 * The barrier below pairs with the write and fence signalling in the
	 * migration path; only sample unknown_state after all the kernel
	 * fences for the object have signalled.
	 */
	smp_rmb();
	return obj->mm.unknown_state;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_migrate.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif