/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */
#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                                 unsigned int sg_page_sizes)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        unsigned long supported = INTEL_INFO(i915)->page_sizes;
        bool shrinkable;
        int i;

        assert_object_held_shared(obj);

        if (i915_gem_object_is_volatile(obj))
                obj->mm.madv = I915_MADV_DONTNEED;

        /* Make the pages coherent with the GPU (flushing any swapin). */
        if (obj->cache_dirty) {
                WARN_ON_ONCE(IS_DGFX(i915));
                obj->write_domain = 0;
                if (i915_gem_object_has_struct_page(obj))
                        drm_clflush_sg(pages);
                obj->cache_dirty = false;
        }

        obj->mm.get_page.sg_pos = pages->sgl;
        obj->mm.get_page.sg_idx = 0;
        obj->mm.get_dma_page.sg_pos = pages->sgl;
        obj->mm.get_dma_page.sg_idx = 0;

        obj->mm.pages = pages;

        GEM_BUG_ON(!sg_page_sizes);
        obj->mm.page_sizes.phys = sg_page_sizes;

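        /*
         * Work out which of the supported GTT page sizes we can actually use
         * for this object: a size is usable when the platform supports it and
         * the physical layout (page_sizes.phys) contains at least one chunk
         * of that size or larger.
         */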
        obj->mm.page_sizes.sg = 0;
        for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
                if (obj->mm.page_sizes.phys & ~0u << i)
                        obj->mm.page_sizes.sg |= BIT(i);
        }
        GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

        shrinkable = i915_gem_object_is_shrinkable(obj);

        if (i915_gem_object_is_tiled(obj) &&
            i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
                i915_gem_object_set_tiling_quirk(obj);
                GEM_BUG_ON(!list_empty(&obj->mm.link));
                atomic_inc(&obj->mm.shrink_pin);
                shrinkable = false;
        }

        if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
                struct list_head *list;
                unsigned long flags;

                assert_object_held(obj);
                spin_lock_irqsave(&i915->mm.obj_lock, flags);

                i915->mm.shrink_count++;
                i915->mm.shrink_memory += obj->base.size;

                if (obj->mm.madv != I915_MADV_WILLNEED)
                        list = &i915->mm.purge_list;
                else
                        list = &i915->mm.shrink_list;
                list_add_tail(&obj->mm.link, list);

                atomic_set(&obj->mm.shrink_pin, 0);
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }
}

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        int err;

        assert_object_held_shared(obj);

        if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
                drm_dbg(&i915->drm,
                        "Attempting to obtain a purgeable object\n");
                return -EFAULT;
        }

        err = obj->ops->get_pages(obj);
        GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

        return err;
}
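/*
 * Ensure that the backing pages are gathered from the backing storage and
 * pinned into the object. i915_gem_object_pin_pages() may be called multiple
 * times before the pages are released by a matching number of calls to
 * i915_gem_object_unpin_pages().
 */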
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
        int err;

        assert_object_held(obj);

        assert_object_held_shared(obj);

        if (unlikely(!i915_gem_object_has_pages(obj))) {
                GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

                err = ____i915_gem_object_get_pages(obj);
                if (err)
                        return err;

                smp_mb__before_atomic();
        }
        atomic_inc(&obj->mm.pages_pin_count);

        return 0;
}

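/*
 * Pin the pages without the caller holding the object lock: take the lock
 * with a fresh ww acquire context, pin, and back off and retry on -EDEADLK
 * as required by the ww-mutex protocol.
 */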
int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = i915_gem_object_lock(obj, &ww);
        if (!err)
                err = i915_gem_object_pin_pages(obj);

        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}

/* Try to discard the backing storage, if the backend supports it. */
int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
        if (obj->ops->truncate)
                return obj->ops->truncate(obj);

        return 0;
}

/* Forget all cached radix-tree lookups for both page iterators. */
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
                radix_tree_delete(&obj->mm.get_page.radix, iter.index);
        radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
                radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
        rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
        /* Only vmap()/vmap_pfn() mappings need explicit teardown. */
        if (is_vmalloc_addr(ptr))
                vunmap(ptr);
}

static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct intel_gt *gt = to_gt(i915);

        if (!obj->mm.tlb)
                return;

        intel_gt_invalidate_tlb(gt, obj->mm.tlb);
        obj->mm.tlb = 0;
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;

        assert_object_held_shared(obj);

        pages = fetch_and_zero(&obj->mm.pages);
        if (IS_ERR_OR_NULL(pages))
                return pages;

        if (i915_gem_object_is_volatile(obj))
                obj->mm.madv = I915_MADV_WILLNEED;

        if (!i915_gem_object_has_self_managed_shrink_list(obj))
                i915_gem_object_make_unshrinkable(obj);

        if (obj->mm.mapping) {
                unmap_object(obj, page_mask_bits(obj->mm.mapping));
                obj->mm.mapping = NULL;
        }

        __i915_gem_object_reset_page_iter(obj);
        obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

        flush_tlb_invalidate(obj);

        return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;

        if (i915_gem_object_has_pinned_pages(obj))
                return -EBUSY;

        /* May be called by the shrinker from within get_pages() (on another bo) */
        assert_object_held_shared(obj);

        i915_gem_object_release_mmap_offset(obj);

        /*
         * Detach the pages from the object first (clearing obj->mm.pages,
         * any kernel mapping and the cached sg iterators) before handing
         * them back to the backend.
         */
        pages = __i915_gem_object_unset_pages(obj);

        /*
         * obj->mm.pages may already have been NULL or an error pointer (e.g.
         * if the backend never populated the object), in which case there is
         * nothing to hand back to the backend.
         */
        if (!IS_ERR_OR_NULL(pages))
                obj->ops->put_pages(obj, pages);

        return 0;
}
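/* The 'mapping' part of i915_gem_object_pin_map() below. */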
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
                                      enum i915_map_type type)
{
        unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
        struct page *stack[32], **pages = stack, *page;
        struct sgt_iter iter;
        pgprot_t pgprot;
        void *vaddr;

        switch (type) {
        default:
                MISSING_CASE(type);
                fallthrough;
        case I915_MAP_WB:
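                /*
                 * A single lowmem page can be returned via its permanent
                 * kernel address. Highmem pages and multi-page objects are
                 * mapped with an explicit vmap() instead: unlike kmap(),
                 * which draws from a small finite pool of virtual slots on
                 * 32-bit, vmap() uses the larger vmalloc arena and fails
                 * gracefully rather than blocking when space runs out.
                 */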
                if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
                        return page_address(sg_page(obj->mm.pages->sgl));
                pgprot = PAGE_KERNEL;
                break;
        case I915_MAP_WC:
                pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
                break;
        }

        if (n_pages > ARRAY_SIZE(stack)) {
                /* Too big for the on-stack array, allocate a temporary one */
                pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
                if (!pages)
                        return ERR_PTR(-ENOMEM);
        }

        i = 0;
        for_each_sgt_page(page, iter, obj->mm.pages)
                pages[i++] = page;
        vaddr = vmap(pages, n_pages, 0, pgprot);
        if (pages != stack)
                kvfree(pages);

        return vaddr ?: ERR_PTR(-ENOMEM);
}

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
                                     enum i915_map_type type)
{
        resource_size_t iomap = obj->mm.region->iomap.base -
                obj->mm.region->region.start;
        unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
        unsigned long stack[32], *pfns = stack, i;
        struct sgt_iter iter;
        dma_addr_t addr;
        void *vaddr;

        GEM_BUG_ON(type != I915_MAP_WC);

        if (n_pfn > ARRAY_SIZE(stack)) {
                /* Too big for the on-stack array, allocate a temporary one */
                pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
                if (!pfns)
                        return ERR_PTR(-ENOMEM);
        }

        i = 0;
        for_each_sgt_daddr(addr, iter, obj->mm.pages)
                pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
        vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
        if (pfns != stack)
                kvfree(pfns);

        return vaddr ?: ERR_PTR(-ENOMEM);
}
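/* get, pin and map the backing pages of the object into kernel space */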
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                              enum i915_map_type type)
{
        enum i915_map_type has_type;
        bool pinned;
        void *ptr;
        int err;

        if (!i915_gem_object_has_struct_page(obj) &&
            !i915_gem_object_has_iomem(obj))
                return ERR_PTR(-ENXIO);

        if (WARN_ON_ONCE(obj->flags & I915_BO_ALLOC_GPU_ONLY))
                return ERR_PTR(-EINVAL);

        assert_object_held(obj);

        pinned = !(type & I915_MAP_OVERRIDE);
        type &= ~I915_MAP_OVERRIDE;

        if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
                if (unlikely(!i915_gem_object_has_pages(obj))) {
                        GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

                        err = ____i915_gem_object_get_pages(obj);
                        if (err)
                                return ERR_PTR(err);

                        smp_mb__before_atomic();
                }
                atomic_inc(&obj->mm.pages_pin_count);
                pinned = false;
        }
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));

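        /*
         * For discrete the CPU mapping needs to be consistent with how TTM
         * maps the object: anything that can be placed in device local
         * memory is only ever mapped write-combined, everything else on
         * discrete is mapped write-back (and guaranteed coherent with the
         * GPU). Objects without a placement list are expected to ask for
         * I915_MAP_WC themselves.
         */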
        if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
                if (type != I915_MAP_WC && !obj->mm.n_placements) {
                        ptr = ERR_PTR(-ENODEV);
                        goto err_unpin;
                }

                type = I915_MAP_WC;
        } else if (IS_DGFX(to_i915(obj->base.dev))) {
                type = I915_MAP_WB;
        }

        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (ptr && has_type != type) {
                if (pinned) {
                        ptr = ERR_PTR(-EBUSY);
                        goto err_unpin;
                }

                unmap_object(obj, ptr);

                ptr = obj->mm.mapping = NULL;
        }

        if (!ptr) {
                err = i915_gem_object_wait_moving_fence(obj, true);
                if (err) {
                        ptr = ERR_PTR(err);
                        goto err_unpin;
                }

                if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
                        ptr = ERR_PTR(-ENODEV);
                else if (i915_gem_object_has_struct_page(obj))
                        ptr = i915_gem_object_map_page(obj, type);
                else
                        ptr = i915_gem_object_map_pfn(obj, type);
                if (IS_ERR(ptr))
                        goto err_unpin;

                obj->mm.mapping = page_pack_bits(ptr, type);
        }

        return ptr;

err_unpin:
        atomic_dec(&obj->mm.pages_pin_count);
        return ptr;
}

void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
                                       enum i915_map_type type)
{
        void *ret;

        i915_gem_object_lock(obj, NULL);
        ret = i915_gem_object_pin_map(obj, type);
        i915_gem_object_unlock(obj);

        return ret;
}

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size)
{
        enum i915_map_type has_type;
        void *ptr;

        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
        GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
                                     offset, size, obj->base.size));

        wmb(); /* let all previous writes be visible to coherent partners */
        obj->mm.dirty = true;

        if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
                return;

        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (has_type == I915_MAP_WC)
                return;

        drm_clflush_virt_range(ptr + offset, size);
        if (size == obj->base.size) {
                obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
                obj->cache_dirty = false;
        }
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!obj->mm.mapping);
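
        /*
         * Tear down the kernel mapping and drop the page pin that
         * i915_gem_object_pin_map() took when it created the mapping. The
         * caller must ensure the mapping is no longer in use.
         */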
        unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

        i915_gem_object_unpin_map(obj);
}

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                         struct i915_gem_object_page_iter *iter,
                         unsigned int n,
                         unsigned int *offset,
                         bool dma)
{
        struct scatterlist *sg;
        unsigned int idx, count;

        might_sleep();
        GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
        if (!i915_gem_object_has_pinned_pages(obj))
                assert_object_held(obj);
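        /*
         * As we iterate forward through the sg table we record each entry in
         * a radix tree for fast repeated lookups. The initial walk is O(N),
         * but sequential accesses reuse the cached position and backwards
         * lookups are served from the radix tree.
         */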
        if (n < READ_ONCE(iter->sg_idx))
                goto lookup;

        mutex_lock(&iter->lock);

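        /*
         * We prefer to continue from the last sg position: comparing against
         * the cached position is cheaper than walking the radix tree for
         * repeated lookups of this (or the following) entry.
         */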
        sg = iter->sg_pos;
        idx = iter->sg_idx;
        count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

        while (idx + count <= n) {
                void *entry;
                unsigned long i;
                int ret;

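                /*
                 * If we cannot insert this entry (or the individual pages of
                 * its range) into the radix tree, bail out to 'scan' without
                 * advancing sg_idx: this lookup then proceeds linearly, while
                 * a future lookup will retry the insertion (hence -EEXIST is
                 * not treated as an error).
                 */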
                ret = radix_tree_insert(&iter->radix, idx, sg);
                if (ret && ret != -EEXIST)
                        goto scan;

                entry = xa_mk_value(idx);
                for (i = 1; i < count; i++) {
                        ret = radix_tree_insert(&iter->radix, idx + i, entry);
                        if (ret && ret != -EEXIST)
                                goto scan;
                }

                idx += count;
                sg = ____sg_next(sg);
                count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
        }

scan:
        iter->sg_pos = sg;
        iter->sg_idx = idx;

        mutex_unlock(&iter->lock);

        if (unlikely(n < idx))
                goto lookup;
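        /*
         * In case we failed to insert the entry into the radix tree, we need
         * to keep scanning forward through the sg entries until we reach the
         * target index.
         */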
        while (idx + count <= n) {
                idx += count;
                sg = ____sg_next(sg);
                count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
        }

        *offset = n - idx;
        return sg;

lookup:
        rcu_read_lock();

        sg = radix_tree_lookup(&iter->radix, n);
        GEM_BUG_ON(!sg);
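        /*
         * If this index lands in the middle of a multi-page sg entry, the
         * radix tree holds a value entry pointing at the first index of that
         * entry. Look up the head sg and report the offset of the requested
         * page within it.
         */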
        *offset = 0;
        if (unlikely(xa_is_value(sg))) {
                unsigned long base = xa_to_value(sg);

                sg = radix_tree_lookup(&iter->radix, base);
                GEM_BUG_ON(!sg);

                *offset = n - base;
        }

        rcu_read_unlock();

        return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
        struct scatterlist *sg;
        unsigned int offset;

        GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

        sg = i915_gem_object_get_sg(obj, n, &offset);
        return nth_page(sg_page(sg), offset);
}
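/*
 * Like i915_gem_object_get_page(), but also mark the returned page dirty so
 * that its contents are written back to the backing store when released. If
 * the whole object is already flagged dirty, every page will be marked dirty
 * on release anyway, so the per-page flag is skipped.
 */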
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
                               unsigned int n)
{
        struct page *page;

        page = i915_gem_object_get_page(obj, n);
        if (!obj->mm.dirty)
                set_page_dirty(page);

        return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
                                    unsigned int *len)
{
        struct scatterlist *sg;
        unsigned int offset;

        sg = i915_gem_object_get_sg_dma(obj, n, &offset);

        if (len)
                *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

        return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n)
{
        return i915_gem_object_get_dma_address_len(obj, n, NULL);
}