0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025 #include <linux/sched/mm.h>
0026 #include <linux/dma-fence-array.h>
0027 #include <drm/drm_gem.h>
0028
0029 #include "display/intel_frontbuffer.h"
0030 #include "gem/i915_gem_lmem.h"
0031 #include "gem/i915_gem_tiling.h"
0032 #include "gt/intel_engine.h"
0033 #include "gt/intel_engine_heartbeat.h"
0034 #include "gt/intel_gt.h"
0035 #include "gt/intel_gt_requests.h"
0036
0037 #include "i915_drv.h"
0038 #include "i915_gem_evict.h"
0039 #include "i915_sw_fence_work.h"
0040 #include "i915_trace.h"
0041 #include "i915_vma.h"
0042 #include "i915_vma_resource.h"
0043
0044 static inline void assert_vma_held_evict(const struct i915_vma *vma)
0045 {
0046
0047
0048
0049
0050
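	/*
	 * The object lock is only required while the vm is still alive:
	 * if the vm is being torn down (its last reference dropped), vmas
	 * may be evicted without holding the object lock.
	 */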
0051 if (kref_read(&vma->vm->ref))
0052 assert_object_held_shared(vma->obj);
0053 }
0054
0055 static struct kmem_cache *slab_vmas;
0056
0057 static struct i915_vma *i915_vma_alloc(void)
0058 {
0059 return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
0060 }
0061
0062 static void i915_vma_free(struct i915_vma *vma)
0063 {
0064 return kmem_cache_free(slab_vmas, vma);
0065 }
0066
0067 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
0068
0069 #include <linux/stackdepot.h>
0070
0071 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
0072 {
0073 char buf[512];
0074
0075 if (!vma->node.stack) {
0076 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
0077 vma->node.start, vma->node.size, reason);
0078 return;
0079 }
0080
0081 stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
0082 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
0083 vma->node.start, vma->node.size, reason, buf);
0084 }
0085
0086 #else
0087
0088 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
0089 {
0090 }
0091
0092 #endif
0093
0094 static inline struct i915_vma *active_to_vma(struct i915_active *ref)
0095 {
0096 return container_of(ref, typeof(struct i915_vma), active);
0097 }
0098
0099 static int __i915_vma_active(struct i915_active *ref)
0100 {
0101 return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
0102 }
0103
0104 static void __i915_vma_retire(struct i915_active *ref)
0105 {
0106 i915_vma_put(active_to_vma(ref));
0107 }
0108
0109 static struct i915_vma *
0110 vma_create(struct drm_i915_gem_object *obj,
0111 struct i915_address_space *vm,
0112 const struct i915_ggtt_view *view)
0113 {
0114 struct i915_vma *pos = ERR_PTR(-E2BIG);
0115 struct i915_vma *vma;
0116 struct rb_node *rb, **p;
0117 int err;
0118
0119
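	/* The aliasing ppGTT should never be used directly! */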
0120 GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
0121
0122 vma = i915_vma_alloc();
0123 if (vma == NULL)
0124 return ERR_PTR(-ENOMEM);
0125
0126 vma->ops = &vm->vma_ops;
0127 vma->obj = obj;
0128 vma->size = obj->base.size;
0129 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
0130
0131 i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);
0132
0133
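	/* Declare ourselves safe for use inside shrinkers */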
0134 if (IS_ENABLED(CONFIG_LOCKDEP)) {
0135 fs_reclaim_acquire(GFP_KERNEL);
0136 might_lock(&vma->active.mutex);
0137 fs_reclaim_release(GFP_KERNEL);
0138 }
0139
0140 INIT_LIST_HEAD(&vma->closed_link);
0141 INIT_LIST_HEAD(&vma->obj_link);
0142 RB_CLEAR_NODE(&vma->obj_node);
0143
0144 if (view && view->type != I915_GGTT_VIEW_NORMAL) {
0145 vma->ggtt_view = *view;
0146 if (view->type == I915_GGTT_VIEW_PARTIAL) {
0147 GEM_BUG_ON(range_overflows_t(u64,
0148 view->partial.offset,
0149 view->partial.size,
0150 obj->base.size >> PAGE_SHIFT));
0151 vma->size = view->partial.size;
0152 vma->size <<= PAGE_SHIFT;
0153 GEM_BUG_ON(vma->size > obj->base.size);
0154 } else if (view->type == I915_GGTT_VIEW_ROTATED) {
0155 vma->size = intel_rotation_info_size(&view->rotated);
0156 vma->size <<= PAGE_SHIFT;
0157 } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
0158 vma->size = intel_remapped_info_size(&view->remapped);
0159 vma->size <<= PAGE_SHIFT;
0160 }
0161 }
0162
0163 if (unlikely(vma->size > vm->total))
0164 goto err_vma;
0165
0166 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
0167
0168 err = mutex_lock_interruptible(&vm->mutex);
0169 if (err) {
0170 pos = ERR_PTR(err);
0171 goto err_vma;
0172 }
0173
0174 vma->vm = vm;
0175 list_add_tail(&vma->vm_link, &vm->unbound_list);
0176
0177 spin_lock(&obj->vma.lock);
0178 if (i915_is_ggtt(vm)) {
0179 if (unlikely(overflows_type(vma->size, u32)))
0180 goto err_unlock;
0181
0182 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
0183 i915_gem_object_get_tiling(obj),
0184 i915_gem_object_get_stride(obj));
0185 if (unlikely(vma->fence_size < vma->size ||
0186 vma->fence_size > vm->total))
0187 goto err_unlock;
0188
0189 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
0190
0191 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
0192 i915_gem_object_get_tiling(obj),
0193 i915_gem_object_get_stride(obj));
0194 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
0195
0196 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
0197 }
0198
0199 rb = NULL;
0200 p = &obj->vma.tree.rb_node;
0201 while (*p) {
0202 long cmp;
0203
0204 rb = *p;
0205 pos = rb_entry(rb, struct i915_vma, obj_node);
0206
0207
0208
0209
0210
0211
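		/*
		 * If another thread raced us and already inserted a vma for
		 * the same vm and view, bail out and return that older
		 * instance instead of our freshly allocated one.
		 */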
0212 cmp = i915_vma_compare(pos, vm, view);
0213 if (cmp < 0)
0214 p = &rb->rb_right;
0215 else if (cmp > 0)
0216 p = &rb->rb_left;
0217 else
0218 goto err_unlock;
0219 }
0220 rb_link_node(&vma->obj_node, rb, p);
0221 rb_insert_color(&vma->obj_node, &obj->vma.tree);
0222
0223 if (i915_vma_is_ggtt(vma))
0224
0225
0226
0227
0228
0229
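		/*
		 * Keep GGTT vmas at the head of the object's vma list so
		 * that iterating over only the GGTT vmas can stop early
		 * (see for_each_ggtt_vma()).
		 */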
0230 list_add(&vma->obj_link, &obj->vma.list);
0231 else
0232 list_add_tail(&vma->obj_link, &obj->vma.list);
0233
0234 spin_unlock(&obj->vma.lock);
0235 mutex_unlock(&vm->mutex);
0236
0237 return vma;
0238
0239 err_unlock:
0240 spin_unlock(&obj->vma.lock);
0241 list_del_init(&vma->vm_link);
0242 mutex_unlock(&vm->mutex);
0243 err_vma:
0244 i915_vma_free(vma);
0245 return pos;
0246 }
0247
0248 static struct i915_vma *
0249 i915_vma_lookup(struct drm_i915_gem_object *obj,
0250 struct i915_address_space *vm,
0251 const struct i915_ggtt_view *view)
0252 {
0253 struct rb_node *rb;
0254
0255 rb = obj->vma.tree.rb_node;
0256 while (rb) {
0257 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
0258 long cmp;
0259
0260 cmp = i915_vma_compare(vma, vm, view);
0261 if (cmp == 0)
0262 return vma;
0263
0264 if (cmp < 0)
0265 rb = rb->rb_right;
0266 else
0267 rb = rb->rb_left;
0268 }
0269
0270 return NULL;
0271 }
0272
0273
0274
0275
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
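/**
 * i915_vma_instance - return the singleton vma for this object/vm/view
 * @obj: parent object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * Look up an existing vma of @obj in @vm with the same @view, creating one
 * if no match is found. Once created, the vma is kept until either the
 * object is freed or the address space is closed.
 *
 * Returns the vma, or an error pointer.
 */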
0286 struct i915_vma *
0287 i915_vma_instance(struct drm_i915_gem_object *obj,
0288 struct i915_address_space *vm,
0289 const struct i915_ggtt_view *view)
0290 {
0291 struct i915_vma *vma;
0292
0293 GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
0294 GEM_BUG_ON(!kref_read(&vm->ref));
0295
0296 spin_lock(&obj->vma.lock);
0297 vma = i915_vma_lookup(obj, vm, view);
0298 spin_unlock(&obj->vma.lock);
0299
0300
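	/* vma_create() resolves the race should another thread beat us to it */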
0301 if (unlikely(!vma))
0302 vma = vma_create(obj, vm, view);
0303
0304 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
0305 return vma;
0306 }
0307
0308 struct i915_vma_work {
0309 struct dma_fence_work base;
0310 struct i915_address_space *vm;
0311 struct i915_vm_pt_stash stash;
0312 struct i915_vma_resource *vma_res;
0313 struct drm_i915_gem_object *obj;
0314 struct i915_sw_dma_fence_cb cb;
0315 enum i915_cache_level cache_level;
0316 unsigned int flags;
0317 };
0318
0319 static void __vma_bind(struct dma_fence_work *work)
0320 {
0321 struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
0322 struct i915_vma_resource *vma_res = vw->vma_res;
0323
0324
0325
0326
0327
0328
0329
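	/*
	 * The object's backing store may have been moved or cleared before
	 * this bind runs; if that operation failed, the object is left in an
	 * unknown state and we must skip writing the PTEs.
	 */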
0330 if (i915_gem_object_has_unknown_state(vw->obj))
0331 return;
0332
0333 vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
0334 vma_res, vw->cache_level, vw->flags);
0335 }
0336
0337 static void __vma_release(struct dma_fence_work *work)
0338 {
0339 struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
0340
0341 if (vw->obj)
0342 i915_gem_object_put(vw->obj);
0343
0344 i915_vm_free_pt_stash(vw->vm, &vw->stash);
0345 if (vw->vma_res)
0346 i915_vma_resource_put(vw->vma_res);
0347 }
0348
0349 static const struct dma_fence_work_ops bind_ops = {
0350 .name = "bind",
0351 .work = __vma_bind,
0352 .release = __vma_release,
0353 };
0354
0355 struct i915_vma_work *i915_vma_work(void)
0356 {
0357 struct i915_vma_work *vw;
0358
0359 vw = kzalloc(sizeof(*vw), GFP_KERNEL);
0360 if (!vw)
0361 return NULL;
0362
0363 dma_fence_work_init(&vw->base, &bind_ops);
0364 vw->base.dma.error = -EAGAIN;
0365
0366 return vw;
0367 }
0368
0369 int i915_vma_wait_for_bind(struct i915_vma *vma)
0370 {
0371 int err = 0;
0372
0373 if (rcu_access_pointer(vma->active.excl.fence)) {
0374 struct dma_fence *fence;
0375
0376 rcu_read_lock();
0377 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
0378 rcu_read_unlock();
0379 if (fence) {
0380 err = dma_fence_wait(fence, true);
0381 dma_fence_put(fence);
0382 }
0383 }
0384
0385 return err;
0386 }
0387
0388 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
0389 static int i915_vma_verify_bind_complete(struct i915_vma *vma)
0390 {
0391 struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
0392 int err;
0393
0394 if (!fence)
0395 return 0;
0396
0397 if (dma_fence_is_signaled(fence))
0398 err = fence->error;
0399 else
0400 err = -EBUSY;
0401
0402 dma_fence_put(fence);
0403
0404 return err;
0405 }
0406 #else
0407 #define i915_vma_verify_bind_complete(_vma) 0
0408 #endif
0409
0410 I915_SELFTEST_EXPORT void
0411 i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
0412 struct i915_vma *vma)
0413 {
0414 struct drm_i915_gem_object *obj = vma->obj;
0415
0416 i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
0417 obj->mm.rsgt, i915_gem_object_is_readonly(obj),
0418 i915_gem_object_is_lmem(obj), obj->mm.region,
0419 vma->ops, vma->private, vma->node.start,
0420 vma->node.size, vma->size);
0421 }
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
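/**
 * i915_vma_bind - Sets up PTEs for an VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 * @vma_res: pointer to a preallocated vma resource. The resource is either
 * consumed or freed.
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 */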
0436 int i915_vma_bind(struct i915_vma *vma,
0437 enum i915_cache_level cache_level,
0438 u32 flags,
0439 struct i915_vma_work *work,
0440 struct i915_vma_resource *vma_res)
0441 {
0442 u32 bind_flags;
0443 u32 vma_flags;
0444 int ret;
0445
0446 lockdep_assert_held(&vma->vm->mutex);
0447 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
0448 GEM_BUG_ON(vma->size > vma->node.size);
0449
0450 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
0451 vma->node.size,
0452 vma->vm->total))) {
0453 i915_vma_resource_free(vma_res);
0454 return -ENODEV;
0455 }
0456
0457 if (GEM_DEBUG_WARN_ON(!flags)) {
0458 i915_vma_resource_free(vma_res);
0459 return -EINVAL;
0460 }
0461
0462 bind_flags = flags;
0463 bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
0464
0465 vma_flags = atomic_read(&vma->flags);
0466 vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
0467
0468 bind_flags &= ~vma_flags;
0469 if (bind_flags == 0) {
0470 i915_vma_resource_free(vma_res);
0471 return 0;
0472 }
0473
0474 GEM_BUG_ON(!atomic_read(&vma->pages_count));
0475
0476
0477 if (work && bind_flags & vma->vm->bind_async_flags)
0478 ret = i915_vma_resource_bind_dep_await(vma->vm,
0479 &work->base.chain,
0480 vma->node.start,
0481 vma->node.size,
0482 true,
0483 GFP_NOWAIT |
0484 __GFP_RETRY_MAYFAIL |
0485 __GFP_NOWARN);
0486 else
0487 ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
0488 vma->node.size, true);
0489 if (ret) {
0490 i915_vma_resource_free(vma_res);
0491 return ret;
0492 }
0493
0494 if (vma->resource || !vma_res) {
0495
0496 GEM_WARN_ON(!vma_flags);
0497 i915_vma_resource_free(vma_res);
0498 } else {
0499 i915_vma_resource_init_from_vma(vma_res, vma);
0500 vma->resource = vma_res;
0501 }
0502 trace_i915_vma_bind(vma, bind_flags);
0503 if (work && bind_flags & vma->vm->bind_async_flags) {
0504 struct dma_fence *prev;
0505
0506 work->vma_res = i915_vma_resource_get(vma->resource);
0507 work->cache_level = cache_level;
0508 work->flags = bind_flags;
0509
0510
0511
0512
0513
0514
0515
0516
0517
0518
0519 prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
0520 if (prev) {
0521 __i915_sw_fence_await_dma_fence(&work->base.chain,
0522 prev,
0523 &work->cb);
0524 dma_fence_put(prev);
0525 }
0526
0527 work->base.dma.error = 0;
0528 work->obj = i915_gem_object_get(vma->obj);
0529 } else {
0530 ret = i915_gem_object_wait_moving_fence(vma->obj, true);
0531 if (ret) {
0532 i915_vma_resource_free(vma->resource);
0533 vma->resource = NULL;
0534
0535 return ret;
0536 }
0537 vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
0538 bind_flags);
0539 }
0540
0541 atomic_or(bind_flags, &vma->flags);
0542 return 0;
0543 }
0544
0545 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
0546 {
0547 void __iomem *ptr;
0548 int err;
0549
0550 if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
0551 return IOMEM_ERR_PTR(-EINVAL);
0552
0553 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
0554 GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
0555 GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
0556
0557 ptr = READ_ONCE(vma->iomap);
0558 if (ptr == NULL) {
0559
0560
0561
0562
0563
0564
0565 if (i915_gem_object_is_lmem(vma->obj)) {
0566 ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
0567 vma->obj->base.size);
0568 } else if (i915_vma_is_map_and_fenceable(vma)) {
0569 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
0570 vma->node.start,
0571 vma->node.size);
0572 } else {
0573 ptr = (void __iomem *)
0574 i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
0575 if (IS_ERR(ptr)) {
0576 err = PTR_ERR(ptr);
0577 goto err;
0578 }
0579 ptr = page_pack_bits(ptr, 1);
0580 }
0581
0582 if (ptr == NULL) {
0583 err = -ENOMEM;
0584 goto err;
0585 }
0586
0587 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
0588 if (page_unmask_bits(ptr))
0589 __i915_gem_object_release_map(vma->obj);
0590 else
0591 io_mapping_unmap(ptr);
0592 ptr = vma->iomap;
0593 }
0594 }
0595
0596 __i915_vma_pin(vma);
0597
0598 err = i915_vma_pin_fence(vma);
0599 if (err)
0600 goto err_unpin;
0601
0602 i915_vma_set_ggtt_write(vma);
0603
0604
0605 return page_mask_bits(ptr);
0606
0607 err_unpin:
0608 __i915_vma_unpin(vma);
0609 err:
0610 return IOMEM_ERR_PTR(err);
0611 }
0612
0613 void i915_vma_flush_writes(struct i915_vma *vma)
0614 {
0615 if (i915_vma_unset_ggtt_write(vma))
0616 intel_gt_flush_ggtt_writes(vma->vm->gt);
0617 }
0618
0619 void i915_vma_unpin_iomap(struct i915_vma *vma)
0620 {
0621 GEM_BUG_ON(vma->iomap == NULL);
0622
0623
0624
0625 i915_vma_flush_writes(vma);
0626
0627 i915_vma_unpin_fence(vma);
0628 i915_vma_unpin(vma);
0629 }
0630
0631 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
0632 {
0633 struct i915_vma *vma;
0634 struct drm_i915_gem_object *obj;
0635
0636 vma = fetch_and_zero(p_vma);
0637 if (!vma)
0638 return;
0639
0640 obj = vma->obj;
0641 GEM_BUG_ON(!obj);
0642
0643 i915_vma_unpin(vma);
0644
0645 if (flags & I915_VMA_RELEASE_MAP)
0646 i915_gem_object_unpin_map(obj);
0647
0648 i915_gem_object_put(obj);
0649 }
0650
0651 bool i915_vma_misplaced(const struct i915_vma *vma,
0652 u64 size, u64 alignment, u64 flags)
0653 {
0654 if (!drm_mm_node_allocated(&vma->node))
0655 return false;
0656
0657 if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
0658 return true;
0659
0660 if (vma->node.size < size)
0661 return true;
0662
0663 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
0664 if (alignment && !IS_ALIGNED(vma->node.start, alignment))
0665 return true;
0666
0667 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
0668 return true;
0669
0670 if (flags & PIN_OFFSET_BIAS &&
0671 vma->node.start < (flags & PIN_OFFSET_MASK))
0672 return true;
0673
0674 if (flags & PIN_OFFSET_FIXED &&
0675 vma->node.start != (flags & PIN_OFFSET_MASK))
0676 return true;
0677
0678 return false;
0679 }
0680
0681 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
0682 {
0683 bool mappable, fenceable;
0684
0685 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
0686 GEM_BUG_ON(!vma->fence_size);
0687
0688 fenceable = (vma->node.size >= vma->fence_size &&
0689 IS_ALIGNED(vma->node.start, vma->fence_alignment));
0690
0691 mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
0692
0693 if (mappable && fenceable)
0694 set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
0695 else
0696 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
0697 }
0698
0699 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
0700 {
0701 struct drm_mm_node *node = &vma->node;
0702 struct drm_mm_node *other;
0703
0704
0705
0706
0707
0708
0709
0710
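	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or
	 * not these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */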
0711 if (!i915_vm_has_cache_coloring(vma->vm))
0712 return true;
0713
0714
0715 GEM_BUG_ON(!drm_mm_node_allocated(node));
0716 GEM_BUG_ON(list_empty(&node->node_list));
0717
0718 other = list_prev_entry(node, node_list);
0719 if (i915_node_color_differs(other, color) &&
0720 !drm_mm_hole_follows(other))
0721 return false;
0722
0723 other = list_next_entry(node, node_list);
0724 if (i915_node_color_differs(other, color) &&
0725 !drm_mm_hole_follows(node))
0726 return false;
0727
0728 return true;
0729 }
0730
0731
0732
0733
0734
0735
0736
0737
0738
0739
0740
0741
0742
0743
0744
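/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @ww: An optional struct i915_gem_ww_ctx
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * or we may have to evict everything and start over.
 *
 * Returns 0 on success, negative error code otherwise.
 */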
0745 static int
0746 i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
0747 u64 size, u64 alignment, u64 flags)
0748 {
0749 unsigned long color;
0750 u64 start, end;
0751 int ret;
0752
0753 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
0754 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
0755
0756 size = max(size, vma->size);
0757 alignment = max(alignment, vma->display_alignment);
0758 if (flags & PIN_MAPPABLE) {
0759 size = max_t(typeof(size), size, vma->fence_size);
0760 alignment = max_t(typeof(alignment),
0761 alignment, vma->fence_alignment);
0762 }
0763
0764 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
0765 GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
0766 GEM_BUG_ON(!is_power_of_2(alignment));
0767
0768 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
0769 GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
0770
0771 end = vma->vm->total;
0772 if (flags & PIN_MAPPABLE)
0773 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
0774 if (flags & PIN_ZONE_4G)
0775 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
0776 GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
0777
0778 alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
0779
0780
0781
0782
0783 if (NEEDS_COMPACT_PT(vma->vm->i915))
0784 size = round_up(size, alignment);
0785
0786
0787
0788
0789
0790 if (size > end) {
0791 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
0792 size, flags & PIN_MAPPABLE ? "mappable" : "total",
0793 end);
0794 return -ENOSPC;
0795 }
0796
0797 color = 0;
0798
0799 if (i915_vm_has_cache_coloring(vma->vm))
0800 color = vma->obj->cache_level;
0801
0802 if (flags & PIN_OFFSET_FIXED) {
0803 u64 offset = flags & PIN_OFFSET_MASK;
0804 if (!IS_ALIGNED(offset, alignment) ||
0805 range_overflows(offset, size, end))
0806 return -EINVAL;
0807
0808 ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
0809 size, offset, color,
0810 flags);
0811 if (ret)
0812 return ret;
0813 } else {
0814
0815
0816
0817
0818
0819
0820
0821
0822 if (upper_32_bits(end - 1) &&
0823 vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
0824
0825
0826
0827
0828
0829
0830 u64 page_alignment =
0831 rounddown_pow_of_two(vma->page_sizes.sg |
0832 I915_GTT_PAGE_SIZE_2M);
0833
0834
0835
0836
0837
0838
0839 GEM_BUG_ON(i915_vma_is_ggtt(vma));
0840
0841 alignment = max(alignment, page_alignment);
0842
0843 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
0844 size = round_up(size, I915_GTT_PAGE_SIZE_2M);
0845 }
0846
0847 ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
0848 size, alignment, color,
0849 start, end, flags);
0850 if (ret)
0851 return ret;
0852
0853 GEM_BUG_ON(vma->node.start < start);
0854 GEM_BUG_ON(vma->node.start + vma->node.size > end);
0855 }
0856 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
0857 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
0858
0859 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
0860
0861 return 0;
0862 }
0863
0864 static void
0865 i915_vma_detach(struct i915_vma *vma)
0866 {
0867 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
0868 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
0869
0870
0871
0872
0873
0874
0875 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
0876 }
0877
0878 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
0879 {
0880 unsigned int bound;
0881
0882 bound = atomic_read(&vma->flags);
0883
0884 if (flags & PIN_VALIDATE) {
0885 flags &= I915_VMA_BIND_MASK;
0886
0887 return (flags & bound) == flags;
0888 }
0889
0890
0891 flags &= I915_VMA_BIND_MASK;
0892 do {
0893 if (unlikely(flags & ~bound))
0894 return false;
0895
0896 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
0897 return false;
0898
0899 GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
0900 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
0901
0902 return true;
0903 }
0904
0905 static struct scatterlist *
0906 rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
0907 unsigned int width, unsigned int height,
0908 unsigned int src_stride, unsigned int dst_stride,
0909 struct sg_table *st, struct scatterlist *sg)
0910 {
0911 unsigned int column, row;
0912 unsigned int src_idx;
0913
0914 for (column = 0; column < width; column++) {
0915 unsigned int left;
0916
0917 src_idx = src_stride * (height - 1) + column + offset;
0918 for (row = 0; row < height; row++) {
0919 st->nents++;
0920
0921
0922
0923
0924
0925 sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
0926 sg_dma_address(sg) =
0927 i915_gem_object_get_dma_address(obj, src_idx);
0928 sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
0929 sg = sg_next(sg);
0930 src_idx -= src_stride;
0931 }
0932
0933 left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
0934
0935 if (!left)
0936 continue;
0937
0938 st->nents++;
0939
0940
0941
0942
0943
0944
0945 sg_set_page(sg, NULL, left, 0);
0946 sg_dma_address(sg) = 0;
0947 sg_dma_len(sg) = left;
0948 sg = sg_next(sg);
0949 }
0950
0951 return sg;
0952 }
0953
0954 static noinline struct sg_table *
0955 intel_rotate_pages(struct intel_rotation_info *rot_info,
0956 struct drm_i915_gem_object *obj)
0957 {
0958 unsigned int size = intel_rotation_info_size(rot_info);
0959 struct drm_i915_private *i915 = to_i915(obj->base.dev);
0960 struct sg_table *st;
0961 struct scatterlist *sg;
0962 int ret = -ENOMEM;
0963 int i;
0964
0965
0966 st = kmalloc(sizeof(*st), GFP_KERNEL);
0967 if (!st)
0968 goto err_st_alloc;
0969
0970 ret = sg_alloc_table(st, size, GFP_KERNEL);
0971 if (ret)
0972 goto err_sg_alloc;
0973
0974 st->nents = 0;
0975 sg = st->sgl;
0976
0977 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
0978 sg = rotate_pages(obj, rot_info->plane[i].offset,
0979 rot_info->plane[i].width, rot_info->plane[i].height,
0980 rot_info->plane[i].src_stride,
0981 rot_info->plane[i].dst_stride,
0982 st, sg);
0983
0984 return st;
0985
0986 err_sg_alloc:
0987 kfree(st);
0988 err_st_alloc:
0989
0990 drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
0991 obj->base.size, rot_info->plane[0].width,
0992 rot_info->plane[0].height, size);
0993
0994 return ERR_PTR(ret);
0995 }
0996
0997 static struct scatterlist *
0998 add_padding_pages(unsigned int count,
0999 struct sg_table *st, struct scatterlist *sg)
1000 {
1001 st->nents++;
1002
1003
1004
1005
1006
1007
1008 sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
1009 sg_dma_address(sg) = 0;
1010 sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
1011 sg = sg_next(sg);
1012
1013 return sg;
1014 }
1015
1016 static struct scatterlist *
1017 remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
1018 unsigned int offset, unsigned int alignment_pad,
1019 unsigned int width, unsigned int height,
1020 unsigned int src_stride, unsigned int dst_stride,
1021 struct sg_table *st, struct scatterlist *sg,
1022 unsigned int *gtt_offset)
1023 {
1024 unsigned int row;
1025
1026 if (!width || !height)
1027 return sg;
1028
1029 if (alignment_pad)
1030 sg = add_padding_pages(alignment_pad, st, sg);
1031
1032 for (row = 0; row < height; row++) {
1033 unsigned int left = width * I915_GTT_PAGE_SIZE;
1034
1035 while (left) {
1036 dma_addr_t addr;
1037 unsigned int length;
1038
1039
1040
1041
1042
1043
1044
1045 addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
1046
1047 length = min(left, length);
1048
1049 st->nents++;
1050
1051 sg_set_page(sg, NULL, length, 0);
1052 sg_dma_address(sg) = addr;
1053 sg_dma_len(sg) = length;
1054 sg = sg_next(sg);
1055
1056 offset += length / I915_GTT_PAGE_SIZE;
1057 left -= length;
1058 }
1059
1060 offset += src_stride - width;
1061
1062 left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
1063
1064 if (!left)
1065 continue;
1066
1067 sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
1068 }
1069
1070 *gtt_offset += alignment_pad + dst_stride * height;
1071
1072 return sg;
1073 }
1074
1075 static struct scatterlist *
1076 remap_contiguous_pages(struct drm_i915_gem_object *obj,
1077 unsigned int obj_offset,
1078 unsigned int count,
1079 struct sg_table *st, struct scatterlist *sg)
1080 {
1081 struct scatterlist *iter;
1082 unsigned int offset;
1083
1084 iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
1085 GEM_BUG_ON(!iter);
1086
1087 do {
1088 unsigned int len;
1089
1090 len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
1091 count << PAGE_SHIFT);
1092 sg_set_page(sg, NULL, len, 0);
1093 sg_dma_address(sg) =
1094 sg_dma_address(iter) + (offset << PAGE_SHIFT);
1095 sg_dma_len(sg) = len;
1096
1097 st->nents++;
1098 count -= len >> PAGE_SHIFT;
1099 if (count == 0)
1100 return sg;
1101
1102 sg = __sg_next(sg);
1103 iter = __sg_next(iter);
1104 offset = 0;
1105 } while (1);
1106 }
1107
1108 static struct scatterlist *
1109 remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
1110 unsigned int obj_offset, unsigned int alignment_pad,
1111 unsigned int size,
1112 struct sg_table *st, struct scatterlist *sg,
1113 unsigned int *gtt_offset)
1114 {
1115 if (!size)
1116 return sg;
1117
1118 if (alignment_pad)
1119 sg = add_padding_pages(alignment_pad, st, sg);
1120
1121 sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
1122 sg = sg_next(sg);
1123
1124 *gtt_offset += alignment_pad + size;
1125
1126 return sg;
1127 }
1128
1129 static struct scatterlist *
1130 remap_color_plane_pages(const struct intel_remapped_info *rem_info,
1131 struct drm_i915_gem_object *obj,
1132 int color_plane,
1133 struct sg_table *st, struct scatterlist *sg,
1134 unsigned int *gtt_offset)
1135 {
1136 unsigned int alignment_pad = 0;
1137
1138 if (rem_info->plane_alignment)
1139 alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;
1140
1141 if (rem_info->plane[color_plane].linear)
1142 sg = remap_linear_color_plane_pages(obj,
1143 rem_info->plane[color_plane].offset,
1144 alignment_pad,
1145 rem_info->plane[color_plane].size,
1146 st, sg,
1147 gtt_offset);
1148
1149 else
1150 sg = remap_tiled_color_plane_pages(obj,
1151 rem_info->plane[color_plane].offset,
1152 alignment_pad,
1153 rem_info->plane[color_plane].width,
1154 rem_info->plane[color_plane].height,
1155 rem_info->plane[color_plane].src_stride,
1156 rem_info->plane[color_plane].dst_stride,
1157 st, sg,
1158 gtt_offset);
1159
1160 return sg;
1161 }
1162
1163 static noinline struct sg_table *
1164 intel_remap_pages(struct intel_remapped_info *rem_info,
1165 struct drm_i915_gem_object *obj)
1166 {
1167 unsigned int size = intel_remapped_info_size(rem_info);
1168 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1169 struct sg_table *st;
1170 struct scatterlist *sg;
1171 unsigned int gtt_offset = 0;
1172 int ret = -ENOMEM;
1173 int i;
1174
1175
1176 st = kmalloc(sizeof(*st), GFP_KERNEL);
1177 if (!st)
1178 goto err_st_alloc;
1179
1180 ret = sg_alloc_table(st, size, GFP_KERNEL);
1181 if (ret)
1182 goto err_sg_alloc;
1183
1184 st->nents = 0;
1185 sg = st->sgl;
1186
1187 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
1188 sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);
1189
1190 i915_sg_trim(st);
1191
1192 return st;
1193
1194 err_sg_alloc:
1195 kfree(st);
1196 err_st_alloc:
1197
1198 drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
1199 obj->base.size, rem_info->plane[0].width,
1200 rem_info->plane[0].height, size);
1201
1202 return ERR_PTR(ret);
1203 }
1204
1205 static noinline struct sg_table *
1206 intel_partial_pages(const struct i915_ggtt_view *view,
1207 struct drm_i915_gem_object *obj)
1208 {
1209 struct sg_table *st;
1210 struct scatterlist *sg;
1211 unsigned int count = view->partial.size;
1212 int ret = -ENOMEM;
1213
1214 st = kmalloc(sizeof(*st), GFP_KERNEL);
1215 if (!st)
1216 goto err_st_alloc;
1217
1218 ret = sg_alloc_table(st, count, GFP_KERNEL);
1219 if (ret)
1220 goto err_sg_alloc;
1221
1222 st->nents = 0;
1223
1224 sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
1225
1226 sg_mark_end(sg);
1227 i915_sg_trim(st);
1228
1229 return st;
1230
1231 err_sg_alloc:
1232 kfree(st);
1233 err_st_alloc:
1234 return ERR_PTR(ret);
1235 }
1236
1237 static int
1238 __i915_vma_get_pages(struct i915_vma *vma)
1239 {
1240 struct sg_table *pages;
1241
1242
1243
1244
1245
1246
1247
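	/*
	 * The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */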
1248 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
1249
1250 switch (vma->ggtt_view.type) {
1251 default:
1252 GEM_BUG_ON(vma->ggtt_view.type);
1253 fallthrough;
1254 case I915_GGTT_VIEW_NORMAL:
1255 pages = vma->obj->mm.pages;
1256 break;
1257
1258 case I915_GGTT_VIEW_ROTATED:
1259 pages =
1260 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
1261 break;
1262
1263 case I915_GGTT_VIEW_REMAPPED:
1264 pages =
1265 intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
1266 break;
1267
1268 case I915_GGTT_VIEW_PARTIAL:
1269 pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
1270 break;
1271 }
1272
1273 if (IS_ERR(pages)) {
1274 drm_err(&vma->vm->i915->drm,
1275 "Failed to get pages for VMA view type %u (%ld)!\n",
1276 vma->ggtt_view.type, PTR_ERR(pages));
1277 return PTR_ERR(pages);
1278 }
1279
1280 vma->pages = pages;
1281
1282 return 0;
1283 }
1284
1285 I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
1286 {
1287 int err;
1288
1289 if (atomic_add_unless(&vma->pages_count, 1, 0))
1290 return 0;
1291
1292 err = i915_gem_object_pin_pages(vma->obj);
1293 if (err)
1294 return err;
1295
1296 err = __i915_vma_get_pages(vma);
1297 if (err)
1298 goto err_unpin;
1299
1300 vma->page_sizes = vma->obj->mm.page_sizes;
1301 atomic_inc(&vma->pages_count);
1302
1303 return 0;
1304
1305 err_unpin:
1306 __i915_gem_object_unpin_pages(vma->obj);
1307
1308 return err;
1309 }
1310
1311 void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
1312 {
1313
1314
1315
1316
1317
1318
1319
1320
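	/*
	 * Before the pages bound by this vma are released, all TLBs that may
	 * still reference them must be invalidated. Record the GT's next
	 * full-invalidation seqno so the flush is performed (at most once)
	 * before the pages can be reused.
	 */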
1321 WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
1322 }
1323
1324 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
1325 {
1326
1327 GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
1328
1329 if (atomic_sub_return(count, &vma->pages_count) == 0) {
1330 if (vma->pages != vma->obj->mm.pages) {
1331 sg_free_table(vma->pages);
1332 kfree(vma->pages);
1333 }
1334 vma->pages = NULL;
1335
1336 i915_gem_object_unpin_pages(vma->obj);
1337 }
1338 }
1339
1340 I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
1341 {
1342 if (atomic_add_unless(&vma->pages_count, -1, 1))
1343 return;
1344
1345 __vma_put_pages(vma, 1);
1346 }
1347
1348 static void vma_unbind_pages(struct i915_vma *vma)
1349 {
1350 unsigned int count;
1351
1352 lockdep_assert_held(&vma->vm->mutex);
1353
1354
1355 count = atomic_read(&vma->pages_count);
1356 count >>= I915_VMA_PAGES_BIAS;
1357 GEM_BUG_ON(!count);
1358
1359 __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
1360 }
1361
1362 int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1363 u64 size, u64 alignment, u64 flags)
1364 {
1365 struct i915_vma_work *work = NULL;
1366 struct dma_fence *moving = NULL;
1367 struct i915_vma_resource *vma_res = NULL;
1368 intel_wakeref_t wakeref = 0;
1369 unsigned int bound;
1370 int err;
1371
1372 assert_vma_held(vma);
1373 GEM_BUG_ON(!ww);
1374
1375 BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
1376 BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
1377
1378 GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
1379
1380
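	/* First try and grab the pin without rebinding the vma */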
1381 if (try_qad_pin(vma, flags))
1382 return 0;
1383
1384 err = i915_vma_get_pages(vma);
1385 if (err)
1386 return err;
1387
1388 if (flags & PIN_GLOBAL)
1389 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
1390
1391 if (flags & vma->vm->bind_async_flags) {
1392
1393 err = i915_vm_lock_objects(vma->vm, ww);
1394 if (err)
1395 goto err_rpm;
1396
1397 work = i915_vma_work();
1398 if (!work) {
1399 err = -ENOMEM;
1400 goto err_rpm;
1401 }
1402
1403 work->vm = vma->vm;
1404
1405 err = i915_gem_object_get_moving_fence(vma->obj, &moving);
1406 if (err)
1407 goto err_rpm;
1408
1409 dma_fence_work_chain(&work->base, moving);
1410
1411
1412 if (vma->vm->allocate_va_range) {
1413 err = i915_vm_alloc_pt_stash(vma->vm,
1414 &work->stash,
1415 vma->size);
1416 if (err)
1417 goto err_fence;
1418
1419 err = i915_vm_map_pt_stash(vma->vm, &work->stash);
1420 if (err)
1421 goto err_fence;
1422 }
1423 }
1424
1425 vma_res = i915_vma_resource_alloc();
1426 if (IS_ERR(vma_res)) {
1427 err = PTR_ERR(vma_res);
1428 goto err_fence;
1429 }
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448 err = mutex_lock_interruptible_nested(&vma->vm->mutex,
1449 !(flags & PIN_GLOBAL));
1450 if (err)
1451 goto err_vma_res;
1452
1453
1454
1455 if (unlikely(i915_vma_is_closed(vma))) {
1456 err = -ENOENT;
1457 goto err_unlock;
1458 }
1459
1460 bound = atomic_read(&vma->flags);
1461 if (unlikely(bound & I915_VMA_ERROR)) {
1462 err = -ENOMEM;
1463 goto err_unlock;
1464 }
1465
1466 if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
1467 err = -EAGAIN;
1468 goto err_unlock;
1469 }
1470
1471 if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
1472 if (!(flags & PIN_VALIDATE))
1473 __i915_vma_pin(vma);
1474 goto err_unlock;
1475 }
1476
1477 err = i915_active_acquire(&vma->active);
1478 if (err)
1479 goto err_unlock;
1480
1481 if (!(bound & I915_VMA_BIND_MASK)) {
1482 err = i915_vma_insert(vma, ww, size, alignment, flags);
1483 if (err)
1484 goto err_active;
1485
1486 if (i915_is_ggtt(vma->vm))
1487 __i915_vma_set_map_and_fenceable(vma);
1488 }
1489
1490 GEM_BUG_ON(!vma->pages);
1491 err = i915_vma_bind(vma,
1492 vma->obj->cache_level,
1493 flags, work, vma_res);
1494 vma_res = NULL;
1495 if (err)
1496 goto err_remove;
1497
1498
1499 GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
1500 atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
1501 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1502
1503 if (!(flags & PIN_VALIDATE)) {
1504 __i915_vma_pin(vma);
1505 GEM_BUG_ON(!i915_vma_is_pinned(vma));
1506 }
1507 GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
1508 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
1509
1510 err_remove:
1511 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
1512 i915_vma_detach(vma);
1513 drm_mm_remove_node(&vma->node);
1514 }
1515 err_active:
1516 i915_active_release(&vma->active);
1517 err_unlock:
1518 mutex_unlock(&vma->vm->mutex);
1519 err_vma_res:
1520 i915_vma_resource_free(vma_res);
1521 err_fence:
1522 if (work)
1523 dma_fence_work_commit_imm(&work->base);
1524 err_rpm:
1525 if (wakeref)
1526 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
1527
1528 if (moving)
1529 dma_fence_put(moving);
1530
1531 i915_vma_put_pages(vma);
1532 return err;
1533 }
1534
1535 static void flush_idle_contexts(struct intel_gt *gt)
1536 {
1537 struct intel_engine_cs *engine;
1538 enum intel_engine_id id;
1539
1540 for_each_engine(engine, gt, id)
1541 intel_engine_flush_barriers(engine);
1542
1543 intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1544 }
1545
1546 static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1547 u32 align, unsigned int flags)
1548 {
1549 struct i915_address_space *vm = vma->vm;
1550 int err;
1551
1552 do {
1553 err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
1554
1555 if (err != -ENOSPC) {
1556 if (!err) {
1557 err = i915_vma_wait_for_bind(vma);
1558 if (err)
1559 i915_vma_unpin(vma);
1560 }
1561 return err;
1562 }
1563
1564
1565 flush_idle_contexts(vm->gt);
1566 if (mutex_lock_interruptible(&vm->mutex) == 0) {
1567
1568
1569
1570
1571
1572 i915_gem_evict_vm(vm, NULL);
1573 mutex_unlock(&vm->mutex);
1574 }
1575 } while (1);
1576 }
1577
1578 int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1579 u32 align, unsigned int flags)
1580 {
1581 struct i915_gem_ww_ctx _ww;
1582 int err;
1583
1584 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1585
1586 if (ww)
1587 return __i915_ggtt_pin(vma, ww, align, flags);
1588
1589 lockdep_assert_not_held(&vma->obj->base.resv->lock.base);
1590
1591 for_i915_gem_ww(&_ww, err, true) {
1592 err = i915_gem_object_lock(vma->obj, &_ww);
1593 if (!err)
1594 err = __i915_ggtt_pin(vma, &_ww, align, flags);
1595 }
1596
1597 return err;
1598 }
1599
1600 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
1601 {
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
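	/*
	 * Defer actually unbinding and destroying the vma until the next GT
	 * idle point (or until the object is freed). The client may reopen
	 * and reuse the vma in the meantime, avoiding the cost of a rebind.
	 */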
1614 GEM_BUG_ON(i915_vma_is_closed(vma));
1615 list_add(&vma->closed_link, &gt->closed_vma);
1616 }
1617
1618 void i915_vma_close(struct i915_vma *vma)
1619 {
1620 struct intel_gt *gt = vma->vm->gt;
1621 unsigned long flags;
1622
1623 if (i915_vma_is_ggtt(vma))
1624 return;
1625
1626 GEM_BUG_ON(!atomic_read(&vma->open_count));
1627 if (atomic_dec_and_lock_irqsave(&vma->open_count,
1628 &gt->closed_lock,
1629 flags)) {
1630 __vma_close(vma, gt);
1631 spin_unlock_irqrestore(&gt->closed_lock, flags);
1632 }
1633 }
1634
1635 static void __i915_vma_remove_closed(struct i915_vma *vma)
1636 {
1637 list_del_init(&vma->closed_link);
1638 }
1639
1640 void i915_vma_reopen(struct i915_vma *vma)
1641 {
1642 struct intel_gt *gt = vma->vm->gt;
1643
1644 spin_lock_irq(&gt->closed_lock);
1645 if (i915_vma_is_closed(vma))
1646 __i915_vma_remove_closed(vma);
1647 spin_unlock_irq(&gt->closed_lock);
1648 }
1649
1650 static void force_unbind(struct i915_vma *vma)
1651 {
1652 if (!drm_mm_node_allocated(&vma->node))
1653 return;
1654
1655 atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1656 WARN_ON(__i915_vma_unbind(vma));
1657 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1658 }
1659
1660 static void release_references(struct i915_vma *vma, struct intel_gt *gt,
1661 bool vm_ddestroy)
1662 {
1663 struct drm_i915_gem_object *obj = vma->obj;
1664
1665 GEM_BUG_ON(i915_vma_is_active(vma));
1666
1667 spin_lock(&obj->vma.lock);
1668 list_del(&vma->obj_link);
1669 if (!RB_EMPTY_NODE(&vma->obj_node))
1670 rb_erase(&vma->obj_node, &obj->vma.tree);
1671
1672 spin_unlock(&obj->vma.lock);
1673
1674 spin_lock_irq(&gt->closed_lock);
1675 __i915_vma_remove_closed(vma);
1676 spin_unlock_irq(&gt->closed_lock);
1677
1678 if (vm_ddestroy)
1679 i915_vm_resv_put(vma->vm);
1680
1681 i915_active_fini(&vma->active);
1682 GEM_WARN_ON(vma->resource);
1683 i915_vma_free(vma);
1684 }
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
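/*
 * i915_vma_destroy_locked - Remove all weak references to the vma and put
 * the initial reference.
 *
 * This function should be called when it's decided the vma isn't needed
 * anymore. The caller must assure that it doesn't race with another lookup
 * plus destroy, typically by taking an appropriate reference.
 *
 * The vm->mutex is expected to be held by the caller.
 */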
1712 void i915_vma_destroy_locked(struct i915_vma *vma)
1713 {
1714 lockdep_assert_held(&vma->vm->mutex);
1715
1716 force_unbind(vma);
1717 list_del_init(&vma->vm_link);
1718 release_references(vma, vma->vm->gt, false);
1719 }
1720
1721 void i915_vma_destroy(struct i915_vma *vma)
1722 {
1723 struct intel_gt *gt;
1724 bool vm_ddestroy;
1725
1726 mutex_lock(&vma->vm->mutex);
1727 force_unbind(vma);
1728 list_del_init(&vma->vm_link);
1729 vm_ddestroy = vma->vm_ddestroy;
1730 vma->vm_ddestroy = false;
1731
1732
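	/* vma->vm may be freed when releasing vma->vm->mutex. */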
1733 gt = vma->vm->gt;
1734 mutex_unlock(&vma->vm->mutex);
1735 release_references(vma, gt, vm_ddestroy);
1736 }
1737
1738 void i915_vma_parked(struct intel_gt *gt)
1739 {
1740 struct i915_vma *vma, *next;
1741 LIST_HEAD(closed);
1742
1743 spin_lock_irq(&gt->closed_lock);
1744 list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1745 struct drm_i915_gem_object *obj = vma->obj;
1746 struct i915_address_space *vm = vma->vm;
1747
1748
1749
1750 if (!kref_get_unless_zero(&obj->base.refcount))
1751 continue;
1752
1753 if (!i915_vm_tryget(vm)) {
1754 i915_gem_object_put(obj);
1755 continue;
1756 }
1757
1758 list_move(&vma->closed_link, &closed);
1759 }
1760 spin_unlock_irq(&gt->closed_lock);
1761
1762
1763 list_for_each_entry_safe(vma, next, &closed, closed_link) {
1764 struct drm_i915_gem_object *obj = vma->obj;
1765 struct i915_address_space *vm = vma->vm;
1766
1767 if (i915_gem_object_trylock(obj, NULL)) {
1768 INIT_LIST_HEAD(&vma->closed_link);
1769 i915_vma_destroy(vma);
1770 i915_gem_object_unlock(obj);
1771 } else {
1772
1773 spin_lock_irq(&gt->closed_lock);
1774 list_add(&vma->closed_link, &gt->closed_vma);
1775 spin_unlock_irq(&gt->closed_lock);
1776 }
1777
1778 i915_gem_object_put(obj);
1779 i915_vm_put(vm);
1780 }
1781 }
1782
1783 static void __i915_vma_iounmap(struct i915_vma *vma)
1784 {
1785 GEM_BUG_ON(i915_vma_is_pinned(vma));
1786
1787 if (vma->iomap == NULL)
1788 return;
1789
1790 if (page_unmask_bits(vma->iomap))
1791 __i915_gem_object_release_map(vma->obj);
1792 else
1793 io_mapping_unmap(vma->iomap);
1794 vma->iomap = NULL;
1795 }
1796
1797 void i915_vma_revoke_mmap(struct i915_vma *vma)
1798 {
1799 struct drm_vma_offset_node *node;
1800 u64 vma_offset;
1801
1802 if (!i915_vma_has_userfault(vma))
1803 return;
1804
1805 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1806 GEM_BUG_ON(!vma->obj->userfault_count);
1807
1808 node = &vma->mmo->vma_node;
1809 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
1810 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1811 drm_vma_node_offset_addr(node) + vma_offset,
1812 vma->size,
1813 1);
1814
1815 i915_vma_unset_userfault(vma);
1816 if (!--vma->obj->userfault_count)
1817 list_del(&vma->obj->userfault_link);
1818 }
1819
1820 static int
1821 __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1822 {
1823 return __i915_request_await_exclusive(rq, &vma->active);
1824 }
1825
1826 static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1827 {
1828 int err;
1829
1830
1831 err = __i915_request_await_bind(rq, vma);
1832 if (err)
1833 return err;
1834
1835 return i915_active_add_request(&vma->active, rq);
1836 }
1837
1838 int _i915_vma_move_to_active(struct i915_vma *vma,
1839 struct i915_request *rq,
1840 struct dma_fence *fence,
1841 unsigned int flags)
1842 {
1843 struct drm_i915_gem_object *obj = vma->obj;
1844 int err;
1845
1846 assert_object_held(obj);
1847
1848 GEM_BUG_ON(!vma->pages);
1849
1850 err = __i915_vma_move_to_active(vma, rq);
1851 if (unlikely(err))
1852 return err;
1853
1854
1855
1856
1857
1858 if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
1859 struct dma_fence *curr;
1860 int idx;
1861
1862 dma_fence_array_for_each(curr, idx, fence)
1863 ;
1864 err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
1865 if (unlikely(err))
1866 return err;
1867 }
1868
1869 if (flags & EXEC_OBJECT_WRITE) {
1870 struct intel_frontbuffer *front;
1871
1872 front = __intel_frontbuffer_get(obj);
1873 if (unlikely(front)) {
1874 if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1875 i915_active_add_request(&front->write, rq);
1876 intel_frontbuffer_put(front);
1877 }
1878 }
1879
1880 if (fence) {
1881 struct dma_fence *curr;
1882 enum dma_resv_usage usage;
1883 int idx;
1884
1885 if (flags & EXEC_OBJECT_WRITE) {
1886 usage = DMA_RESV_USAGE_WRITE;
1887 obj->write_domain = I915_GEM_DOMAIN_RENDER;
1888 obj->read_domains = 0;
1889 } else {
1890 usage = DMA_RESV_USAGE_READ;
1891 obj->write_domain = 0;
1892 }
1893
1894 dma_fence_array_for_each(curr, idx, fence)
1895 dma_resv_add_fence(vma->obj->base.resv, curr, usage);
1896 }
1897
1898 if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
1899 i915_active_add_request(&vma->fence->active, rq);
1900
1901 obj->read_domains |= I915_GEM_GPU_DOMAINS;
1902 obj->mm.dirty = true;
1903
1904 GEM_BUG_ON(!i915_vma_is_active(vma));
1905 return 0;
1906 }
1907
1908 struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
1909 {
1910 struct i915_vma_resource *vma_res = vma->resource;
1911 struct dma_fence *unbind_fence;
1912
1913 GEM_BUG_ON(i915_vma_is_pinned(vma));
1914 assert_vma_held_evict(vma);
1915
1916 if (i915_vma_is_map_and_fenceable(vma)) {
1917
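		/* Force a pagefault for domain tracking on next user access */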
1918 i915_vma_revoke_mmap(vma);
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933 i915_vma_flush_writes(vma);
1934
1935
1936 i915_vma_revoke_fence(vma);
1937
1938 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
1939 }
1940
1941 __i915_vma_iounmap(vma);
1942
1943 GEM_BUG_ON(vma->fence);
1944 GEM_BUG_ON(i915_vma_has_userfault(vma));
1945
1946
1947 GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
1948
1949
1950 vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
1951 kref_read(&vma->vm->ref);
1952 vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
1953 vma->vm->skip_pte_rewrite;
1954 trace_i915_vma_unbind(vma);
1955
1956 if (async)
1957 unbind_fence = i915_vma_resource_unbind(vma_res,
1958 &vma->obj->mm.tlb);
1959 else
1960 unbind_fence = i915_vma_resource_unbind(vma_res, NULL);
1961
1962 vma->resource = NULL;
1963
1964 atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
1965 &vma->flags);
1966
1967 i915_vma_detach(vma);
1968
1969 if (!async) {
1970 if (unbind_fence) {
1971 dma_fence_wait(unbind_fence, false);
1972 dma_fence_put(unbind_fence);
1973 unbind_fence = NULL;
1974 }
1975 vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
1976 }
1977
1978
1979
1980
1981
1982
1983
1984 vma_unbind_pages(vma);
1985 return unbind_fence;
1986 }
1987
1988 int __i915_vma_unbind(struct i915_vma *vma)
1989 {
1990 int ret;
1991
1992 lockdep_assert_held(&vma->vm->mutex);
1993 assert_vma_held_evict(vma);
1994
1995 if (!drm_mm_node_allocated(&vma->node))
1996 return 0;
1997
1998 if (i915_vma_is_pinned(vma)) {
1999 vma_print_allocator(vma, "is pinned");
2000 return -EAGAIN;
2001 }
2002
2003
2004
2005
2006
2007
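	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */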
2008 ret = i915_vma_sync(vma);
2009 if (ret)
2010 return ret;
2011
2012 GEM_BUG_ON(i915_vma_is_active(vma));
2013 __i915_vma_evict(vma, false);
2014
2015 drm_mm_remove_node(&vma->node);
2016 return 0;
2017 }
2018
2019 static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
2020 {
2021 struct dma_fence *fence;
2022
2023 lockdep_assert_held(&vma->vm->mutex);
2024
2025 if (!drm_mm_node_allocated(&vma->node))
2026 return NULL;
2027
2028 if (i915_vma_is_pinned(vma) ||
2029 &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
2030 return ERR_PTR(-EAGAIN);
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
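	/*
	 * Chain the unbind after any outstanding activity on the vma; if the
	 * vma is still busy and the wait cannot be set up, bail out so the
	 * caller can fall back to a synchronous unbind.
	 */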
2041 if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
2042 I915_ACTIVE_AWAIT_EXCL |
2043 I915_ACTIVE_AWAIT_ACTIVE) < 0) {
2044 return ERR_PTR(-EBUSY);
2045 }
2046
2047 fence = __i915_vma_evict(vma, true);
2048
2049 drm_mm_remove_node(&vma->node);
2050
2051 return fence;
2052 }
2053
2054 int i915_vma_unbind(struct i915_vma *vma)
2055 {
2056 struct i915_address_space *vm = vma->vm;
2057 intel_wakeref_t wakeref = 0;
2058 int err;
2059
2060 assert_object_held_shared(vma->obj);
2061
2062
2063 err = i915_vma_sync(vma);
2064 if (err)
2065 return err;
2066
2067 if (!drm_mm_node_allocated(&vma->node))
2068 return 0;
2069
2070 if (i915_vma_is_pinned(vma)) {
2071 vma_print_allocator(vma, "is pinned");
2072 return -EAGAIN;
2073 }
2074
2075 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
2076
2077 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
2078
2079 err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
2080 if (err)
2081 goto out_rpm;
2082
2083 err = __i915_vma_unbind(vma);
2084 mutex_unlock(&vm->mutex);
2085
2086 out_rpm:
2087 if (wakeref)
2088 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
2089 return err;
2090 }
2091
2092 int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
2093 {
2094 struct drm_i915_gem_object *obj = vma->obj;
2095 struct i915_address_space *vm = vma->vm;
2096 intel_wakeref_t wakeref = 0;
2097 struct dma_fence *fence;
2098 int err;
2099
2100
2101
2102
2103
2104 assert_object_held(obj);
2105
2106 if (!drm_mm_node_allocated(&vma->node))
2107 return 0;
2108
2109 if (i915_vma_is_pinned(vma)) {
2110 vma_print_allocator(vma, "is pinned");
2111 return -EAGAIN;
2112 }
2113
2114 if (!obj->mm.rsgt)
2115 return -EBUSY;
2116
2117 err = dma_resv_reserve_fences(obj->base.resv, 1);
2118 if (err)
2119 return -EBUSY;
2120
2121
2122
2123
2124
2125
2126 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
2127 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
2128
2129 if (trylock_vm && !mutex_trylock(&vm->mutex)) {
2130 err = -EBUSY;
2131 goto out_rpm;
2132 } else if (!trylock_vm) {
2133 err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
2134 if (err)
2135 goto out_rpm;
2136 }
2137
2138 fence = __i915_vma_unbind_async(vma);
2139 mutex_unlock(&vm->mutex);
2140 if (IS_ERR_OR_NULL(fence)) {
2141 err = PTR_ERR_OR_ZERO(fence);
2142 goto out_rpm;
2143 }
2144
2145 dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
2146 dma_fence_put(fence);
2147
2148 out_rpm:
2149 if (wakeref)
2150 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
2151 return err;
2152 }
2153
2154 int i915_vma_unbind_unlocked(struct i915_vma *vma)
2155 {
2156 int err;
2157
2158 i915_gem_object_lock(vma->obj, NULL);
2159 err = i915_vma_unbind(vma);
2160 i915_gem_object_unlock(vma->obj);
2161
2162 return err;
2163 }
2164
2165 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
2166 {
2167 i915_gem_object_make_unshrinkable(vma->obj);
2168 return vma;
2169 }
2170
2171 void i915_vma_make_shrinkable(struct i915_vma *vma)
2172 {
2173 i915_gem_object_make_shrinkable(vma->obj);
2174 }
2175
2176 void i915_vma_make_purgeable(struct i915_vma *vma)
2177 {
2178 i915_gem_object_make_purgeable(vma->obj);
2179 }
2180
2181 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2182 #include "selftests/i915_vma.c"
2183 #endif
2184
2185 void i915_vma_module_exit(void)
2186 {
2187 kmem_cache_destroy(slab_vmas);
2188 }
2189
2190 int __init i915_vma_module_init(void)
2191 {
2192 slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
2193 if (!slab_vmas)
2194 return -ENOMEM;
2195
2196 return 0;
2197 }