#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_region.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gtt.h"

#include "i915_random.h"
#include "i915_selftest.h"
#include "i915_vma_resource.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_gtt.h"
#include "igt_flush_test.h"

0045 static void cleanup_freed_objects(struct drm_i915_private *i915)
0046 {
0047 i915_gem_drain_freed_objects(i915);
0048 }
0049
0050 static void fake_free_pages(struct drm_i915_gem_object *obj,
0051 struct sg_table *pages)
0052 {
0053 sg_free_table(pages);
0054 kfree(pages);
0055 }
0056
0057 static int fake_get_pages(struct drm_i915_gem_object *obj)
0058 {
0059 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
0060 #define PFN_BIAS 0x1000
0061 struct sg_table *pages;
0062 struct scatterlist *sg;
0063 unsigned int sg_page_sizes;
0064 typeof(obj->base.size) rem;
0065
0066 pages = kmalloc(sizeof(*pages), GFP);
0067 if (!pages)
0068 return -ENOMEM;
0069
0070 rem = round_up(obj->base.size, BIT(31)) >> 31;
0071 if (sg_alloc_table(pages, rem, GFP)) {
0072 kfree(pages);
0073 return -ENOMEM;
0074 }
0075
0076 sg_page_sizes = 0;
0077 rem = obj->base.size;
0078 for (sg = pages->sgl; sg; sg = sg_next(sg)) {
0079 unsigned long len = min_t(typeof(rem), rem, BIT(31));
0080
0081 GEM_BUG_ON(!len);
0082 sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
0083 sg_dma_address(sg) = page_to_phys(sg_page(sg));
0084 sg_dma_len(sg) = len;
0085 sg_page_sizes |= len;
0086
0087 rem -= len;
0088 }
0089 GEM_BUG_ON(rem);
0090
0091 __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
0092
0093 return 0;
0094 #undef GFP
0095 }
0096
0097 static void fake_put_pages(struct drm_i915_gem_object *obj,
0098 struct sg_table *pages)
0099 {
0100 fake_free_pages(obj, pages);
0101 obj->mm.dirty = false;
0102 }
0103
0104 static const struct drm_i915_gem_object_ops fake_ops = {
0105 .name = "fake-gem",
0106 .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
0107 .get_pages = fake_get_pages,
0108 .put_pages = fake_put_pages,
0109 };
0110
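/*
 * fake_dma_object() creates a GEM object whose backing store is faked:
 * every sg entry points at the same biased pfn and the "dma address" is
 * simply its physical address, so no memory proportional to the object
 * size is ever allocated. This lets the hole exercisers create huge
 * objects purely to probe GTT insertion.
 */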
0111 static struct drm_i915_gem_object *
0112 fake_dma_object(struct drm_i915_private *i915, u64 size)
0113 {
0114 static struct lock_class_key lock_class;
0115 struct drm_i915_gem_object *obj;
0116
0117 GEM_BUG_ON(!size);
0118 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
0119
0120 if (overflows_type(size, obj->base.size))
0121 return ERR_PTR(-E2BIG);
0122
0123 obj = i915_gem_object_alloc();
0124 if (!obj)
0125 goto err;
0126
0127 drm_gem_private_object_init(&i915->drm, &obj->base, size);
0128 i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
0129
0130 i915_gem_object_set_volatile(obj);
0131
0132 obj->write_domain = I915_GEM_DOMAIN_CPU;
0133 obj->read_domains = I915_GEM_DOMAIN_CPU;
0134 obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
0137 if (i915_gem_object_pin_pages_unlocked(obj))
0138 goto err_obj;
0139
0140 i915_gem_object_unpin_pages(obj);
0141 return obj;
0142
0143 err_obj:
0144 i915_gem_object_put(obj);
0145 err:
0146 return ERR_PTR(-ENOMEM);
0147 }
0148
0149 static int igt_ppgtt_alloc(void *arg)
0150 {
0151 struct drm_i915_private *dev_priv = arg;
0152 struct i915_ppgtt *ppgtt;
0153 struct i915_gem_ww_ctx ww;
0154 u64 size, last, limit;
0155 int err = 0;

	/* Allocate a ppgtt and try to fill the entire range */

0159 if (!HAS_PPGTT(dev_priv))
0160 return 0;
0161
0162 ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
0163 if (IS_ERR(ppgtt))
0164 return PTR_ERR(ppgtt);
0165
0166 if (!ppgtt->vm.allocate_va_range)
0167 goto err_ppgtt_cleanup;

	/*
	 * While we only allocate the page tables here and so we could
	 * address a much larger GTT than we could actually fit into RAM,
	 * a practical limit is the amount of physical pages in the system.
	 * This should ensure that we do not run into the oomkiller during
	 * the test and take down the machine wilfully.
	 */
0176 limit = totalram_pages() << PAGE_SHIFT;
0177 limit = min(ppgtt->vm.total, limit);
0178
0179 i915_gem_ww_ctx_init(&ww, false);
0180 retry:
0181 err = i915_vm_lock_objects(&ppgtt->vm, &ww);
0182 if (err)
0183 goto err_ppgtt_cleanup;

	/* Check we can allocate the entire range */
0186 for (size = 4096; size <= limit; size <<= 2) {
0187 struct i915_vm_pt_stash stash = {};
0188
0189 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
0190 if (err)
0191 goto err_ppgtt_cleanup;
0192
0193 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
0194 if (err) {
0195 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
0196 goto err_ppgtt_cleanup;
0197 }
0198
0199 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
0200 cond_resched();
0201
0202 ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
0203
0204 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
0205 }

	/* Check we can incrementally allocate the entire range */
0208 for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
0209 struct i915_vm_pt_stash stash = {};
0210
0211 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
0212 if (err)
0213 goto err_ppgtt_cleanup;
0214
0215 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
0216 if (err) {
0217 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
0218 goto err_ppgtt_cleanup;
0219 }
0220
0221 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
0222 last, size - last);
0223 cond_resched();
0224
0225 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
0226 }
0227
0228 err_ppgtt_cleanup:
0229 if (err == -EDEADLK) {
0230 err = i915_gem_ww_ctx_backoff(&ww);
0231 if (!err)
0232 goto retry;
0233 }
0234 i915_gem_ww_ctx_fini(&ww);
0235
0236 i915_vm_put(&ppgtt->vm);
0237 return err;
0238 }
0239
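/*
 * lowlevel_hole() bypasses the VMA layer entirely: it builds a mock
 * i915_vma_resource by hand and drives vm->allocate_va_range(),
 * vm->insert_entries() and vm->clear_range() directly, walking every
 * power-of-two object size that fits the hole and filling the hole in
 * a random order each time.
 */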
0240 static int lowlevel_hole(struct i915_address_space *vm,
0241 u64 hole_start, u64 hole_end,
0242 unsigned long end_time)
0243 {
0244 const unsigned int min_alignment =
0245 i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
0246 I915_RND_STATE(seed_prng);
0247 struct i915_vma_resource *mock_vma_res;
0248 unsigned int size;
0249
0250 mock_vma_res = kzalloc(sizeof(*mock_vma_res), GFP_KERNEL);
0251 if (!mock_vma_res)
0252 return -ENOMEM;

	/* Keep creating larger objects until one cannot fit into the hole */
0255 for (size = 12; (hole_end - hole_start) >> size; size++) {
0256 I915_RND_SUBSTATE(prng, seed_prng);
0257 struct drm_i915_gem_object *obj;
0258 unsigned int *order, count, n;
0259 u64 hole_size, aligned_size;
0260
0261 aligned_size = max_t(u32, ilog2(min_alignment), size);
0262 hole_size = (hole_end - hole_start) >> aligned_size;
0263 if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
0264 hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
0265 count = hole_size >> 1;
0266 if (!count) {
0267 pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
0268 __func__, hole_start, hole_end, size, hole_size);
0269 break;
0270 }
0271
0272 do {
0273 order = i915_random_order(count, &prng);
0274 if (order)
0275 break;
0276 } while (count >>= 1);
0277 if (!count) {
0278 kfree(mock_vma_res);
0279 return -ENOMEM;
0280 }
0281 GEM_BUG_ON(!order);
0282
0283 GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total);
0284 GEM_BUG_ON(hole_start + count * BIT_ULL(aligned_size) > hole_end);

		/*
		 * Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
0292 obj = fake_dma_object(vm->i915, BIT_ULL(size));
0293 if (IS_ERR(obj)) {
0294 kfree(order);
0295 break;
0296 }
0297
0298 GEM_BUG_ON(obj->base.size != BIT_ULL(size));
0299
0300 if (i915_gem_object_pin_pages_unlocked(obj)) {
0301 i915_gem_object_put(obj);
0302 kfree(order);
0303 break;
0304 }
0305
0306 for (n = 0; n < count; n++) {
0307 u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
0308 intel_wakeref_t wakeref;
0309
0310 GEM_BUG_ON(addr + BIT_ULL(aligned_size) > vm->total);
0311
0312 if (igt_timeout(end_time,
0313 "%s timed out before %d/%d\n",
0314 __func__, n, count)) {
0315 hole_end = hole_start;
0316 break;
0317 }
0318
0319 if (vm->allocate_va_range) {
0320 struct i915_vm_pt_stash stash = {};
0321 struct i915_gem_ww_ctx ww;
0322 int err;
0323
0324 i915_gem_ww_ctx_init(&ww, false);
0325 retry:
0326 err = i915_vm_lock_objects(vm, &ww);
0327 if (err)
0328 goto alloc_vm_end;
0329
0330 err = -ENOMEM;
0331 if (i915_vm_alloc_pt_stash(vm, &stash,
0332 BIT_ULL(size)))
0333 goto alloc_vm_end;
0334
0335 err = i915_vm_map_pt_stash(vm, &stash);
0336 if (!err)
0337 vm->allocate_va_range(vm, &stash,
0338 addr, BIT_ULL(size));
0339 i915_vm_free_pt_stash(vm, &stash);
0340 alloc_vm_end:
0341 if (err == -EDEADLK) {
0342 err = i915_gem_ww_ctx_backoff(&ww);
0343 if (!err)
0344 goto retry;
0345 }
0346 i915_gem_ww_ctx_fini(&ww);
0347
0348 if (err)
0349 break;
0350 }
0351
0352 mock_vma_res->bi.pages = obj->mm.pages;
0353 mock_vma_res->node_size = BIT_ULL(aligned_size);
0354 mock_vma_res->start = addr;
0355
0356 with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
0357 vm->insert_entries(vm, mock_vma_res,
0358 I915_CACHE_NONE, 0);
0359 }
0360 count = n;
0361
0362 i915_random_reorder(order, count, &prng);
0363 for (n = 0; n < count; n++) {
0364 u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
0365 intel_wakeref_t wakeref;
0366
0367 GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
0368 with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
0369 vm->clear_range(vm, addr, BIT_ULL(size));
0370 }
0371
0372 i915_gem_object_unpin_pages(obj);
0373 i915_gem_object_put(obj);
0374
0375 kfree(order);
0376
0377 cleanup_freed_objects(vm->i915);
0378 }
0379
0380 kfree(mock_vma_res);
0381 return 0;
0382 }
0383
0384 static void close_object_list(struct list_head *objects,
0385 struct i915_address_space *vm)
0386 {
0387 struct drm_i915_gem_object *obj, *on;
0388 int ignored;
0389
0390 list_for_each_entry_safe(obj, on, objects, st_link) {
0391 struct i915_vma *vma;
0392
0393 vma = i915_vma_instance(obj, vm, NULL);
0394 if (!IS_ERR(vma))
0395 ignored = i915_vma_unbind_unlocked(vma);
0396
0397 list_del(&obj->st_link);
0398 i915_gem_object_put(obj);
0399 }
0400 }
0401
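/*
 * fill_hole() packs lists of objects, whose page counts step through
 * powers of a prime, into the hole from both ends (top-down and
 * bottom-up), then checks in both forward and reverse list order that
 * every node stayed exactly where it was placed before unbinding it.
 */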
0402 static int fill_hole(struct i915_address_space *vm,
0403 u64 hole_start, u64 hole_end,
0404 unsigned long end_time)
0405 {
0406 const u64 hole_size = hole_end - hole_start;
0407 struct drm_i915_gem_object *obj;
0408 const unsigned int min_alignment =
0409 i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
0410 const unsigned long max_pages =
0411 min_t(u64, ULONG_MAX - 1, (hole_size / 2) >> ilog2(min_alignment));
0412 const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
0413 unsigned long npages, prime, flags;
0414 struct i915_vma *vma;
0415 LIST_HEAD(objects);
0416 int err;

	/* Try binding many VMA working inwards from either edge */

0420 flags = PIN_OFFSET_FIXED | PIN_USER;
0421 if (i915_is_ggtt(vm))
0422 flags |= PIN_GLOBAL;
0423
0424 for_each_prime_number_from(prime, 2, max_step) {
0425 for (npages = 1; npages <= max_pages; npages *= prime) {
0426 const u64 full_size = npages << PAGE_SHIFT;
0427 const struct {
0428 const char *name;
0429 u64 offset;
0430 int step;
0431 } phases[] = {
0432 { "top-down", hole_end, -1, },
0433 { "bottom-up", hole_start, 1, },
0434 { }
0435 }, *p;
0436
0437 obj = fake_dma_object(vm->i915, full_size);
0438 if (IS_ERR(obj))
0439 break;
0440
0441 list_add(&obj->st_link, &objects);

			/*
			 * Walk the list of objects in both directions, packing
			 * them into the hole from either end at min_alignment
			 * granularity, and verify each node lands exactly
			 * where we asked.
			 */
0447 for (p = phases; p->name; p++) {
0448 u64 offset;
0449
0450 offset = p->offset;
0451 list_for_each_entry(obj, &objects, st_link) {
0452 u64 aligned_size = round_up(obj->base.size,
0453 min_alignment);
0454
0455 vma = i915_vma_instance(obj, vm, NULL);
0456 if (IS_ERR(vma))
0457 continue;
0458
0459 if (p->step < 0) {
0460 if (offset < hole_start + aligned_size)
0461 break;
0462 offset -= aligned_size;
0463 }
0464
0465 err = i915_vma_pin(vma, 0, 0, offset | flags);
0466 if (err) {
0467 pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
0468 __func__, p->name, err, npages, prime, offset);
0469 goto err;
0470 }
0471
0472 if (!drm_mm_node_allocated(&vma->node) ||
0473 i915_vma_misplaced(vma, 0, 0, offset | flags)) {
0474 pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
0475 __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
0476 offset);
0477 err = -EINVAL;
0478 goto err;
0479 }
0480
0481 i915_vma_unpin(vma);
0482
0483 if (p->step > 0) {
0484 if (offset + aligned_size > hole_end)
0485 break;
0486 offset += aligned_size;
0487 }
0488 }
0489
0490 offset = p->offset;
0491 list_for_each_entry(obj, &objects, st_link) {
0492 u64 aligned_size = round_up(obj->base.size,
0493 min_alignment);
0494
0495 vma = i915_vma_instance(obj, vm, NULL);
0496 if (IS_ERR(vma))
0497 continue;
0498
0499 if (p->step < 0) {
0500 if (offset < hole_start + aligned_size)
0501 break;
0502 offset -= aligned_size;
0503 }
0504
0505 if (!drm_mm_node_allocated(&vma->node) ||
0506 i915_vma_misplaced(vma, 0, 0, offset | flags)) {
0507 pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
0508 __func__, p->name, vma->node.start, vma->node.size,
0509 offset);
0510 err = -EINVAL;
0511 goto err;
0512 }
0513
0514 err = i915_vma_unbind_unlocked(vma);
0515 if (err) {
0516 pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
0517 __func__, p->name, vma->node.start, vma->node.size,
0518 err);
0519 goto err;
0520 }
0521
0522 if (p->step > 0) {
0523 if (offset + aligned_size > hole_end)
0524 break;
0525 offset += aligned_size;
0526 }
0527 }
0528
0529 offset = p->offset;
0530 list_for_each_entry_reverse(obj, &objects, st_link) {
0531 u64 aligned_size = round_up(obj->base.size,
0532 min_alignment);
0533
0534 vma = i915_vma_instance(obj, vm, NULL);
0535 if (IS_ERR(vma))
0536 continue;
0537
0538 if (p->step < 0) {
0539 if (offset < hole_start + aligned_size)
0540 break;
0541 offset -= aligned_size;
0542 }
0543
0544 err = i915_vma_pin(vma, 0, 0, offset | flags);
0545 if (err) {
0546 pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
0547 __func__, p->name, err, npages, prime, offset);
0548 goto err;
0549 }
0550
0551 if (!drm_mm_node_allocated(&vma->node) ||
0552 i915_vma_misplaced(vma, 0, 0, offset | flags)) {
0553 pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
0554 __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
0555 offset);
0556 err = -EINVAL;
0557 goto err;
0558 }
0559
0560 i915_vma_unpin(vma);
0561
0562 if (p->step > 0) {
0563 if (offset + aligned_size > hole_end)
0564 break;
0565 offset += aligned_size;
0566 }
0567 }
0568
0569 offset = p->offset;
0570 list_for_each_entry_reverse(obj, &objects, st_link) {
0571 u64 aligned_size = round_up(obj->base.size,
0572 min_alignment);
0573
0574 vma = i915_vma_instance(obj, vm, NULL);
0575 if (IS_ERR(vma))
0576 continue;
0577
0578 if (p->step < 0) {
0579 if (offset < hole_start + aligned_size)
0580 break;
0581 offset -= aligned_size;
0582 }
0583
0584 if (!drm_mm_node_allocated(&vma->node) ||
0585 i915_vma_misplaced(vma, 0, 0, offset | flags)) {
0586 pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
0587 __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
0588 offset);
0589 err = -EINVAL;
0590 goto err;
0591 }
0592
0593 err = i915_vma_unbind_unlocked(vma);
0594 if (err) {
0595 pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
0596 __func__, p->name, vma->node.start, vma->node.size,
0597 err);
0598 goto err;
0599 }
0600
0601 if (p->step > 0) {
0602 if (offset + aligned_size > hole_end)
0603 break;
0604 offset += aligned_size;
0605 }
0606 }
0607 }
0608
0609 if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
0610 __func__, npages, prime)) {
0611 err = -EINTR;
0612 goto err;
0613 }
0614 }
0615
0616 close_object_list(&objects, vm);
0617 cleanup_freed_objects(vm->i915);
0618 }
0619
0620 return 0;
0621
0622 err:
0623 close_object_list(&objects, vm);
0624 return err;
0625 }
0626
0627 static int walk_hole(struct i915_address_space *vm,
0628 u64 hole_start, u64 hole_end,
0629 unsigned long end_time)
0630 {
0631 const u64 hole_size = hole_end - hole_start;
0632 const unsigned long max_pages =
0633 min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
0634 unsigned long min_alignment;
0635 unsigned long flags;
0636 u64 size;

	/* Try binding a single VMA in various positions within the hole */

0640 flags = PIN_OFFSET_FIXED | PIN_USER;
0641 if (i915_is_ggtt(vm))
0642 flags |= PIN_GLOBAL;
0643
0644 min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
0645
0646 for_each_prime_number_from(size, 1, max_pages) {
0647 struct drm_i915_gem_object *obj;
0648 struct i915_vma *vma;
0649 u64 addr;
0650 int err = 0;
0651
0652 obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
0653 if (IS_ERR(obj))
0654 break;
0655
0656 vma = i915_vma_instance(obj, vm, NULL);
0657 if (IS_ERR(vma)) {
0658 err = PTR_ERR(vma);
0659 goto err_put;
0660 }
0661
0662 for (addr = hole_start;
0663 addr + obj->base.size < hole_end;
0664 addr += round_up(obj->base.size, min_alignment)) {
0665 err = i915_vma_pin(vma, 0, 0, addr | flags);
0666 if (err) {
0667 pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
0668 __func__, addr, vma->size,
0669 hole_start, hole_end, err);
0670 goto err_put;
0671 }
0672 i915_vma_unpin(vma);
0673
0674 if (!drm_mm_node_allocated(&vma->node) ||
0675 i915_vma_misplaced(vma, 0, 0, addr | flags)) {
0676 pr_err("%s incorrect at %llx + %llx\n",
0677 __func__, addr, vma->size);
0678 err = -EINVAL;
0679 goto err_put;
0680 }
0681
0682 err = i915_vma_unbind_unlocked(vma);
0683 if (err) {
0684 pr_err("%s unbind failed at %llx + %llx with err=%d\n",
0685 __func__, addr, vma->size, err);
0686 goto err_put;
0687 }
0688
0689 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
0690
0691 if (igt_timeout(end_time,
0692 "%s timed out at %llx\n",
0693 __func__, addr)) {
0694 err = -EINTR;
0695 goto err_put;
0696 }
0697 }
0698
0699 err_put:
0700 i915_gem_object_put(obj);
0701 if (err)
0702 return err;
0703
0704 cleanup_freed_objects(vm->i915);
0705 }
0706
0707 return 0;
0708 }
0709
0710 static int pot_hole(struct i915_address_space *vm,
0711 u64 hole_start, u64 hole_end,
0712 unsigned long end_time)
0713 {
0714 struct drm_i915_gem_object *obj;
0715 struct i915_vma *vma;
0716 unsigned int min_alignment;
0717 unsigned long flags;
0718 unsigned int pot;
0719 int err = 0;
0720
0721 flags = PIN_OFFSET_FIXED | PIN_USER;
0722 if (i915_is_ggtt(vm))
0723 flags |= PIN_GLOBAL;
0724
0725 min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
0726
0727 obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
0728 if (IS_ERR(obj))
0729 return PTR_ERR(obj);
0730
0731 vma = i915_vma_instance(obj, vm, NULL);
0732 if (IS_ERR(vma)) {
0733 err = PTR_ERR(vma);
0734 goto err_obj;
0735 }

	/* Step the object through the hole at every power-of-two stride */
0738 for (pot = fls64(hole_end - 1) - 1;
0739 pot > ilog2(2 * min_alignment);
0740 pot--) {
0741 u64 step = BIT_ULL(pot);
0742 u64 addr;
0743
0744 for (addr = round_up(hole_start + min_alignment, step) - min_alignment;
0745 hole_end > addr && hole_end - addr >= 2 * min_alignment;
0746 addr += step) {
0747 err = i915_vma_pin(vma, 0, 0, addr | flags);
0748 if (err) {
0749 pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
0750 __func__,
0751 addr,
0752 hole_start, hole_end,
0753 err);
0754 goto err_obj;
0755 }
0756
0757 if (!drm_mm_node_allocated(&vma->node) ||
0758 i915_vma_misplaced(vma, 0, 0, addr | flags)) {
0759 pr_err("%s incorrect at %llx + %llx\n",
0760 __func__, addr, vma->size);
0761 i915_vma_unpin(vma);
0762 err = i915_vma_unbind_unlocked(vma);
0763 err = -EINVAL;
0764 goto err_obj;
0765 }
0766
0767 i915_vma_unpin(vma);
0768 err = i915_vma_unbind_unlocked(vma);
0769 GEM_BUG_ON(err);
0770 }
0771
0772 if (igt_timeout(end_time,
0773 "%s timed out after %d/%d\n",
0774 __func__, pot, fls64(hole_end - 1) - 1)) {
0775 err = -EINTR;
0776 goto err_obj;
0777 }
0778 }
0779
0780 err_obj:
0781 i915_gem_object_put(obj);
0782 return err;
0783 }
0784
0785 static int drunk_hole(struct i915_address_space *vm,
0786 u64 hole_start, u64 hole_end,
0787 unsigned long end_time)
0788 {
0789 I915_RND_STATE(prng);
0790 unsigned int min_alignment;
0791 unsigned int size;
0792 unsigned long flags;
0793
0794 flags = PIN_OFFSET_FIXED | PIN_USER;
0795 if (i915_is_ggtt(vm))
0796 flags |= PIN_GLOBAL;
0797
0798 min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);

	/* Keep creating larger objects until one cannot fit into the hole */
0801 for (size = 12; (hole_end - hole_start) >> size; size++) {
0802 struct drm_i915_gem_object *obj;
0803 unsigned int *order, count, n;
0804 struct i915_vma *vma;
0805 u64 hole_size, aligned_size;
0806 int err = -ENODEV;
0807
0808 aligned_size = max_t(u32, ilog2(min_alignment), size);
0809 hole_size = (hole_end - hole_start) >> aligned_size;
0810 if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
0811 hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
0812 count = hole_size >> 1;
0813 if (!count) {
0814 pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
0815 __func__, hole_start, hole_end, size, hole_size);
0816 break;
0817 }
0818
0819 do {
0820 order = i915_random_order(count, &prng);
0821 if (order)
0822 break;
0823 } while (count >>= 1);
0824 if (!count)
0825 return -ENOMEM;
0826 GEM_BUG_ON(!order);

		/*
		 * Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
0834 obj = fake_dma_object(vm->i915, BIT_ULL(size));
0835 if (IS_ERR(obj)) {
0836 kfree(order);
0837 break;
0838 }
0839
0840 vma = i915_vma_instance(obj, vm, NULL);
0841 if (IS_ERR(vma)) {
0842 err = PTR_ERR(vma);
0843 goto err_obj;
0844 }
0845
0846 GEM_BUG_ON(vma->size != BIT_ULL(size));
0847
0848 for (n = 0; n < count; n++) {
0849 u64 addr = hole_start + order[n] * BIT_ULL(aligned_size);
0850
0851 err = i915_vma_pin(vma, 0, 0, addr | flags);
0852 if (err) {
0853 pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
0854 __func__,
0855 addr, BIT_ULL(size),
0856 hole_start, hole_end,
0857 err);
0858 goto err_obj;
0859 }
0860
0861 if (!drm_mm_node_allocated(&vma->node) ||
0862 i915_vma_misplaced(vma, 0, 0, addr | flags)) {
0863 pr_err("%s incorrect at %llx + %llx\n",
0864 __func__, addr, BIT_ULL(size));
0865 i915_vma_unpin(vma);
0866 err = i915_vma_unbind_unlocked(vma);
0867 err = -EINVAL;
0868 goto err_obj;
0869 }
0870
0871 i915_vma_unpin(vma);
0872 err = i915_vma_unbind_unlocked(vma);
0873 GEM_BUG_ON(err);
0874
0875 if (igt_timeout(end_time,
0876 "%s timed out after %d/%d\n",
0877 __func__, n, count)) {
0878 err = -EINTR;
0879 goto err_obj;
0880 }
0881 }
0882
0883 err_obj:
0884 i915_gem_object_put(obj);
0885 kfree(order);
0886 if (err)
0887 return err;
0888
0889 cleanup_freed_objects(vm->i915);
0890 }
0891
0892 return 0;
0893 }
0894
0895 static int __shrink_hole(struct i915_address_space *vm,
0896 u64 hole_start, u64 hole_end,
0897 unsigned long end_time)
0898 {
0899 struct drm_i915_gem_object *obj;
0900 unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
0901 unsigned int min_alignment;
0902 unsigned int order = 12;
0903 LIST_HEAD(objects);
0904 int err = 0;
0905 u64 addr;
0906
0907 min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);

	/* Fill the hole with progressively larger (doubling) objects */
0910 for (addr = hole_start; addr < hole_end; ) {
0911 struct i915_vma *vma;
0912 u64 size = BIT_ULL(order++);
0913
0914 size = min(size, hole_end - addr);
0915 obj = fake_dma_object(vm->i915, size);
0916 if (IS_ERR(obj)) {
0917 err = PTR_ERR(obj);
0918 break;
0919 }
0920
0921 list_add(&obj->st_link, &objects);
0922
0923 vma = i915_vma_instance(obj, vm, NULL);
0924 if (IS_ERR(vma)) {
0925 err = PTR_ERR(vma);
0926 break;
0927 }
0928
0929 GEM_BUG_ON(vma->size != size);
0930
0931 err = i915_vma_pin(vma, 0, 0, addr | flags);
0932 if (err) {
0933 pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
0934 __func__, addr, size, hole_start, hole_end, err);
0935 break;
0936 }
0937
0938 if (!drm_mm_node_allocated(&vma->node) ||
0939 i915_vma_misplaced(vma, 0, 0, addr | flags)) {
0940 pr_err("%s incorrect at %llx + %llx\n",
0941 __func__, addr, size);
0942 i915_vma_unpin(vma);
0943 err = i915_vma_unbind_unlocked(vma);
0944 err = -EINVAL;
0945 break;
0946 }
0947
0948 i915_vma_unpin(vma);
0949 addr += round_up(size, min_alignment);

		/*
		 * Since we are injecting allocation faults at random
		 * intervals, wait for this allocation to complete before
		 * we change the fault injection.
		 */
0956 err = i915_vma_sync(vma);
0957 if (err)
0958 break;
0959
0960 if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
0962 __func__, addr, hole_start, hole_end)) {
0963 err = -EINTR;
0964 break;
0965 }
0966 }
0967
0968 close_object_list(&objects, vm);
0969 cleanup_freed_objects(vm->i915);
0970 return err;
0971 }
0972
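/*
 * shrink_hole() repeats __shrink_hole() with allocation fault injection
 * enabled, stepping the fault interval through successive primes so that
 * the page-table allocation error paths get exercised at many different
 * points.
 */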
0973 static int shrink_hole(struct i915_address_space *vm,
0974 u64 hole_start, u64 hole_end,
0975 unsigned long end_time)
0976 {
0977 unsigned long prime;
0978 int err;
0979
0980 vm->fault_attr.probability = 999;
0981 atomic_set(&vm->fault_attr.times, -1);
0982
0983 for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
0984 vm->fault_attr.interval = prime;
0985 err = __shrink_hole(vm, hole_start, hole_end, end_time);
0986 if (err)
0987 break;
0988 }
0989
0990 memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
0991
0992 return err;
0993 }
0994
0995 static int shrink_boom(struct i915_address_space *vm,
0996 u64 hole_start, u64 hole_end,
0997 unsigned long end_time)
0998 {
0999 unsigned int sizes[] = { SZ_2M, SZ_1G };
1000 struct drm_i915_gem_object *purge;
1001 struct drm_i915_gem_object *explode;
1002 int err;
1003 int i;

	/*
	 * Catch the case which shrink_hole seems to miss: invoke the
	 * shrinker while we allocate the page directories/tables, with the
	 * vma for the purge object left unpinned so that it can be reaped
	 * underneath us.
	 */
1012 for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
1013 unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
1014 unsigned int size = sizes[i];
1015 struct i915_vma *vma;
1016
1017 purge = fake_dma_object(vm->i915, size);
1018 if (IS_ERR(purge))
1019 return PTR_ERR(purge);
1020
1021 vma = i915_vma_instance(purge, vm, NULL);
1022 if (IS_ERR(vma)) {
1023 err = PTR_ERR(vma);
1024 goto err_purge;
1025 }
1026
1027 err = i915_vma_pin(vma, 0, 0, flags);
1028 if (err)
1029 goto err_purge;
1030

		/* Should now be ripe for purging */
1032 i915_vma_unpin(vma);
1033
1034 explode = fake_dma_object(vm->i915, size);
1035 if (IS_ERR(explode)) {
1036 err = PTR_ERR(explode);
1037 goto err_purge;
1038 }
1039
1040 vm->fault_attr.probability = 100;
1041 vm->fault_attr.interval = 1;
1042 atomic_set(&vm->fault_attr.times, -1);
1043
1044 vma = i915_vma_instance(explode, vm, NULL);
1045 if (IS_ERR(vma)) {
1046 err = PTR_ERR(vma);
1047 goto err_explode;
1048 }
1049
1050 err = i915_vma_pin(vma, 0, 0, flags | size);
1051 if (err)
1052 goto err_explode;
1053
1054 i915_vma_unpin(vma);
1055
1056 i915_gem_object_put(purge);
1057 i915_gem_object_put(explode);
1058
1059 memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1060 cleanup_freed_objects(vm->i915);
1061 }
1062
1063 return 0;
1064
1065 err_explode:
1066 i915_gem_object_put(explode);
1067 err_purge:
1068 i915_gem_object_put(purge);
1069 memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
1070 return err;
1071 }
1072
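/*
 * misaligned_case() pins a single object from the given memory region at
 * a fixed (possibly misaligned) offset and checks that the resulting
 * vma->size and node size are padded out to the expected minimum page
 * size for that region.
 */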
1073 static int misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr,
1074 u64 addr, u64 size, unsigned long flags)
1075 {
1076 struct drm_i915_gem_object *obj;
1077 struct i915_vma *vma;
1078 int err = 0;
1079 u64 expected_vma_size, expected_node_size;
1080 bool is_stolen = mr->type == INTEL_MEMORY_STOLEN_SYSTEM ||
1081 mr->type == INTEL_MEMORY_STOLEN_LOCAL;
1082
1083 obj = i915_gem_object_create_region(mr, size, 0, 0);
1084 if (IS_ERR(obj)) {
		/* stolen memory may be unavailable on this platform; skip rather than fail */
1086 if (PTR_ERR(obj) == -ENODEV && is_stolen)
1087 return 0;
1088 return PTR_ERR(obj);
1089 }
1090
1091 vma = i915_vma_instance(obj, vm, NULL);
1092 if (IS_ERR(vma)) {
1093 err = PTR_ERR(vma);
1094 goto err_put;
1095 }
1096
1097 err = i915_vma_pin(vma, 0, 0, addr | flags);
1098 if (err)
1099 goto err_put;
1100 i915_vma_unpin(vma);
1101
1102 if (!drm_mm_node_allocated(&vma->node)) {
1103 err = -EINVAL;
1104 goto err_put;
1105 }
1106
1107 if (i915_vma_misplaced(vma, 0, 0, addr | flags)) {
1108 err = -EINVAL;
1109 goto err_put;
1110 }
1111
1112 expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1));
1113 expected_node_size = expected_vma_size;
1114
1115 if (HAS_64K_PAGES(vm->i915) && i915_gem_object_is_lmem(obj)) {
		/*
		 * The compact-pt should expand the lmem node to 2MB for the
		 * ppGTT; for all other cases we should only expect 64K.
		 */
1120 expected_vma_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
1121 if (NEEDS_COMPACT_PT(vm->i915) && !i915_is_ggtt(vm))
1122 expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_2M);
1123 else
1124 expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
1125 }
1126
1127 if (vma->size != expected_vma_size || vma->node.size != expected_node_size) {
1128 err = i915_vma_unbind_unlocked(vma);
1129 err = -EBADSLT;
1130 goto err_put;
1131 }
1132
1133 err = i915_vma_unbind_unlocked(vma);
1134 if (err)
1135 goto err_put;
1136
1137 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1138
1139 err_put:
1140 i915_gem_object_put(obj);
1141 cleanup_freed_objects(vm->i915);
1142 return err;
1143 }
1144
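/*
 * misaligned_pin() runs misaligned_case() for every memory region:
 * offsets that violate the region's minimum alignment must be rejected
 * with -EINVAL, while small objects must still be expanded to the
 * region's minimum page size.
 */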
1145 static int misaligned_pin(struct i915_address_space *vm,
1146 u64 hole_start, u64 hole_end,
1147 unsigned long end_time)
1148 {
1149 struct intel_memory_region *mr;
1150 enum intel_region_id id;
1151 unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
1152 int err = 0;
1153 u64 hole_size = hole_end - hole_start;
1154
1155 if (i915_is_ggtt(vm))
1156 flags |= PIN_GLOBAL;
1157
1158 for_each_memory_region(mr, vm->i915, id) {
1159 u64 min_alignment = i915_vm_min_alignment(vm, mr->type);
1160 u64 size = min_alignment;
1161 u64 addr = round_down(hole_start + (hole_size / 2), min_alignment);

		/* avoid too small a hole */
1164 if (hole_size < 3 * min_alignment)
1165 continue;

		/* offsets below 4K can't be tested as the low bits carry the pin flags */
1168 if (min_alignment != I915_GTT_PAGE_SIZE_4K) {
1169 err = misaligned_case(vm, mr, addr + (min_alignment / 2), size, flags);
			/* a misaligned offset should fail with -EINVAL */
1171 if (!err)
1172 err = -EBADSLT;
1173 if (err != -EINVAL)
1174 return err;
1175 }

		/* test for vma->size expansion to min page size */
1178 err = misaligned_case(vm, mr, addr, PAGE_SIZE, flags);
1179 if (err)
1180 return err;

		/* test that an intermediate size does not over-expand vma->size */
1183 err = misaligned_case(vm, mr, addr, size / 2, flags);
1184 if (err)
1185 return err;
1186 }
1187
1188 return 0;
1189 }
1190
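/*
 * exercise_ppgtt() creates a fresh full ppGTT and hands its entire range
 * to the given hole exerciser (skipped when the device has no full ppGTT).
 */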
1191 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
1192 int (*func)(struct i915_address_space *vm,
1193 u64 hole_start, u64 hole_end,
1194 unsigned long end_time))
1195 {
1196 struct i915_ppgtt *ppgtt;
1197 IGT_TIMEOUT(end_time);
1198 struct file *file;
1199 int err;
1200
1201 if (!HAS_FULL_PPGTT(dev_priv))
1202 return 0;
1203
1204 file = mock_file(dev_priv);
1205 if (IS_ERR(file))
1206 return PTR_ERR(file);
1207
1208 ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
1209 if (IS_ERR(ppgtt)) {
1210 err = PTR_ERR(ppgtt);
1211 goto out_free;
1212 }
1213 GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
1214 assert_vm_alive(&ppgtt->vm);
1215
1216 err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
1217
1218 i915_vm_put(&ppgtt->vm);
1219
1220 out_free:
1221 fput(file);
1222 return err;
1223 }
1224
1225 static int igt_ppgtt_fill(void *arg)
1226 {
1227 return exercise_ppgtt(arg, fill_hole);
1228 }
1229
1230 static int igt_ppgtt_walk(void *arg)
1231 {
1232 return exercise_ppgtt(arg, walk_hole);
1233 }
1234
1235 static int igt_ppgtt_pot(void *arg)
1236 {
1237 return exercise_ppgtt(arg, pot_hole);
1238 }
1239
1240 static int igt_ppgtt_drunk(void *arg)
1241 {
1242 return exercise_ppgtt(arg, drunk_hole);
1243 }
1244
1245 static int igt_ppgtt_lowlevel(void *arg)
1246 {
1247 return exercise_ppgtt(arg, lowlevel_hole);
1248 }
1249
1250 static int igt_ppgtt_shrink(void *arg)
1251 {
1252 return exercise_ppgtt(arg, shrink_hole);
1253 }
1254
1255 static int igt_ppgtt_shrink_boom(void *arg)
1256 {
1257 return exercise_ppgtt(arg, shrink_boom);
1258 }
1259
1260 static int igt_ppgtt_misaligned_pin(void *arg)
1261 {
1262 return exercise_ppgtt(arg, misaligned_pin);
1263 }
1264
1265 static int sort_holes(void *priv, const struct list_head *A,
1266 const struct list_head *B)
1267 {
1268 struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1269 struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1270
1271 if (a->start < b->start)
1272 return -1;
1273 else
1274 return 1;
1275 }
1276
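/*
 * exercise_ggtt() walks every hole currently present in the live GGTT
 * (sorted by start address) and runs the exerciser over it, restarting
 * the walk after each hole since the exerciser may have reshaped the
 * drm_mm.
 */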
1277 static int exercise_ggtt(struct drm_i915_private *i915,
1278 int (*func)(struct i915_address_space *vm,
1279 u64 hole_start, u64 hole_end,
1280 unsigned long end_time))
1281 {
1282 struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1283 u64 hole_start, hole_end, last = 0;
1284 struct drm_mm_node *node;
1285 IGT_TIMEOUT(end_time);
1286 int err = 0;
1287
1288 restart:
1289 list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1290 drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1291 if (hole_start < last)
1292 continue;
1293
1294 if (ggtt->vm.mm.color_adjust)
1295 ggtt->vm.mm.color_adjust(node, 0,
1296 &hole_start, &hole_end);
1297 if (hole_start >= hole_end)
1298 continue;
1299
1300 err = func(&ggtt->vm, hole_start, hole_end, end_time);
1301 if (err)
1302 break;

		/* As we have manipulated the drm_mm, the hole list may be stale */
1305 last = hole_end;
1306 goto restart;
1307 }
1308
1309 return err;
1310 }
1311
1312 static int igt_ggtt_fill(void *arg)
1313 {
1314 return exercise_ggtt(arg, fill_hole);
1315 }
1316
1317 static int igt_ggtt_walk(void *arg)
1318 {
1319 return exercise_ggtt(arg, walk_hole);
1320 }
1321
1322 static int igt_ggtt_pot(void *arg)
1323 {
1324 return exercise_ggtt(arg, pot_hole);
1325 }
1326
1327 static int igt_ggtt_drunk(void *arg)
1328 {
1329 return exercise_ggtt(arg, drunk_hole);
1330 }
1331
1332 static int igt_ggtt_lowlevel(void *arg)
1333 {
1334 return exercise_ggtt(arg, lowlevel_hole);
1335 }
1336
1337 static int igt_ggtt_misaligned_pin(void *arg)
1338 {
1339 return exercise_ggtt(arg, misaligned_pin);
1340 }
1341
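/*
 * igt_ggtt_page() grabs a chunk of the mappable aperture, points every
 * GGTT PTE in that chunk at the same single page with insert_page(), and
 * then writes and reads back distinct values through the WC aperture in
 * random order to check each PTE really targets the expected offset.
 */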
1342 static int igt_ggtt_page(void *arg)
1343 {
1344 const unsigned int count = PAGE_SIZE/sizeof(u32);
1345 I915_RND_STATE(prng);
1346 struct drm_i915_private *i915 = arg;
1347 struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1348 struct drm_i915_gem_object *obj;
1349 intel_wakeref_t wakeref;
1350 struct drm_mm_node tmp;
1351 unsigned int *order, n;
1352 int err;
1353
1354 if (!i915_ggtt_has_aperture(ggtt))
1355 return 0;
1356
1357 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1358 if (IS_ERR(obj))
1359 return PTR_ERR(obj);
1360
1361 err = i915_gem_object_pin_pages_unlocked(obj);
1362 if (err)
1363 goto out_free;
1364
1365 memset(&tmp, 0, sizeof(tmp));
1366 mutex_lock(&ggtt->vm.mutex);
1367 err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1368 count * PAGE_SIZE, 0,
1369 I915_COLOR_UNEVICTABLE,
1370 0, ggtt->mappable_end,
1371 DRM_MM_INSERT_LOW);
1372 mutex_unlock(&ggtt->vm.mutex);
1373 if (err)
1374 goto out_unpin;
1375
1376 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1377
1378 for (n = 0; n < count; n++) {
1379 u64 offset = tmp.start + n * PAGE_SIZE;
1380
1381 ggtt->vm.insert_page(&ggtt->vm,
1382 i915_gem_object_get_dma_address(obj, 0),
1383 offset, I915_CACHE_NONE, 0);
1384 }
1385
1386 order = i915_random_order(count, &prng);
1387 if (!order) {
1388 err = -ENOMEM;
1389 goto out_remove;
1390 }
1391
1392 for (n = 0; n < count; n++) {
1393 u64 offset = tmp.start + order[n] * PAGE_SIZE;
1394 u32 __iomem *vaddr;
1395
1396 vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1397 iowrite32(n, vaddr + n);
1398 io_mapping_unmap_atomic(vaddr);
1399 }
1400 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1401
1402 i915_random_reorder(order, count, &prng);
1403 for (n = 0; n < count; n++) {
1404 u64 offset = tmp.start + order[n] * PAGE_SIZE;
1405 u32 __iomem *vaddr;
1406 u32 val;
1407
1408 vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1409 val = ioread32(vaddr + n);
1410 io_mapping_unmap_atomic(vaddr);
1411
1412 if (val != n) {
1413 pr_err("insert page failed: found %d, expected %d\n",
1414 val, n);
1415 err = -EINVAL;
1416 break;
1417 }
1418 }
1419
1420 kfree(order);
1421 out_remove:
1422 ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1423 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1424 mutex_lock(&ggtt->vm.mutex);
1425 drm_mm_remove_node(&tmp);
1426 mutex_unlock(&ggtt->vm.mutex);
1427 out_unpin:
1428 i915_gem_object_unpin_pages(obj);
1429 out_free:
1430 i915_gem_object_put(obj);
1431 return err;
1432 }
1433
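/*
 * track_vma_bind() fakes a binding without going through the real bind
 * path: it takes extra references on the object's pages, marks the vma's
 * pages as active and moves the vma onto the VM's bound_list so that
 * eviction and unbind treat it as a normal binding.
 */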
1434 static void track_vma_bind(struct i915_vma *vma)
1435 {
1436 struct drm_i915_gem_object *obj = vma->obj;
1437
1438 __i915_gem_object_pin_pages(obj);
1439
1440 GEM_BUG_ON(atomic_read(&vma->pages_count));
1441 atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
1442 __i915_gem_object_pin_pages(obj);
1443 vma->pages = obj->mm.pages;
1444 vma->resource->bi.pages = vma->pages;
1445
1446 mutex_lock(&vma->vm->mutex);
1447 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1448 mutex_unlock(&vma->vm->mutex);
1449 }
1450
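/*
 * exercise_mock() builds a mock context and runs the exerciser over its
 * ppGTT, capping the range at the amount of RAM in the system to keep
 * the mock (software-only) tests bounded.
 */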
1451 static int exercise_mock(struct drm_i915_private *i915,
1452 int (*func)(struct i915_address_space *vm,
1453 u64 hole_start, u64 hole_end,
1454 unsigned long end_time))
1455 {
1456 const u64 limit = totalram_pages() << PAGE_SHIFT;
1457 struct i915_address_space *vm;
1458 struct i915_gem_context *ctx;
1459 IGT_TIMEOUT(end_time);
1460 int err;
1461
1462 ctx = mock_context(i915, "mock");
1463 if (!ctx)
1464 return -ENOMEM;
1465
1466 vm = i915_gem_context_get_eb_vm(ctx);
1467 err = func(vm, 0, min(vm->total, limit), end_time);
1468 i915_vm_put(vm);
1469
1470 mock_context_close(ctx);
1471 return err;
1472 }
1473
1474 static int igt_mock_fill(void *arg)
1475 {
1476 struct i915_ggtt *ggtt = arg;
1477
1478 return exercise_mock(ggtt->vm.i915, fill_hole);
1479 }
1480
1481 static int igt_mock_walk(void *arg)
1482 {
1483 struct i915_ggtt *ggtt = arg;
1484
1485 return exercise_mock(ggtt->vm.i915, walk_hole);
1486 }
1487
1488 static int igt_mock_pot(void *arg)
1489 {
1490 struct i915_ggtt *ggtt = arg;
1491
1492 return exercise_mock(ggtt->vm.i915, pot_hole);
1493 }
1494
1495 static int igt_mock_drunk(void *arg)
1496 {
1497 struct i915_ggtt *ggtt = arg;
1498
1499 return exercise_mock(ggtt->vm.i915, drunk_hole);
1500 }
1501
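/*
 * reserve_gtt_with_resource() wraps i915_gem_gtt_reserve(), attaching a
 * freshly allocated vma resource on success so that the rest of the
 * selftest machinery (track_vma_bind(), unbind) has a resource to work
 * with.
 */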
1502 static int reserve_gtt_with_resource(struct i915_vma *vma, u64 offset)
1503 {
1504 struct i915_address_space *vm = vma->vm;
1505 struct i915_vma_resource *vma_res;
1506 struct drm_i915_gem_object *obj = vma->obj;
1507 int err;
1508
1509 vma_res = i915_vma_resource_alloc();
1510 if (IS_ERR(vma_res))
1511 return PTR_ERR(vma_res);
1512
1513 mutex_lock(&vm->mutex);
1514 err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
1515 offset,
1516 obj->cache_level,
1517 0);
1518 if (!err) {
1519 i915_vma_resource_init_from_vma(vma_res, vma);
1520 vma->resource = vma_res;
1521 } else {
1522 kfree(vma_res);
1523 }
1524 mutex_unlock(&vm->mutex);
1525
1526 return err;
1527 }
1528
1529 static int igt_gtt_reserve(void *arg)
1530 {
1531 struct i915_ggtt *ggtt = arg;
1532 struct drm_i915_gem_object *obj, *on;
1533 I915_RND_STATE(prng);
1534 LIST_HEAD(objects);
1535 u64 total;
1536 int err = -ENODEV;

	/*
	 * i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
1544 for (total = 0;
1545 total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1546 total += 2 * I915_GTT_PAGE_SIZE) {
1547 struct i915_vma *vma;
1548
1549 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1550 2 * PAGE_SIZE);
1551 if (IS_ERR(obj)) {
1552 err = PTR_ERR(obj);
1553 goto out;
1554 }
1555
1556 err = i915_gem_object_pin_pages_unlocked(obj);
1557 if (err) {
1558 i915_gem_object_put(obj);
1559 goto out;
1560 }
1561
1562 list_add(&obj->st_link, &objects);
1563 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1564 if (IS_ERR(vma)) {
1565 err = PTR_ERR(vma);
1566 goto out;
1567 }
1568
1569 err = reserve_gtt_with_resource(vma, total);
1570 if (err) {
1571 pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1572 total, ggtt->vm.total, err);
1573 goto out;
1574 }
1575 track_vma_bind(vma);
1576
1577 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1578 if (vma->node.start != total ||
1579 vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1580 pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1581 vma->node.start, vma->node.size,
1582 total, 2*I915_GTT_PAGE_SIZE);
1583 err = -EINVAL;
1584 goto out;
1585 }
1586 }
1587
	/* Now we start forcing evictions */
1589 for (total = I915_GTT_PAGE_SIZE;
1590 total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1591 total += 2 * I915_GTT_PAGE_SIZE) {
1592 struct i915_vma *vma;
1593
1594 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1595 2 * PAGE_SIZE);
1596 if (IS_ERR(obj)) {
1597 err = PTR_ERR(obj);
1598 goto out;
1599 }
1600
1601 err = i915_gem_object_pin_pages_unlocked(obj);
1602 if (err) {
1603 i915_gem_object_put(obj);
1604 goto out;
1605 }
1606
1607 list_add(&obj->st_link, &objects);
1608
1609 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1610 if (IS_ERR(vma)) {
1611 err = PTR_ERR(vma);
1612 goto out;
1613 }
1614
1615 err = reserve_gtt_with_resource(vma, total);
1616 if (err) {
1617 pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1618 total, ggtt->vm.total, err);
1619 goto out;
1620 }
1621 track_vma_bind(vma);
1622
1623 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1624 if (vma->node.start != total ||
1625 vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1626 pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1627 vma->node.start, vma->node.size,
1628 total, 2*I915_GTT_PAGE_SIZE);
1629 err = -EINVAL;
1630 goto out;
1631 }
1632 }
1633
	/* And then unbind and re-reserve each vma at a random offset */
1635 list_for_each_entry_safe(obj, on, &objects, st_link) {
1636 struct i915_vma *vma;
1637 u64 offset;
1638
1639 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1640 if (IS_ERR(vma)) {
1641 err = PTR_ERR(vma);
1642 goto out;
1643 }
1644
1645 err = i915_vma_unbind_unlocked(vma);
1646 if (err) {
1647 pr_err("i915_vma_unbind failed with err=%d!\n", err);
1648 goto out;
1649 }
1650
1651 offset = igt_random_offset(&prng,
1652 0, ggtt->vm.total,
1653 2 * I915_GTT_PAGE_SIZE,
1654 I915_GTT_MIN_ALIGNMENT);
1655
1656 err = reserve_gtt_with_resource(vma, offset);
1657 if (err) {
1658 pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1659 total, ggtt->vm.total, err);
1660 goto out;
1661 }
1662 track_vma_bind(vma);
1663
1664 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1665 if (vma->node.start != offset ||
1666 vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1667 pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
1668 vma->node.start, vma->node.size,
1669 offset, 2*I915_GTT_PAGE_SIZE);
1670 err = -EINVAL;
1671 goto out;
1672 }
1673 }
1674
1675 out:
1676 list_for_each_entry_safe(obj, on, &objects, st_link) {
1677 i915_gem_object_unpin_pages(obj);
1678 i915_gem_object_put(obj);
1679 }
1680 return err;
1681 }
1682
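/*
 * insert_gtt_with_resource() is the i915_gem_gtt_insert() counterpart of
 * reserve_gtt_with_resource(): it lets the allocator pick the offset
 * anywhere in the GTT and attaches a vma resource on success.
 */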
1683 static int insert_gtt_with_resource(struct i915_vma *vma)
1684 {
1685 struct i915_address_space *vm = vma->vm;
1686 struct i915_vma_resource *vma_res;
1687 struct drm_i915_gem_object *obj = vma->obj;
1688 int err;
1689
1690 vma_res = i915_vma_resource_alloc();
1691 if (IS_ERR(vma_res))
1692 return PTR_ERR(vma_res);
1693
1694 mutex_lock(&vm->mutex);
1695 err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
1696 obj->cache_level, 0, vm->total, 0);
1697 if (!err) {
1698 i915_vma_resource_init_from_vma(vma_res, vma);
1699 vma->resource = vma_res;
1700 } else {
1701 kfree(vma_res);
1702 }
1703 mutex_unlock(&vm->mutex);
1704
1705 return err;
1706 }
1707
1708 static int igt_gtt_insert(void *arg)
1709 {
1710 struct i915_ggtt *ggtt = arg;
1711 struct drm_i915_gem_object *obj, *on;
1712 struct drm_mm_node tmp = {};
1713 const struct invalid_insert {
1714 u64 size;
1715 u64 alignment;
1716 u64 start, end;
1717 } invalid_insert[] = {
1718 {
1719 ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
1720 0, ggtt->vm.total,
1721 },
1722 {
1723 2*I915_GTT_PAGE_SIZE, 0,
1724 0, I915_GTT_PAGE_SIZE,
1725 },
1726 {
1727 -(u64)I915_GTT_PAGE_SIZE, 0,
1728 0, 4*I915_GTT_PAGE_SIZE,
1729 },
1730 {
1731 -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1732 0, 4*I915_GTT_PAGE_SIZE,
1733 },
1734 {
1735 I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1736 I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1737 },
1738 {}
1739 }, *ii;
1740 LIST_HEAD(objects);
1741 u64 total;
1742 int err = -ENODEV;

	/*
	 * i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * for the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
1749 for (ii = invalid_insert; ii->size; ii++) {
1750 mutex_lock(&ggtt->vm.mutex);
1751 err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
1752 ii->size, ii->alignment,
1753 I915_COLOR_UNEVICTABLE,
1754 ii->start, ii->end,
1755 0);
1756 mutex_unlock(&ggtt->vm.mutex);
1757 if (err != -ENOSPC) {
1758 pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
1759 ii->size, ii->alignment, ii->start, ii->end,
1760 err);
1761 return -EINVAL;
1762 }
1763 }
1764
	/* Start by filling the GGTT */
1766 for (total = 0;
1767 total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1768 total += I915_GTT_PAGE_SIZE) {
1769 struct i915_vma *vma;
1770
1771 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1772 I915_GTT_PAGE_SIZE);
1773 if (IS_ERR(obj)) {
1774 err = PTR_ERR(obj);
1775 goto out;
1776 }
1777
1778 err = i915_gem_object_pin_pages_unlocked(obj);
1779 if (err) {
1780 i915_gem_object_put(obj);
1781 goto out;
1782 }
1783
1784 list_add(&obj->st_link, &objects);
1785
1786 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1787 if (IS_ERR(vma)) {
1788 err = PTR_ERR(vma);
1789 goto out;
1790 }
1791
1792 err = insert_gtt_with_resource(vma);
1793 if (err == -ENOSPC) {
			/* maxed out the GGTT space */
1795 i915_gem_object_put(obj);
1796 break;
1797 }
1798 if (err) {
1799 pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1800 total, ggtt->vm.total, err);
1801 goto out;
1802 }
1803 track_vma_bind(vma);
1804 __i915_vma_pin(vma);
1805
1806 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1807 }
1808
1809 list_for_each_entry(obj, &objects, st_link) {
1810 struct i915_vma *vma;
1811
1812 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1813 if (IS_ERR(vma)) {
1814 err = PTR_ERR(vma);
1815 goto out;
1816 }
1817
1818 if (!drm_mm_node_allocated(&vma->node)) {
1819 pr_err("VMA was unexpectedly evicted!\n");
1820 err = -EINVAL;
1821 goto out;
1822 }
1823
1824 __i915_vma_unpin(vma);
1825 }
1826
	/* If we then reinsert, we should find the same hole */
1828 list_for_each_entry_safe(obj, on, &objects, st_link) {
1829 struct i915_vma *vma;
1830 u64 offset;
1831
1832 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1833 if (IS_ERR(vma)) {
1834 err = PTR_ERR(vma);
1835 goto out;
1836 }
1837
1838 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1839 offset = vma->node.start;
1840
1841 err = i915_vma_unbind_unlocked(vma);
1842 if (err) {
1843 pr_err("i915_vma_unbind failed with err=%d!\n", err);
1844 goto out;
1845 }
1846
1847 err = insert_gtt_with_resource(vma);
1848 if (err) {
1849 pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1850 total, ggtt->vm.total, err);
1851 goto out;
1852 }
1853 track_vma_bind(vma);
1854
1855 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1856 if (vma->node.start != offset) {
1857 pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1858 offset, vma->node.start);
1859 err = -EINVAL;
1860 goto out;
1861 }
1862 }
1863
	/* And then force evictions */
1865 for (total = 0;
1866 total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
1867 total += 2 * I915_GTT_PAGE_SIZE) {
1868 struct i915_vma *vma;
1869
1870 obj = i915_gem_object_create_internal(ggtt->vm.i915,
1871 2 * I915_GTT_PAGE_SIZE);
1872 if (IS_ERR(obj)) {
1873 err = PTR_ERR(obj);
1874 goto out;
1875 }
1876
1877 err = i915_gem_object_pin_pages_unlocked(obj);
1878 if (err) {
1879 i915_gem_object_put(obj);
1880 goto out;
1881 }
1882
1883 list_add(&obj->st_link, &objects);
1884
1885 vma = i915_vma_instance(obj, &ggtt->vm, NULL);
1886 if (IS_ERR(vma)) {
1887 err = PTR_ERR(vma);
1888 goto out;
1889 }
1890
1891 err = insert_gtt_with_resource(vma);
1892 if (err) {
1893 pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1894 total, ggtt->vm.total, err);
1895 goto out;
1896 }
1897 track_vma_bind(vma);
1898
1899 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1900 }
1901
1902 out:
1903 list_for_each_entry_safe(obj, on, &objects, st_link) {
1904 i915_gem_object_unpin_pages(obj);
1905 i915_gem_object_put(obj);
1906 }
1907 return err;
1908 }
1909
1910 int i915_gem_gtt_mock_selftests(void)
1911 {
1912 static const struct i915_subtest tests[] = {
1913 SUBTEST(igt_mock_drunk),
1914 SUBTEST(igt_mock_walk),
1915 SUBTEST(igt_mock_pot),
1916 SUBTEST(igt_mock_fill),
1917 SUBTEST(igt_gtt_reserve),
1918 SUBTEST(igt_gtt_insert),
1919 };
1920 struct drm_i915_private *i915;
1921 struct intel_gt *gt;
1922 int err;
1923
1924 i915 = mock_gem_device();
1925 if (!i915)
1926 return -ENOMEM;
1927
	/* allocate the ggtt */
1929 err = intel_gt_assign_ggtt(to_gt(i915));
1930 if (err)
1931 goto out_put;
1932
1933 gt = to_gt(i915);
1934
1935 mock_init_ggtt(gt);
1936
1937 err = i915_subtests(tests, gt->ggtt);
1938
1939 mock_device_flush(i915);
1940 i915_gem_drain_freed_objects(i915);
1941 mock_fini_ggtt(gt->ggtt);
1942
1943 out_put:
1944 mock_destroy_device(i915);
1945 return err;
1946 }
1947
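/*
 * context_sync() submits an empty request on the context and waits a
 * short while for it to complete, i.e. a cheap way of flushing all
 * previously submitted work on that context.
 */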
1948 static int context_sync(struct intel_context *ce)
1949 {
1950 struct i915_request *rq;
1951 long timeout;
1952
1953 rq = intel_context_create_request(ce);
1954 if (IS_ERR(rq))
1955 return PTR_ERR(rq);
1956
1957 i915_request_get(rq);
1958 i915_request_add(rq);
1959
1960 timeout = i915_request_wait(rq, 0, HZ / 5);
1961 i915_request_put(rq);
1962
1963 return timeout < 0 ? -EIO : 0;
1964 }
1965
1966 static struct i915_request *
1967 submit_batch(struct intel_context *ce, u64 addr)
1968 {
1969 struct i915_request *rq;
1970 int err;
1971
1972 rq = intel_context_create_request(ce);
1973 if (IS_ERR(rq))
1974 return rq;
1975
1976 err = 0;
1977 if (rq->engine->emit_init_breadcrumb)
1978 err = rq->engine->emit_init_breadcrumb(rq);
1979 if (err == 0)
1980 err = rq->engine->emit_bb_start(rq, addr, 0, 0);
1981
1982 if (err == 0)
1983 i915_request_get(rq);
1984 i915_request_add(rq);
1985
1986 return err ? ERR_PTR(err) : rq;
1987 }
1988
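/*
 * Each 64-byte slot in the "act" batch stores its index and then spins by
 * branching back to itself; spinner() returns the dword just before that
 * branch, and end_spin() overwrites it with MI_BATCH_BUFFER_END so the
 * batch terminates instead of looping.
 */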
1989 static u32 *spinner(u32 *batch, int i)
1990 {
1991 return batch + i * 64 / sizeof(*batch) + 4;
1992 }
1993
1994 static void end_spin(u32 *batch, int i)
1995 {
1996 *spinner(batch, i) = MI_BATCH_BUFFER_END;
1997 wmb();
1998 }
1999
2000 static int igt_cs_tlb(void *arg)
2001 {
2002 const unsigned int count = PAGE_SIZE / 64;
2003 const unsigned int chunk_size = count * PAGE_SIZE;
2004 struct drm_i915_private *i915 = arg;
2005 struct drm_i915_gem_object *bbe, *act, *out;
2006 struct i915_gem_engines_iter it;
2007 struct i915_address_space *vm;
2008 struct i915_gem_context *ctx;
2009 struct intel_context *ce;
2010 struct i915_vma *vma;
2011 I915_RND_STATE(prng);
2012 struct file *file;
2013 unsigned int i;
2014 u32 *result;
2015 u32 *batch;
2016 int err = 0;

	/*
	 * Our mission here is to fool the hardware into reusing a stale TLB
	 * entry: we rewrite the PTEs to point at a new batch while the old
	 * address may still be cached, and then check that every write from
	 * the batches landed where it should.
	 */
2024 file = mock_file(i915);
2025 if (IS_ERR(file))
2026 return PTR_ERR(file);
2027
2028 ctx = live_context(i915, file);
2029 if (IS_ERR(ctx)) {
2030 err = PTR_ERR(ctx);
2031 goto out_unlock;
2032 }
2033
2034 vm = i915_gem_context_get_eb_vm(ctx);
2035 if (i915_is_ggtt(vm))
2036 goto out_vm;
2037
	/* bbe: a dummy batch of MI_BATCH_BUFFER_ENDs used to prime the TLBs */
2039 bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
2040 if (IS_ERR(bbe)) {
2041 err = PTR_ERR(bbe);
2042 goto out_vm;
2043 }
2044
2045 batch = i915_gem_object_pin_map_unlocked(bbe, I915_MAP_WC);
2046 if (IS_ERR(batch)) {
2047 err = PTR_ERR(batch);
2048 goto out_put_bbe;
2049 }
2050 memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32));
2051 i915_gem_object_flush_map(bbe);
2052 i915_gem_object_unpin_map(bbe);
2053
2054 act = i915_gem_object_create_internal(i915, PAGE_SIZE);
2055 if (IS_ERR(act)) {
2056 err = PTR_ERR(act);
2057 goto out_put_bbe;
2058 }
2059
	/* Track the execution of each request by writing into a different slot */
2061 batch = i915_gem_object_pin_map_unlocked(act, I915_MAP_WC);
2062 if (IS_ERR(batch)) {
2063 err = PTR_ERR(batch);
2064 goto out_put_act;
2065 }
2066 for (i = 0; i < count; i++) {
2067 u32 *cs = batch + i * 64 / sizeof(*cs);
2068 u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32);
2069
2070 GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
2071 cs[0] = MI_STORE_DWORD_IMM_GEN4;
2072 if (GRAPHICS_VER(i915) >= 8) {
2073 cs[1] = lower_32_bits(addr);
2074 cs[2] = upper_32_bits(addr);
2075 cs[3] = i;
2076 cs[4] = MI_NOOP;
2077 cs[5] = MI_BATCH_BUFFER_START_GEN8;
2078 } else {
2079 cs[1] = 0;
2080 cs[2] = lower_32_bits(addr);
2081 cs[3] = i;
2082 cs[4] = MI_NOOP;
2083 cs[5] = MI_BATCH_BUFFER_START;
2084 }
2085 }
2086
2087 out = i915_gem_object_create_internal(i915, PAGE_SIZE);
2088 if (IS_ERR(out)) {
2089 err = PTR_ERR(out);
2090 goto out_put_batch;
2091 }
2092 i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED);
2093
2094 vma = i915_vma_instance(out, vm, NULL);
2095 if (IS_ERR(vma)) {
2096 err = PTR_ERR(vma);
2097 goto out_put_out;
2098 }
2099
2100 err = i915_vma_pin(vma, 0, 0,
2101 PIN_USER |
2102 PIN_OFFSET_FIXED |
2103 (vm->total - PAGE_SIZE));
2104 if (err)
2105 goto out_put_out;
2106 GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
2107
2108 result = i915_gem_object_pin_map_unlocked(out, I915_MAP_WB);
2109 if (IS_ERR(result)) {
2110 err = PTR_ERR(result);
2111 goto out_put_out;
2112 }
2113
2114 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2115 IGT_TIMEOUT(end_time);
2116 unsigned long pass = 0;
2117
2118 if (!intel_engine_can_store_dword(ce->engine))
2119 continue;
2120
2121 while (!__igt_timeout(end_time, NULL)) {
2122 struct i915_vm_pt_stash stash = {};
2123 struct i915_request *rq;
2124 struct i915_gem_ww_ctx ww;
2125 struct i915_vma_resource *vma_res;
2126 u64 offset;
2127
2128 offset = igt_random_offset(&prng,
2129 0, vm->total - PAGE_SIZE,
2130 chunk_size, PAGE_SIZE);
2131
2132 memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32));
2133
2134 vma = i915_vma_instance(bbe, vm, NULL);
2135 if (IS_ERR(vma)) {
2136 err = PTR_ERR(vma);
2137 goto end;
2138 }
2139
2140 i915_gem_object_lock(bbe, NULL);
2141 err = i915_vma_get_pages(vma);
2142 i915_gem_object_unlock(bbe);
2143 if (err)
2144 goto end;
2145
2146 vma_res = i915_vma_resource_alloc();
2147 if (IS_ERR(vma_res)) {
2148 i915_vma_put_pages(vma);
2149 err = PTR_ERR(vma_res);
2150 goto end;
2151 }
2152
2153 i915_gem_ww_ctx_init(&ww, false);
2154 retry:
2155 err = i915_vm_lock_objects(vm, &ww);
2156 if (err)
2157 goto end_ww;
2158
2159 err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
2160 if (err)
2161 goto end_ww;
2162
2163 err = i915_vm_map_pt_stash(vm, &stash);
2164 if (!err)
2165 vm->allocate_va_range(vm, &stash, offset, chunk_size);
2166 i915_vm_free_pt_stash(vm, &stash);
2167 end_ww:
2168 if (err == -EDEADLK) {
2169 err = i915_gem_ww_ctx_backoff(&ww);
2170 if (!err)
2171 goto retry;
2172 }
2173 i915_gem_ww_ctx_fini(&ww);
2174 if (err) {
2175 kfree(vma_res);
2176 goto end;
2177 }
2178
2179 i915_vma_resource_init_from_vma(vma_res, vma);
2180
2181 for (i = 0; i < count; i++) {
2182 vma_res->start = offset + i * PAGE_SIZE;
2183 vm->insert_entries(vm, vma_res, I915_CACHE_NONE,
2184 0);
2185
2186 rq = submit_batch(ce, vma_res->start);
2187 if (IS_ERR(rq)) {
2188 err = PTR_ERR(rq);
2189 i915_vma_resource_fini(vma_res);
2190 kfree(vma_res);
2191 goto end;
2192 }
2193 i915_request_put(rq);
2194 }
2195 i915_vma_resource_fini(vma_res);
2196 i915_vma_put_pages(vma);
2197
2198 err = context_sync(ce);
2199 if (err) {
2200 pr_err("%s: dummy setup timed out\n",
2201 ce->engine->name);
2202 kfree(vma_res);
2203 goto end;
2204 }
2205
2206 vma = i915_vma_instance(act, vm, NULL);
2207 if (IS_ERR(vma)) {
2208 kfree(vma_res);
2209 err = PTR_ERR(vma);
2210 goto end;
2211 }
2212
2213 i915_gem_object_lock(act, NULL);
2214 err = i915_vma_get_pages(vma);
2215 i915_gem_object_unlock(act);
2216 if (err) {
2217 kfree(vma_res);
2218 goto end;
2219 }
2220
2221 i915_vma_resource_init_from_vma(vma_res, vma);
2222
2223 for (i = 0; i < count; i++) {
2224 struct i915_request *rq;
2225 u32 *cs = batch + i * 64 / sizeof(*cs);
2226 u64 addr;
2227
2228 vma_res->start = offset + i * PAGE_SIZE;
2229 vm->insert_entries(vm, vma_res, I915_CACHE_NONE, 0);
2230
2231 addr = vma_res->start + i * 64;
2232 cs[4] = MI_NOOP;
2233 cs[6] = lower_32_bits(addr);
2234 cs[7] = upper_32_bits(addr);
2235 wmb();
2236
2237 rq = submit_batch(ce, addr);
2238 if (IS_ERR(rq)) {
2239 err = PTR_ERR(rq);
2240 i915_vma_resource_fini(vma_res);
2241 kfree(vma_res);
2242 goto end;
2243 }
2244
				/* Wait until the context chain has started */
2246 if (i == 0) {
2247 while (READ_ONCE(result[i]) &&
2248 !i915_request_completed(rq))
2249 cond_resched();
2250 } else {
2251 end_spin(batch, i - 1);
2252 }
2253
2254 i915_request_put(rq);
2255 }
2256 end_spin(batch, count - 1);
2257
2258 i915_vma_resource_fini(vma_res);
2259 kfree(vma_res);
2260 i915_vma_put_pages(vma);
2261
2262 err = context_sync(ce);
2263 if (err) {
2264 pr_err("%s: writes timed out\n",
2265 ce->engine->name);
2266 goto end;
2267 }
2268
2269 for (i = 0; i < count; i++) {
2270 if (result[i] != i) {
2271 pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n",
2272 ce->engine->name, pass,
2273 offset, i, result[i], i);
2274 err = -EINVAL;
2275 goto end;
2276 }
2277 }
2278
2279 vm->clear_range(vm, offset, chunk_size);
2280 pass++;
2281 }
2282 }
2283 end:
2284 if (igt_flush_test(i915))
2285 err = -EIO;
2286 i915_gem_context_unlock_engines(ctx);
2287 i915_gem_object_unpin_map(out);
2288 out_put_out:
2289 i915_gem_object_put(out);
2290 out_put_batch:
2291 i915_gem_object_unpin_map(act);
2292 out_put_act:
2293 i915_gem_object_put(act);
2294 out_put_bbe:
2295 i915_gem_object_put(bbe);
2296 out_vm:
2297 i915_vm_put(vm);
2298 out_unlock:
2299 fput(file);
2300 return err;
2301 }
2302
2303 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
2304 {
2305 static const struct i915_subtest tests[] = {
2306 SUBTEST(igt_ppgtt_alloc),
2307 SUBTEST(igt_ppgtt_lowlevel),
2308 SUBTEST(igt_ppgtt_drunk),
2309 SUBTEST(igt_ppgtt_walk),
2310 SUBTEST(igt_ppgtt_pot),
2311 SUBTEST(igt_ppgtt_fill),
2312 SUBTEST(igt_ppgtt_shrink),
2313 SUBTEST(igt_ppgtt_shrink_boom),
2314 SUBTEST(igt_ppgtt_misaligned_pin),
2315 SUBTEST(igt_ggtt_lowlevel),
2316 SUBTEST(igt_ggtt_drunk),
2317 SUBTEST(igt_ggtt_walk),
2318 SUBTEST(igt_ggtt_pot),
2319 SUBTEST(igt_ggtt_fill),
2320 SUBTEST(igt_ggtt_page),
2321 SUBTEST(igt_ggtt_misaligned_pin),
2322 SUBTEST(igt_cs_tlb),
2323 };
2324
2325 GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
2326
2327 return i915_subtests(tests, i915);
2328 }