// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prime_numbers.h>
#include <linux/sort.h>

#include <drm/drm_buddy.h>

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_region.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_migrate.h"
#include "i915_memcpy.h"
#include "i915_ttm_buddy_manager.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"

static void close_objects(struct intel_memory_region *mem,
			  struct list_head *objects)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		i915_gem_object_lock(obj, NULL);
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
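		/* No polluting the memory region between tests */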
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	i915_gem_drain_freed_objects(i915);
}

static int igt_mock_fill(void *arg)
{
	struct intel_memory_region *mem = arg;
	resource_size_t total = resource_size(&mem->region);
	resource_size_t page_size;
	resource_size_t rem;
	unsigned long max_pages;
	unsigned long page_num;
	LIST_HEAD(objects);
	int err = 0;

	page_size = PAGE_SIZE;
	max_pages = div64_u64(total, page_size);
	rem = total;

	for_each_prime_number_from(page_num, 1, max_pages) {
		resource_size_t size = page_num * page_size;
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mem, size, 0, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);
		rem -= size;
	}

	if (err == -ENOMEM)
		err = 0;
	if (err == -ENXIO) {
		if (page_num * page_size <= rem) {
			pr_err("%s failed, space still left in region\n",
			       __func__);
			err = -EINVAL;
		} else {
			err = 0;
		}
	}

	close_objects(mem, &objects);

	return err;
}

static struct drm_i915_gem_object *
igt_object_create(struct intel_memory_region *mem,
		  struct list_head *objects,
		  u64 size,
		  unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_region(mem, size, 0, flags);
	if (IS_ERR(obj))
		return obj;

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto put;

	list_add(&obj->st_link, objects);
	return obj;

put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static void igt_object_release(struct drm_i915_gem_object *obj)
{
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj);
	i915_gem_object_unlock(obj);
	list_del(&obj->st_link);
	i915_gem_object_put(obj);
}

static bool is_contiguous(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg;
	dma_addr_t addr = -1;

	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
		if (addr != -1 && sg_dma_address(sg) != addr)
			return false;

		addr = sg_dma_address(sg) + sg_dma_len(sg);
	}

	return true;
}

static int igt_mock_reserve(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	resource_size_t avail = resource_size(&mem->region);
	struct drm_i915_gem_object *obj;
	const u32 chunk_size = SZ_32M;
	u32 i, offset, count, *order;
	u64 allocated, cur_avail;
	I915_RND_STATE(prng);
	LIST_HEAD(objects);
	int err = 0;

	count = avail / chunk_size;
	order = i915_random_order(count, &prng);
	if (!order)
		return 0;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_free_order;
	}

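	/* Reserve a bunch of ranges within the region */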
	for (i = 0; i < count; ++i) {
		u64 start = order[i] * chunk_size;
		u64 size = i915_prandom_u32_max_state(chunk_size, &prng);

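		/* Allow for some really big holes */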
		if (!size)
			continue;

		size = round_up(size, PAGE_SIZE);
		offset = igt_random_offset(&prng, 0, chunk_size, size,
					   PAGE_SIZE);

		err = intel_memory_region_reserve(mem, start + offset, size);
		if (err) {
			pr_err("%s failed to reserve range\n", __func__);
			goto out_close;
		}

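		/* XXX: maybe sanity check the reserved block range here? */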
		avail -= size;
	}

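	/* Try to see if we can allocate from the remaining space */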
	allocated = 0;
	cur_avail = avail;
	do {
		u32 size = i915_prandom_u32_max_state(cur_avail, &prng);

		size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
		obj = igt_object_create(mem, &objects, size, 0);
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -ENXIO)
				break;

			err = PTR_ERR(obj);
			goto out_close;
		}
		cur_avail -= size;
		allocated += size;
	} while (1);

	if (allocated != avail) {
		pr_err("%s mismatch between allocation and free space\n",
		       __func__);
		err = -EINVAL;
	}

out_close:
	close_objects(mem, &objects);
	intel_memory_region_destroy(mem);
out_free_order:
	kfree(order);
	return err;
}

static int igt_mock_contiguous(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_gem_object *obj;
	unsigned long n_objects;
	LIST_HEAD(objects);
	LIST_HEAD(holes);
	I915_RND_STATE(prng);
	resource_size_t total;
	resource_size_t min;
	u64 target;
	int err = 0;

	total = resource_size(&mem->region);

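	/* Min: a single page object must come back contiguous */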
	obj = igt_object_create(mem, &objects, PAGE_SIZE,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (!is_contiguous(obj)) {
		pr_err("%s min object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

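	/* Max: an object covering the entire region */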
	obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (!is_contiguous(obj)) {
		pr_err("%s max object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

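	/* Internal fragmentation should not bleed into the object size */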
	target = i915_prandom_u64_state(&prng);
	div64_u64_rem(target, total, &target);
	target = round_up(target, PAGE_SIZE);
	target = max_t(u64, PAGE_SIZE, target);

	obj = igt_object_create(mem, &objects, target,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->base.size != target) {
		pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
		       obj->base.size, target);
		err = -EINVAL;
		goto err_close_objects;
	}

	if (!is_contiguous(obj)) {
		pr_err("%s object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

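	/*
	 * Try to fragment the address space, such that half of it is free, but
	 * the max contiguous block size is SZ_64K.
	 */
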
	target = SZ_64K;
	n_objects = div64_u64(total, target);

	while (n_objects--) {
		struct list_head *list;

		if (n_objects % 2)
			list = &holes;
		else
			list = &objects;

		obj = igt_object_create(mem, list, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close_objects;
		}
	}

	close_objects(mem, &holes);

	min = target;
	target = total >> 1;

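	/* Make sure we can still allocate all the fragmented space */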
	obj = igt_object_create(mem, &objects, target, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_close_objects;
	}

	igt_object_release(obj);

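	/*
	 * Even though we have enough free space, we don't have a big enough
	 * contiguous block. Make sure that holds true.
	 */
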
	do {
		bool should_fail = target > min;

		obj = igt_object_create(mem, &objects, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (should_fail != IS_ERR(obj)) {
			pr_err("%s target allocation(%llx) mismatch\n",
			       __func__, target);
			err = -EINVAL;
			goto err_close_objects;
		}

		target >>= 1;
	} while (target >= PAGE_SIZE);

err_close_objects:
	list_splice_tail(&holes, &objects);
	close_objects(mem, &objects);
	return err;
}

static int igt_mock_splintered_region(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	struct i915_ttm_buddy_resource *res;
	struct drm_i915_gem_object *obj;
	struct drm_buddy *mm;
	unsigned int expected_order;
	LIST_HEAD(objects);
	u64 size;
	int err = 0;

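	/*
	 * Sanity check that we can still allocate everything even if the
	 * mm.max_order != mm.size, i.e. our starting address space size is not
	 * a power-of-two.
	 */
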
	size = (SZ_4G - 1) & PAGE_MASK;
	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	obj = igt_object_create(mem, &objects, size, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_close;
	}

	res = to_ttm_buddy_resource(obj->mm.res);
	mm = res->mm;
	if (mm->size != size) {
		pr_err("%s size mismatch(%llu != %llu)\n",
		       __func__, mm->size, size);
		err = -EINVAL;
		goto out_close;
	}

	expected_order = get_order(rounddown_pow_of_two(size));
	if (mm->max_order != expected_order) {
		pr_err("%s order mismatch(%u != %u)\n",
		       __func__, mm->max_order, expected_order);
		err = -EINVAL;
		goto out_close;
	}

	close_objects(mem, &objects);

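	/*
	 * While we should be able to allocate everything without any flag
	 * restrictions, if we consider I915_BO_ALLOC_CONTIGUOUS then we are
	 * actually limited to the largest power-of-two for the region size,
	 * i.e. max_order, due to the inner workings of the buddy allocator.
	 * So make sure that does indeed hold true.
	 */
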
	obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
	if (!IS_ERR(obj)) {
		pr_err("%s too large contiguous allocation was not rejected\n",
		       __func__);
		err = -EINVAL;
		goto out_close;
	}

	obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj)) {
		pr_err("%s largest possible contiguous allocation failed\n",
		       __func__);
		err = PTR_ERR(obj);
		goto out_close;
	}

out_close:
	close_objects(mem, &objects);
	intel_memory_region_destroy(mem);
	return err;
}

#ifndef SZ_8G
#define SZ_8G BIT_ULL(33)
#endif

static int igt_mock_max_segment(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	struct i915_ttm_buddy_resource *res;
	struct drm_i915_gem_object *obj;
	struct drm_buddy_block *block;
	struct drm_buddy *mm;
	struct list_head *blocks;
	struct scatterlist *sg;
	I915_RND_STATE(prng);
	LIST_HEAD(objects);
	unsigned int max_segment;
	unsigned int ps;
	u64 size;
	int err = 0;

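	/*
	 * While we may create very large contiguous blocks, we may need
	 * to break those down for consumption elsewhere. In particular,
	 * dma-mapping with scatterlist elements have an implicit limit of
	 * UINT_MAX on each element.
	 */
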
	size = SZ_8G;
	ps = PAGE_SIZE;
	if (i915_prandom_u64_state(&prng) & 1)
		ps = SZ_64K;

	max_segment = round_down(UINT_MAX, ps);

	mem = mock_region_create(i915, 0, size, ps, 0, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	obj = igt_object_create(mem, &objects, size, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_put;
	}

	res = to_ttm_buddy_resource(obj->mm.res);
	blocks = &res->blocks;
	mm = res->mm;
	size = 0;
	list_for_each_entry(block, blocks, link) {
		if (drm_buddy_block_size(mm, block) > size)
			size = drm_buddy_block_size(mm, block);
	}
	if (size < max_segment) {
		pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
		       __func__, max_segment, size);
		err = -EINVAL;
		goto out_close;
	}

	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
		dma_addr_t daddr = sg_dma_address(sg);

		if (sg->length > max_segment) {
			pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
			       __func__, sg->length, max_segment);
			err = -EINVAL;
			goto out_close;
		}

		if (!IS_ALIGNED(daddr, ps)) {
			pr_err("%s: Created an unaligned scatterlist entry, addr=%pa, ps=%u\n",
			       __func__, &daddr, ps);
			err = -EINVAL;
			goto out_close;
		}
	}

out_close:
	close_objects(mem, &objects);
out_put:
	intel_memory_region_destroy(mem);
	return err;
}

static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = obj->mm.region;
	struct i915_ttm_buddy_resource *bman_res =
		to_ttm_buddy_resource(obj->mm.res);
	struct drm_buddy *mm = bman_res->mm;
	struct drm_buddy_block *block;
	u64 total;

	total = 0;
	list_for_each_entry(block, &bman_res->blocks, link) {
		u64 start = drm_buddy_block_offset(block);
		u64 end = start + drm_buddy_block_size(mm, block);

		if (start < mr->io_size)
			total += min_t(u64, end, mr->io_size) - start;
	}

	return total;
}

static int igt_mock_io_size(void *arg)
{
	struct intel_memory_region *mr = arg;
	struct drm_i915_private *i915 = mr->i915;
	struct drm_i915_gem_object *obj;
	u64 mappable_theft_total;
	u64 io_size;
	u64 total;
	u64 ps;
	u64 rem;
	u64 size;
	I915_RND_STATE(prng);
	LIST_HEAD(objects);
	int err = 0;

	ps = SZ_4K;
	if (i915_prandom_u64_state(&prng) & 1)
		ps = SZ_64K;

	div64_u64_rem(i915_prandom_u64_state(&prng), SZ_8G, &total);
	total = round_down(total, ps);
	total = max_t(u64, total, SZ_1G);

	div64_u64_rem(i915_prandom_u64_state(&prng), total - ps, &io_size);
	io_size = round_down(io_size, ps);
	io_size = max_t(u64, io_size, SZ_256M);

	pr_info("%s with ps=%llx, io_size=%llx, total=%llx\n",
		__func__, ps, io_size, total);

	mr = mock_region_create(i915, 0, total, ps, 0, io_size);
	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto out_err;
	}

	mappable_theft_total = 0;
	rem = total - io_size;
	do {
		div64_u64_rem(i915_prandom_u64_state(&prng), rem, &size);
		size = round_down(size, ps);
		size = max(size, ps);

		obj = igt_object_create(mr, &objects, size,
					I915_BO_ALLOC_GPU_ONLY);
		if (IS_ERR(obj)) {
			pr_err("%s TOPDOWN failed with rem=%llx, size=%llx\n",
			       __func__, rem, size);
			err = PTR_ERR(obj);
			goto out_close;
		}

		mappable_theft_total += igt_object_mappable_total(obj);
		rem -= size;
	} while (rem);

	pr_info("%s mappable theft=(%lluMiB/%lluMiB), total=%lluMiB\n",
		__func__,
		(u64)mappable_theft_total >> 20,
		(u64)io_size >> 20,
		(u64)total >> 20);

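	/*
	 * Even if we allocate all of the non-mappable portion, we should still
	 * be able to dip into the mappable portion.
	 */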
	obj = igt_object_create(mr, &objects, io_size,
				I915_BO_ALLOC_GPU_ONLY);
	if (IS_ERR(obj)) {
		pr_err("%s allocation unexpectedly failed\n", __func__);
		err = PTR_ERR(obj);
		goto out_close;
	}

	close_objects(mr, &objects);

	rem = io_size;
	do {
		div64_u64_rem(i915_prandom_u64_state(&prng), rem, &size);
		size = round_down(size, ps);
		size = max(size, ps);

		obj = igt_object_create(mr, &objects, size, 0);
		if (IS_ERR(obj)) {
			pr_err("%s MAPPABLE failed with rem=%llx, size=%llx\n",
			       __func__, rem, size);
			err = PTR_ERR(obj);
			goto out_close;
		}

		if (igt_object_mappable_total(obj) != size) {
			pr_err("%s allocation is not mappable(size=%llx)\n",
			       __func__, size);
			err = -EINVAL;
			goto out_close;
		}
		rem -= size;
	} while (rem);

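	/*
	 * The mappable portion is now fully exhausted, so any further
	 * mappable allocation, no matter how small, should be rejected.
	 */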
	obj = igt_object_create(mr, &objects, ps, 0);
	if (!IS_ERR(obj)) {
		pr_err("%s allocation unexpectedly succeeded\n", __func__);
		err = -EINVAL;
		goto out_close;
	}

out_close:
	close_objects(mr, &objects);
	intel_memory_region_destroy(mr);
out_err:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_gpu_write_dw(struct intel_context *ce,
			    struct i915_vma *vma,
			    u32 dword,
			    u32 value)
{
	return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
			       vma->size >> PAGE_SHIFT, value);
}

static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

static int igt_gpu_write(struct i915_gem_context *ctx,
			 struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int count;
	struct i915_vma *vma;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	n = 0;
	count = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		vm = ce->vm;
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_free;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_free;

	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	do {
		u32 rng = prandom_u32_state(&prng);
		u32 dword = offset_in_page(rng) / 4;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		err = igt_gpu_write_dw(ce, vma, dword, rng);
		if (err)
			break;

		i915_gem_object_lock(obj, NULL);
		err = igt_cpu_check(obj, dword, rng);
		i915_gem_object_unlock(obj);
		if (err)
			break;
	} while (!__igt_timeout(end_time, NULL));
	i915_gem_context_unlock_engines(ctx);

out_free:
	kfree(order);

	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_lmem_create(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_put;

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static int igt_lmem_create_with_ps(void *arg)
{
	struct drm_i915_private *i915 = arg;
	int err = 0;
	u32 ps;

	for (ps = PAGE_SIZE; ps <= SZ_1G; ps <<= 1) {
		struct drm_i915_gem_object *obj;
		dma_addr_t daddr;

		obj = __i915_gem_object_create_lmem_with_ps(i915, ps, ps, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -ENXIO || err == -E2BIG) {
				pr_info("%s not enough lmem for ps(%u) err=%d\n",
					__func__, ps, err);
				err = 0;
			}

			break;
		}

		i915_gem_object_lock(obj, NULL);

		if (obj->base.size != ps) {
			pr_err("%s size(%zu) != ps(%u)\n",
			       __func__, obj->base.size, ps);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages(obj);
		if (err) {
			if (err == -ENXIO || err == -E2BIG || err == -ENOMEM) {
				pr_info("%s not enough lmem for ps(%u) err=%d\n",
					__func__, ps, err);
				err = 0;
			}
			goto out_put;
		}

		daddr = i915_gem_object_get_dma_address(obj, 0);
		if (!IS_ALIGNED(daddr, ps)) {
			pr_err("%s daddr(%pa) not aligned with ps(%u)\n",
			       __func__, &daddr, ps);
			err = -EINVAL;
			goto out_unpin;
		}

out_unpin:
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
out_put:
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);

		if (err)
			break;
	}

	return err;
}

static int igt_lmem_create_cleared_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	u32 size, i;
	int err;

	i915_gem_drain_freed_objects(i915);

	size = max_t(u32, PAGE_SIZE, i915_prandom_u32_max_state(SZ_32M, &prng));
	size = round_up(size, PAGE_SIZE);
	i = 0;

	do {
		struct drm_i915_gem_object *obj;
		unsigned int flags;
		u32 dword, val;
		void *vaddr;

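		/*
		 * Alternate between cleared and uncleared allocations, while
		 * also dirtying the pages each time to check that the pages are
		 * always cleared if requested, since we should get some overlap
		 * of the underlying pages, if not all, since we are the only
		 * user.
		 */
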
		flags = I915_BO_ALLOC_CPU_CLEAR;
		if (i & 1)
			flags = 0;

		obj = i915_gem_object_create_lmem(i915, size, flags);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		i915_gem_object_lock(obj, NULL);
		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		dword = i915_prandom_u32_max_state(PAGE_SIZE / sizeof(u32),
						   &prng);

		if (flags & I915_BO_ALLOC_CPU_CLEAR) {
			err = igt_cpu_check(obj, dword, 0);
			if (err) {
				pr_err("%s failed with size=%u, flags=%u\n",
				       __func__, size, flags);
				goto out_unpin;
			}
		}

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto out_unpin;
		}

		val = prandom_u32_state(&prng);

		memset32(vaddr, val, obj->base.size / sizeof(u32));

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);
out_unpin:
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
out_put:
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);

		if (err)
			break;
		++i;
	} while (!__igt_timeout(end_time, NULL));

	pr_info("%s completed (%u) iterations\n", __func__, i);

	return err;
}

static int igt_lmem_write_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct file *file;
	I915_RND_STATE(prng);
	u32 sz;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);

	obj = i915_gem_object_create_lmem(i915, sz, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_file;
	}

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_put;

	err = igt_gpu_write(ctx, obj);
	if (err)
		pr_err("igt_gpu_write failed(%d)\n", err);

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
out_file:
	fput(file);
	return err;
}

static struct intel_engine_cs *
random_engine_class(struct drm_i915_private *i915,
		    unsigned int class,
		    struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	count = 0;
	for (engine = intel_engine_lookup_user(i915, class, 0);
	     engine && engine->uabi_class == class;
	     engine = rb_entry_safe(rb_next(&engine->uabi_node),
				    typeof(*engine), uabi_node))
		count++;

	count = i915_prandom_u32_max_state(count, prng);
	return intel_engine_lookup_user(i915, class, count);
}

static int igt_lmem_write_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	u32 bytes[] = {
		0,
		sizeof(u32),
		sizeof(u64),
		64,
		PAGE_SIZE,
		PAGE_SIZE - sizeof(u32),
		PAGE_SIZE - sizeof(u64),
		PAGE_SIZE - 64,
	};
	struct intel_engine_cs *engine;
	struct i915_request *rq;
	u32 *vaddr;
	u32 sz;
	u32 i;
	int *order;
	int count;
	int err;

	engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
	if (!engine)
		return 0;

	pr_info("%s: using %s\n", __func__, engine->name);

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
	sz = max_t(u32, 2 * PAGE_SIZE, sz);

	obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	i915_gem_object_lock(obj, NULL);

	err = dma_resv_reserve_fences(obj->base.resv, 1);
	if (err) {
		i915_gem_object_unlock(obj);
		goto out_put;
	}

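	/* Put the pages into a known state -- from the gpu for added fun */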
	intel_engine_pm_get(engine);
	err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
					  obj->mm.pages->sgl, I915_CACHE_NONE,
					  true, 0xdeadbeaf, &rq);
	if (rq) {
		dma_resv_add_fence(obj->base.resv, &rq->fence,
				   DMA_RESV_USAGE_WRITE);
		i915_request_put(rq);
	}

	intel_engine_pm_put(engine);
	if (!err)
		err = i915_gem_object_set_to_wc_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_unpin;

	count = ARRAY_SIZE(bytes);
	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_unpin;
	}

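	/* We want to throw in a random width/align */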
	bytes[0] = igt_random_offset(&prng, 64, PAGE_SIZE - 64, 0, sizeof(u32));
	GEM_BUG_ON(!IS_ALIGNED(bytes[0], sizeof(u32)));

	i = 0;
	do {
		u32 offset;
		u32 align;
		u32 dword;
		u32 size;
		u32 val;

		size = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));

		offset = igt_random_offset(&prng, 0, obj->base.size,
					   size, align);

		val = prandom_u32_state(&prng);
		memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
			 size / sizeof(u32));

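		/*
		 * Sample a random dw -- don't waste precious time reading
		 * every dw.
		 */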
		dword = igt_random_offset(&prng, offset,
					  offset + size,
					  sizeof(u32), sizeof(u32));
		dword /= sizeof(u32);
		if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
			pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
			       __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
			       size, align, offset);
			err = -EINVAL;
			break;
		}
	} while (!__igt_timeout(end_time, NULL));

	kfree(order);

out_unpin:
	i915_gem_object_unpin_map(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static const char *repr_type(u32 type)
{
	switch (type) {
	case I915_MAP_WB:
		return "WB";
	case I915_MAP_WC:
		return "WC";
	}

	return "";
}

static struct drm_i915_gem_object *
create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
			  void **out_addr)
{
	struct drm_i915_gem_object *obj;
	void *addr;

	obj = i915_gem_object_create_region(mr, size, 0, 0);
	if (IS_ERR(obj)) {
		if (PTR_ERR(obj) == -ENOSPC)
			return ERR_PTR(-ENODEV);
		return obj;
	}

	addr = i915_gem_object_pin_map_unlocked(obj, type);
	if (IS_ERR(addr)) {
		i915_gem_object_put(obj);
		if (PTR_ERR(addr) == -ENXIO)
			return ERR_PTR(-ENODEV);
		return addr;
	}

	*out_addr = addr;
	return obj;
}

static int wrap_ktime_compare(const void *A, const void *B)
{
	const ktime_t *a = A, *b = B;

	return ktime_compare(*a, *b);
}

static void igt_memcpy_long(void *dst, const void *src, size_t size)
{
	unsigned long *tmp = dst;
	const unsigned long *s = src;

	size = size / sizeof(unsigned long);
	while (size--)
		*tmp++ = *s++;
}

static inline void igt_memcpy(void *dst, const void *src, size_t size)
{
	memcpy(dst, src, size);
}

static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
{
	i915_memcpy_from_wc(dst, src, size);
}

static int _perf_memcpy(struct intel_memory_region *src_mr,
			struct intel_memory_region *dst_mr,
			u64 size, u32 src_type, u32 dst_type)
{
	struct drm_i915_private *i915 = src_mr->i915;
	const struct {
		const char *name;
		void (*copy)(void *dst, const void *src, size_t size);
		bool skip;
	} tests[] = {
		{
			"memcpy",
			igt_memcpy,
		},
		{
			"memcpy_long",
			igt_memcpy_long,
		},
		{
			"memcpy_from_wc",
			igt_memcpy_from_wc,
			!i915_has_memcpy_from_wc(),
		},
	};
	struct drm_i915_gem_object *src, *dst;
	void *src_addr, *dst_addr;
	int ret = 0;
	int i;

	src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
	if (IS_ERR(src)) {
		ret = PTR_ERR(src);
		goto out;
	}

	dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
	if (IS_ERR(dst)) {
		ret = PTR_ERR(dst);
		goto out_unpin_src;
	}

	for (i = 0; i < ARRAY_SIZE(tests); ++i) {
		ktime_t t[5];
		int pass;

		if (tests[i].skip)
			continue;

		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
			ktime_t t0, t1;

			t0 = ktime_get();

			tests[i].copy(dst_addr, src_addr, size);

			t1 = ktime_get();
			t[pass] = ktime_sub(t1, t0);
		}

		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
		if (t[0] <= 0) {
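			/* Ignore the impossible, to protect our sanity */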
1274 pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4lluKiB copy, unstable measurement [%lld, %lld]\n",
1275 __func__,
1276 src_mr->name, repr_type(src_type),
1277 dst_mr->name, repr_type(dst_type),
1278 tests[i].name, size >> 10,
1279 t[0], t[4]);
1280 continue;
1281 }
1282
1283 pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
1284 __func__,
1285 src_mr->name, repr_type(src_type),
1286 dst_mr->name, repr_type(dst_type),
1287 tests[i].name, size >> 10,
1288 div64_u64(mul_u32_u32(4 * size,
1289 1000 * 1000 * 1000),
1290 t[1] + 2 * t[2] + t[3]) >> 20);
1291
1292 cond_resched();
1293 }
1294
1295 i915_gem_object_unpin_map(dst);
1296 i915_gem_object_put(dst);
1297 out_unpin_src:
1298 i915_gem_object_unpin_map(src);
1299 i915_gem_object_put(src);
1300
1301 i915_gem_drain_freed_objects(i915);
1302 out:
1303 if (ret == -ENODEV)
1304 ret = 0;
1305
1306 return ret;
1307 }
1308
static int perf_memcpy(void *arg)
{
	struct drm_i915_private *i915 = arg;
	static const u32 types[] = {
		I915_MAP_WB,
		I915_MAP_WC,
	};
	static const u32 sizes[] = {
		SZ_4K,
		SZ_64K,
		SZ_4M,
	};
	struct intel_memory_region *src_mr, *dst_mr;
	int src_id, dst_id;
	int i, j, k;
	int ret;

	for_each_memory_region(src_mr, i915, src_id) {
		for_each_memory_region(dst_mr, i915, dst_id) {
			for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
				for (j = 0; j < ARRAY_SIZE(types); ++j) {
					for (k = 0; k < ARRAY_SIZE(types); ++k) {
						ret = _perf_memcpy(src_mr,
								   dst_mr,
								   sizes[i],
								   types[j],
								   types[k]);
						if (ret)
							return ret;
					}
				}
			}
		}
	}

	return 0;
}

int intel_memory_region_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_reserve),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_mock_contiguous),
		SUBTEST(igt_mock_splintered_region),
		SUBTEST(igt_mock_max_segment),
		SUBTEST(igt_mock_io_size),
	};
	struct intel_memory_region *mem;
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_unref;
	}

	err = i915_subtests(tests, mem);

	intel_memory_region_destroy(mem);
out_unref:
	mock_destroy_device(i915);
	return err;
}

int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_lmem_create),
		SUBTEST(igt_lmem_create_with_ps),
		SUBTEST(igt_lmem_create_cleared_cpu),
		SUBTEST(igt_lmem_write_cpu),
		SUBTEST(igt_lmem_write_gpu),
	};

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_live_subtests(tests, i915);
}

int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(perf_memcpy),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_live_subtests(tests, i915);
}