/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>

#include "i915_selftest.h"

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"

#include "gt/intel_gt.h"

#include "igt_gem_utils.h"
#include "mock_context.h"

#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
#include "selftests/mock_region.h"
#include "selftests/i915_random.h"

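/*
 * All contexts used by these selftests enable vm->scrub_64K, forcing the
 * ppgtt to scrub 64K PTEs back to scratch on unbind instead of leaving them
 * stale for a later bind to inherit.
 */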
static struct i915_gem_context *hugepage_ctx(struct drm_i915_private *i915,
					     struct file *file)
{
	struct i915_gem_context *ctx = live_context(i915, file);
	struct i915_address_space *vm;

	if (IS_ERR(ctx))
		return ctx;

	vm = ctx->vm;
	if (vm)
		WRITE_ONCE(vm->scrub_64K, true);

	return ctx;
}

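/* Ordered from largest to smallest so the loops below can fill greedily. */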
static const unsigned int page_sizes[] = {
	I915_GTT_PAGE_SIZE_2M,
	I915_GTT_PAGE_SIZE_64K,
	I915_GTT_PAGE_SIZE_4K,
};

static unsigned int get_largest_page_size(struct drm_i915_private *i915,
					  u64 rem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
		unsigned int page_size = page_sizes[i];

		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
			return page_size;
	}

	return 0;
}
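/*
 * For example, with 2M | 64K | 4K supported, a 3M + 4K remainder selects 2M
 * first, then 1M worth of 64K pages, and finally a single 4K page.
 */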

static void huge_pages_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	unsigned int page_mask = obj->mm.page_mask;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	/*
	 * Our goal here is simple, we want to greedily fill the object from
	 * largest to smallest page-size, while ensuring that we use *every*
	 * page-size as per the given page-mask.
	 */
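	/*
	 * For example, page_mask = 2M | 64K | 4K on a 2M + 68K object yields
	 * exactly one 2M chunk, one 64K chunk and one 4K chunk, in that order.
	 */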
	do {
		unsigned int bit = ilog2(page_mask);
		unsigned int page_size = BIT(bit);
		int order = get_order(page_size);

		do {
			struct page *page;

			GEM_BUG_ON(order >= MAX_ORDER);
			page = alloc_pages(GFP | __GFP_ZERO, order);
			if (!page)
				goto err;

			sg_set_page(sg, page, page_size, 0);
			sg_page_sizes |= page_size;
			st->nents++;

			rem -= page_size;
			if (!rem) {
				sg_mark_end(sg);
				break;
			}

			sg = __sg_next(sg);
		} while ((rem - ((page_size-1) & page_mask)) >= page_size);

		page_mask &= (page_size-1);
	} while (page_mask);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	huge_pages_free_pages(st);

	return -ENOMEM;
}

static void put_huge_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_pages_free_pages(pages);

	obj->mm.dirty = false;

	__start_cpu_write(obj);
}

static const struct drm_i915_gem_object_ops huge_page_ops = {
	.name = "huge-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = get_huge_pages,
	.put_pages = put_huge_pages,
};

static struct drm_i915_gem_object *
huge_pages_object(struct drm_i915_private *i915,
		  u64 size,
		  unsigned int page_mask)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &huge_page_ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	obj->mm.page_mask = page_mask;

	return obj;
}

static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Use optimal page-sized chunks to fill in the sg table */
	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	do {
		unsigned int page_size = get_largest_page_size(i915, rem);
		unsigned int len = min(page_size * div_u64(rem, page_size),
				       max_len);

		GEM_BUG_ON(!page_size);

		sg->offset = 0;
		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = page_size;

		sg_page_sizes |= len;

		st->nents++;

		rem -= len;
		if (!rem) {
			sg_mark_end(sg);
			break;
		}

		sg = sg_next(sg);
	} while (1);

	i915_sg_trim(st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;
}

static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int page_size;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 1;

	page_size = get_largest_page_size(i915, obj->base.size);
	GEM_BUG_ON(!page_size);

	sg->offset = 0;
	sg->length = obj->base.size;
	sg_dma_len(sg) = obj->base.size;
	sg_dma_address(sg) = page_size;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;
#undef GFP
}

static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	fake_free_huge_pages(obj, pages);
	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages,
	.put_pages = fake_put_huge_pages,
};

static const struct drm_i915_gem_object_ops fake_ops_single = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages_single,
	.put_pages = fake_put_huge_pages,
};

static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (size >> PAGE_SHIFT > UINT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	if (single)
		i915_gem_object_init(obj, &fake_ops_single, &lock_class, 0);
	else
		i915_gem_object_init(obj, &fake_ops, &lock_class, 0);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	return obj;
}

static int igt_check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	/* We have to wait for the async bind to complete before our asserts */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
		       vma->page_sizes.sg & ~supported, supported);
		err = -EINVAL;
	}

	if (!HAS_PAGE_SIZES(i915, vma->resource->page_sizes_gtt)) {
		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
		       vma->resource->page_sizes_gtt & ~supported, supported);
		err = -EINVAL;
	}

	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
		err = -EINVAL;
	}

	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
		err = -EINVAL;
	}

	/*
	 * The dma-api gives us very little say over the alignment of dma
	 * addresses, however for LMEM we have total control over the backing
	 * blocks and so can guarantee it. Therefore if the object is in LMEM,
	 * mapped at a 2M-aligned GTT address and built from 2M physical
	 * chunks, the inserted PTEs must use 2M GTT pages.
	 */
	if (i915_gem_object_is_lmem(obj) &&
	    IS_ALIGNED(vma->node.start, SZ_2M) &&
	    vma->page_sizes.sg & SZ_2M &&
	    vma->resource->page_sizes_gtt < SZ_2M) {
		pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
		       vma->page_sizes.sg, vma->resource->page_sizes_gtt);
		err = -EINVAL;
	}

	return err;
}

static int igt_mock_exhaust_device_supported_pages(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i, j, single;
	int err;

	/*
	 * Sanity check creating objects with every valid page support
	 * combination for our mock device.
	 */
	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
		unsigned int combination = SZ_4K;

		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
			if (i & BIT(j))
				combination |= page_sizes[j];
		}

		mkwrite_device_info(i915)->page_sizes = combination;

		for (single = 0; single <= 1; ++single) {
			obj = fake_huge_pages_object(i915, combination, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			if (obj->base.size != combination) {
				pr_err("obj->base.size=%zu, expected=%u\n",
				       obj->base.size, combination);
				err = -EINVAL;
				goto out_put;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.sg != combination) {
				pr_err("page_sizes.sg=%u, expected=%u\n",
				       vma->page_sizes.sg, combination);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);
			i915_gem_object_put(obj);

			if (err)
				goto out_device;
		}
	}

	goto out_device;

out_put:
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = saved_mask;

	return err;
}

static int igt_mock_memory_region_huge_pages(void *arg)
{
	const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct intel_memory_region *mem;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int bit;
	int err = 0;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0, 0);
	if (IS_ERR(mem)) {
		pr_err("%s failed to create memory region\n", __func__);
		return PTR_ERR(mem);
	}

	for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		unsigned int page_size = BIT(bit);
		resource_size_t phys;
		int i;

		for (i = 0; i < ARRAY_SIZE(flags); ++i) {
			obj = i915_gem_object_create_region(mem,
							    page_size, page_size,
							    flags[i]);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_region;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_unpin;

			phys = i915_gem_object_get_dma_address(obj, 0);
			if (!IS_ALIGNED(phys, page_size)) {
				pr_err("%s addr misaligned(%pa) page_size=%u\n",
				       __func__, &phys, page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			if (vma->resource->page_sizes_gtt != page_size) {
				pr_err("%s page_sizes.gtt=%u, expected=%u\n",
				       __func__, vma->resource->page_sizes_gtt,
				       page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			i915_vma_unpin(vma);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	goto out_region;

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_region:
	intel_memory_region_destroy(mem);
	return err;
}

static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	int bit;
	int err;

	/*
	 * Sanity check dma misalignment for huge pages -- the dma addresses we
	 * insert into the paging structures need to always respect the page
	 * size alignment.
	 */

	bit = ilog2(I915_GTT_PAGE_SIZE_64K);

	for_each_set_bit_from(bit, &supported,
			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		IGT_TIMEOUT(end_time);
		unsigned int page_size = BIT(bit);
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int offset;
		unsigned int size =
			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
		struct i915_vma *vma;

		obj = fake_huge_pages_object(i915, size, true);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zu, expected=%u\n",
			       obj->base.size, size);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err)
			goto out_put;

		/* Force the page size for this object */
		obj->mm.page_sizes.sg = page_size;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto out_unpin;

		err = igt_check_page_sizes(vma);

		if (vma->resource->page_sizes_gtt != page_size) {
			pr_err("page_sizes.gtt=%u, expected %u\n",
			       vma->resource->page_sizes_gtt, page_size);
			err = -EINVAL;
		}

		i915_vma_unpin(vma);

		if (err)
			goto out_unpin;

		/*
		 * Try all the other valid offsets until the next
		 * boundary -- should always fall back to using 4K
		 * pages.
		 */
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind_unlocked(vma);
			if (err)
				goto out_unpin;

			err = i915_vma_pin(vma, 0, 0, flags | offset);
			if (err)
				goto out_unpin;

			err = igt_check_page_sizes(vma);

			if (vma->resource->page_sizes_gtt != I915_GTT_PAGE_SIZE_4K) {
				pr_err("page_sizes.gtt=%u, expected %llu\n",
				       vma->resource->page_sizes_gtt,
				       I915_GTT_PAGE_SIZE_4K);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);

			if (err)
				goto out_unpin;

			if (igt_timeout(end_time,
					"%s timed out at offset %x with page-size %x\n",
					__func__, offset, page_size))
				break;
		}

		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static void close_object_list(struct list_head *objects,
			      struct i915_ppgtt *ppgtt)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		list_del(&obj->st_link);
		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);
	}
}

static int igt_mock_ppgtt_huge_fill(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
	unsigned long page_num;
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err = -ENODEV;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		u64 size = page_num << PAGE_SHIFT;
		struct i915_vma *vma;
		unsigned int expected_gtt = 0;
		int i;

		obj = fake_huge_pages_object(i915, size, single);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zd, expected=%llu\n",
			       obj->base.size, size);
			i915_gem_object_put(obj);
			err = -EINVAL;
			break;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			break;

		err = igt_check_page_sizes(vma);
		if (err) {
			i915_vma_unpin(vma);
			break;
		}

		/*
		 * Figure out the expected gtt page size knowing that we go from
		 * largest to smallest page size sg chunks, and that we align to
		 * the largest page size.
		 */
		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
			unsigned int page_size = page_sizes[i];

			if (HAS_PAGE_SIZES(i915, page_size) &&
			    size >= page_size) {
				expected_gtt |= page_size;
				size &= page_size-1;
			}
		}

		GEM_BUG_ON(!expected_gtt);
		GEM_BUG_ON(size);

		if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;

		i915_vma_unpin(vma);

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
			if (!IS_ALIGNED(vma->node.start,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.start(%llx) not aligned to 2M\n",
				       vma->node.start);
				err = -EINVAL;
				break;
			}

			if (!IS_ALIGNED(vma->node.size,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.size(%llx) not aligned to 2M\n",
				       vma->node.size);
				err = -EINVAL;
				break;
			}
		}

		if (vma->resource->page_sizes_gtt != expected_gtt) {
			pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
			       vma->resource->page_sizes_gtt, expected_gtt,
			       obj->base.size, str_yes_no(!!single));
			err = -EINVAL;
			break;
		}

		if (igt_timeout(end_time,
				"%s timed out at size %zd\n",
				__func__, obj->base.size))
			break;

		single = !single;
	}

	close_object_list(&objects, ppgtt);

	if (err == -ENOMEM || err == -ENOSPC)
		err = 0;

	return err;
}

static int igt_mock_ppgtt_64K(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct drm_i915_gem_object *obj;
	const struct object_info {
		unsigned int size;
		unsigned int gtt;
		unsigned int offset;
	} objects[] = {
		/* Cases with forced padding/alignment */
		{
			.size = SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_64K + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_64K - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		/* Try without any forced padding/alignment */
		{
			.size = SZ_64K,
			.offset = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
		{
			.size = SZ_128K,
			.offset = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
	};
	struct i915_vma *vma;
	int i, single;
	int err;

	/*
	 * Sanity check some of the trickiness with 64K pages -- either we can
	 * safely mark the whole page-table(2M block) as 64K, or we have to
	 * always fall back to 4K.
	 */
	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
		return 0;

	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
		unsigned int size = objects[i].size;
		unsigned int expected_gtt = objects[i].gtt;
		unsigned int offset = objects[i].offset;
		unsigned int flags = PIN_USER;

		for (single = 0; single <= 1; single++) {
			obj = fake_huge_pages_object(i915, size, !!single);
			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = i915_gem_object_pin_pages_unlocked(obj);
			if (err)
				goto out_object_put;

			/*
			 * Disable 2M pages -- We only want to use 64K/4K pages
			 * for this test.
			 */
			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_object_unpin;
			}

			if (offset)
				flags |= PIN_OFFSET_FIXED | offset;

			err = i915_vma_pin(vma, 0, 0, flags);
			if (err)
				goto out_object_unpin;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_vma_unpin;

			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				if (!IS_ALIGNED(vma->node.start,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.start(%llx) not aligned to 2M\n",
					       vma->node.start);
					err = -EINVAL;
					goto out_vma_unpin;
				}

				if (!IS_ALIGNED(vma->node.size,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.size(%llx) not aligned to 2M\n",
					       vma->node.size);
					err = -EINVAL;
					goto out_vma_unpin;
				}
			}

			if (vma->resource->page_sizes_gtt != expected_gtt) {
				pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
				       vma->resource->page_sizes_gtt,
				       expected_gtt, i, str_yes_no(!!single));
				err = -EINVAL;
				goto out_vma_unpin;
			}

			i915_vma_unpin(vma);
			i915_gem_object_lock(obj, NULL);
			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			i915_gem_drain_freed_objects(i915);
		}
	}

	return 0;

out_vma_unpin:
	i915_vma_unpin(vma);
out_object_unpin:
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
out_object_put:
	i915_gem_object_put(obj);

	return err;
}

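/*
 * Fill dword @dw of every page of @vma with @val on the GPU; cpu_check()
 * below reads the pages back to verify the PTEs resolved to the right memory.
 */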
static int gpu_write(struct intel_context *ce,
		     struct i915_vma *vma,
		     u32 dw,
		     u32 val)
{
	int err;

	i915_gem_object_lock(vma->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
	i915_gem_object_unlock(vma->obj);
	if (err)
		return err;

	return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
			       vma->size >> PAGE_SHIFT, val);
}

static int
__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned int needs_flush;
	unsigned long n;
	int err;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		goto err_unlock;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));

		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(ptr, PAGE_SIZE);

		if (ptr[dword] != val) {
			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
			       n, dword, ptr[dword], val);
			kunmap_atomic(ptr);
			err = -EINVAL;
			break;
		}

		kunmap_atomic(ptr);
	}

	i915_gem_object_finish_access(obj);
err_unlock:
	i915_gem_object_unlock(obj);

	return err;
}

static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	if (i915_gem_object_has_struct_page(obj))
		return __cpu_check_shmem(obj, dword, val);
	else
		return __cpu_check_vmap(obj, dword, val);
}

static int __igt_write_huge(struct intel_context *ce,
			    struct drm_i915_gem_object *obj,
			    u64 size, u64 offset,
			    u32 dword, u32 val)
{
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
		/*
		 * The ggtt may have some pages reserved so
		 * refrain from erroring out.
		 */
		if (err == -ENOSPC && i915_is_ggtt(ce->vm))
			err = 0;

		return err;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_vma_unpin;

	err = gpu_write(ce, vma, dword, val);
	if (err) {
		pr_err("gpu-write failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

	err = cpu_check(obj, dword, val);
	if (err) {
		pr_err("cpu-check failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

out_vma_unpin:
	i915_vma_unpin(vma);
	return err;
}

static int igt_write_huge(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int max_page_size;
	unsigned int count;
	struct i915_gem_context *ctx;
	struct file *file;
	u64 max;
	u64 num;
	u64 size;
	int *order;
	int i, n;
	int err = 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = hugepage_ctx(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	size = obj->base.size;
	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
		size = round_up(size, I915_GTT_PAGE_SIZE_2M);

	n = 0;
	count = 0;
	max = U64_MAX;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		max = min(max, ce->vm->total);
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		goto out;

	/*
	 * To keep things interesting when alternating between engines in our
	 * randomized order, lets also make feeding to the same engine a few
	 * times in succession a possibility by enlarging the permutation array.
	 */
	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out;
	}

	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
	max = div_u64(max - size, max_page_size);

	/*
	 * Try various offsets in an ascending/descending fashion until we
	 * timeout -- we want to avoid issues hidden by effectively always using
	 * offset = 0.
	 */
	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	for_each_prime_number_from(num, 0, max) {
		u64 offset_low = num * max_page_size;
		u64 offset_high = (max - num) * max_page_size;
		u32 dword = offset_in_page(num) / 4;
		struct intel_context *ce;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		/*
		 * In order to utilize 64K pages we need to both pad the vma
		 * size and ensure the vma offset is at the start of the pt
		 * boundary, however to improve coverage we opt for testing both
		 * aligned and unaligned offsets.
		 */
		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
			offset_low = round_down(offset_low,
						I915_GTT_PAGE_SIZE_2M);

		err = __igt_write_huge(ce, obj, size, offset_low,
				       dword, num + 1);
		if (err)
			break;

		err = __igt_write_huge(ce, obj, size, offset_high,
				       dword, num + 1);
		if (err)
			break;

		if (igt_timeout(end_time,
				"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
				__func__, ce->engine->name, offset_low, offset_high,
				max_page_size))
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	kfree(order);

out:
	fput(file);
	return err;
}

typedef struct drm_i915_gem_object *
(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);

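/*
 * THP-backed shmem objects need both a working private gemfs mount and
 * transparent-hugepage support in the running kernel.
 */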
static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}

static struct drm_i915_gem_object *
igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
{
	if (!igt_can_allocate_thp(i915)) {
		pr_info("%s missing THP support, skipping\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	return i915_gem_object_create_shmem(i915, size);
}

static struct drm_i915_gem_object *
igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_internal(i915, size);
}

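/*
 * Using the size as the page mask means get_huge_pages() builds the object
 * from one chunk per bit set in the size, e.g. SZ_2M + SZ_64K becomes one
 * 2M chunk followed by one 64K chunk.
 */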
static struct drm_i915_gem_object *
igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return huge_pages_object(i915, size, size);
}

static struct drm_i915_gem_object *
igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_lmem(i915, size, flags);
}

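/*
 * Pick a random page-aligned size in [min_page_size, 2 * max_page_size).
 * For example, with min = 64K and max = 2M the mask below is 0x3ff000, so
 * results can reach just under 4M, with 64K OR'd in whenever the initial
 * draw lands below the minimum.
 */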
static u32 igt_random_size(struct rnd_state *prng,
			   u32 min_page_size,
			   u32 max_page_size)
{
	u64 mask;
	u32 size;

	GEM_BUG_ON(!is_power_of_2(min_page_size));
	GEM_BUG_ON(!is_power_of_2(max_page_size));
	GEM_BUG_ON(min_page_size < PAGE_SIZE);
	GEM_BUG_ON(min_page_size > max_page_size);

	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
	size = prandom_u32_state(prng) & mask;
	if (size < min_page_size)
		size |= min_page_size;

	return size;
}

static int igt_ppgtt_smoke_huge(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	struct {
		igt_create_fn fn;
		u32 min;
		u32 max;
	} backends[] = {
		{ igt_create_internal, SZ_64K, SZ_2M, },
		{ igt_create_shmem, SZ_64K, SZ_32M, },
		{ igt_create_local, SZ_64K, SZ_1G, },
	};
	int err;
	int i;

	/*
	 * Sanity check that the HW uses huge pages correctly through our
	 * various backends -- ensure that our writes land in the right place.
	 */
	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		u32 min = backends[i].min;
		u32 max = backends[i].max;
		u32 size = max;

try_again:
		size = igt_random_size(&prng, min, rounddown_pow_of_two(size));

		obj = backends[i].fn(i915, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -E2BIG) {
				size >>= 1;
				goto try_again;
			} else if (err == -ENODEV) {
				err = 0;
				continue;
			}

			return err;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			if (err == -ENXIO || err == -E2BIG || err == -ENOMEM) {
				i915_gem_object_put(obj);
				size >>= 1;
				goto try_again;
			}
			goto out_put;
		}

		if (obj->mm.page_sizes.phys < min) {
			pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
				__func__, size, i);
			err = -ENOMEM;
			goto out_unpin;
		}

		err = igt_write_huge(i915, obj);
		if (err) {
			pr_err("%s write-huge failed with size=%u, i=%d\n",
			       __func__, size, i);
		}
out_unpin:
		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
out_put:
		i915_gem_object_put(obj);

		if (err == -ENOMEM || err == -ENXIO)
			err = 0;

		if (err)
			break;

		cond_resched();
	}

	return err;
}

static int igt_ppgtt_sanity_check(void *arg)
{
	struct drm_i915_private *i915 = arg;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct {
		igt_create_fn fn;
		unsigned int flags;
	} backends[] = {
		{ igt_create_system, 0, },
		{ igt_create_local, 0, },
		{ igt_create_local, I915_BO_ALLOC_CONTIGUOUS, },
	};
	struct {
		u32 size;
		u32 pages;
	} combos[] = {
		{ SZ_64K, SZ_64K },
		{ SZ_2M, SZ_2M },
		{ SZ_2M, SZ_64K },
		{ SZ_2M - SZ_64K, SZ_64K },
		{ SZ_2M - SZ_4K, SZ_64K | SZ_4K },
		{ SZ_2M + SZ_4K, SZ_64K | SZ_4K },
		{ SZ_2M + SZ_4K, SZ_2M | SZ_4K },
		{ SZ_2M + SZ_64K, SZ_2M | SZ_64K },
	};
	int i, j;
	int err;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	/*
	 * Sanity check that the HW behaves with a limited set of combinations.
	 * We already have a bunch of randomised testing, which should give us
	 * a decent amount of variation between runs, however we should keep
	 * this to limit the chances of introducing a temporary regression, by
	 * testing the most obvious cases that might make something blow up.
	 */
	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		for (j = 0; j < ARRAY_SIZE(combos); ++j) {
			struct drm_i915_gem_object *obj;
			u32 size = combos[j].size;
			u32 pages = combos[j].pages;

			obj = backends[i].fn(i915, size, backends[i].flags);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				if (err == -ENODEV) {
					pr_info("Device lacks local memory, skipping\n");
					err = 0;
					break;
				}

				return err;
			}

			err = i915_gem_object_pin_pages_unlocked(obj);
			if (err) {
				i915_gem_object_put(obj);
				goto out;
			}

			GEM_BUG_ON(pages > obj->base.size);
			pages = pages & supported;

			if (pages)
				obj->mm.page_sizes.sg = pages;

			err = igt_write_huge(i915, obj);

			i915_gem_object_lock(obj, NULL);
			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			if (err) {
				pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
				       __func__, size, pages, i, j);
				goto out;
			}
		}

		cond_resched();
	}

out:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_ppgtt_compact(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err;

	/*
	 * Simple test to catch issues with compact 64K pages -- since the pt is
	 * compacted to 256B that gives us 32 entries per pt, however since the
	 * backing page for the pt is 4K, any extra entries we might incorrectly
	 * write out should be ignored by the HW. If we ever hit such a case this
	 * test should catch it since some of our writes would land in scratch.
	 */
	if (!HAS_64K_PAGES(i915)) {
		pr_info("device lacks compact 64K page support, skipping\n");
		return 0;
	}

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	/* A 4M object spans more than one compacted (2M) page-table. */
	obj = i915_gem_object_create_lmem(i915, SZ_4M, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_put;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
		pr_info("LMEM compact unable to allocate huge-page(s)\n");
		goto out_unpin;
	}

	/*
	 * Disable 2M GTT pages by forcing the page-size to 64K for the GTT
	 * insertion.
	 */
	obj->mm.page_sizes.sg = I915_GTT_PAGE_SIZE_64K;

	err = igt_write_huge(i915, obj);
	if (err)
		pr_err("LMEM compact write-huge failed\n");

out_unpin:
	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_tmpfs_fallback(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct vfsmount *gemfs = i915->mm.gemfs;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct file *file;
	u32 *vaddr;
	int err = 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = hugepage_ctx(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}
	vm = i915_gem_context_get_eb_vm(ctx);

	/*
	 * Make sure that we don't burst into a ball of flames upon falling back
	 * to tmpfs, which we rely on if on the off-chance we encounter a
	 * failure when setting up gemfs.
	 */

	i915->mm.gemfs = NULL;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_restore;
	}

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}
	*vaddr = 0xdeadbeaf;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_put;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
	i915->mm.gemfs = gemfs;

	i915_vm_put(vm);
out:
	fput(file);
	return err;
}

static int igt_shrink_thp(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	struct drm_i915_gem_object *obj;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	struct i915_vma *vma;
	struct file *file;
	unsigned int flags = PIN_USER;
	unsigned int n;
	intel_wakeref_t wf;
	bool should_swap;
	int err;

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		return 0;
	}

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = hugepage_ctx(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out;
	}
	vm = i915_gem_context_get_eb_vm(ctx);

	/*
	 * Sanity check shrinking huge-paged object -- make sure nothing blows
	 * up.
	 */

	obj = i915_gem_object_create_shmem(i915, SZ_2M);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_vm;
	}

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	wf = intel_runtime_pm_get(&i915->runtime_pm);

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_wf;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_unpin;

	n = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	/*
	 * Nuke everything *before* we unpin the pages so we can be reasonably
	 * sure that when later checking get_nr_swap_pages() that some random
	 * leftover object doesn't steal the remaining swap space.
	 */
	i915_gem_shrink(NULL, i915, -1UL, NULL,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_ACTIVE);
	i915_vma_unpin(vma);
	if (err)
		goto out_put;

	/*
	 * Now that the pages are *unpinned* shrinking should invoke
	 * shmem to truncate our pages, if we have available swap.
	 */
	should_swap = get_nr_swap_pages() > 0;
	i915_gem_shrink(NULL, i915, -1UL, NULL,
			I915_SHRINK_BOUND |
			I915_SHRINK_UNBOUND |
			I915_SHRINK_ACTIVE |
			I915_SHRINK_WRITEBACK);
	if (should_swap == i915_gem_object_has_pages(obj)) {
		pr_err("unexpected pages mismatch, should_swap=%s\n",
		       str_yes_no(should_swap));
		err = -EINVAL;
		goto out_put;
	}

	if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
		pr_err("unexpected residual page-size bits, should_swap=%s\n",
		       str_yes_no(should_swap));
		err = -EINVAL;
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
		if (err)
			break;
	}

out_unpin:
	i915_vma_unpin(vma);
out_wf:
	intel_runtime_pm_put(&i915->runtime_pm, wf);
out_put:
	i915_gem_object_put(obj);
out_vm:
	i915_vm_put(vm);
out:
	fput(file);
	return err;
}

int i915_gem_huge_page_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_exhaust_device_supported_pages),
		SUBTEST(igt_mock_memory_region_huge_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
		SUBTEST(igt_mock_ppgtt_huge_fill),
		SUBTEST(igt_mock_ppgtt_64K),
	};
	struct drm_i915_private *dev_priv;
	struct i915_ppgtt *ppgtt;
	int err;

	dev_priv = mock_gem_device();
	if (!dev_priv)
		return -ENOMEM;

	/* Pretend to be a device which supports the 48b PPGTT */
	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
	mkwrite_device_info(dev_priv)->ppgtt_size = 48;

	ppgtt = i915_ppgtt_create(to_gt(dev_priv), 0);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_put;
	}

	/* If we ever hit this then it's time to mock the 64K scratch page */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_put;
	}

	err = i915_subtests(tests, ppgtt);

out_put:
	i915_vm_put(&ppgtt->vm);
out_unlock:
	mock_destroy_device(dev_priv);
	return err;
}

int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shrink_thp),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_smoke_huge),
		SUBTEST(igt_ppgtt_sanity_check),
		SUBTEST(igt_ppgtt_compact),
	};

	if (!HAS_PPGTT(i915)) {
		pr_info("PPGTT not supported, skipping live-selftests\n");
		return 0;
	}

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_live_subtests(tests, i915);
}