/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF THE SOFTWARE.
 *
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

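/*
 * These selftests exercise the GGTT eviction paths. The tiling-quirk bit
 * doubles as an ownership marker here: quirk_add() tags each object we
 * create so that unpin_ggtt() and cleanup_objects() only touch our own
 * placeholder objects.
 */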
static void quirk_add(struct drm_i915_gem_object *obj,
		      struct list_head *objects)
{
	/* quirk is only for live tiled objects, use it to declare ownership */
	GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
	i915_gem_object_set_tiling_quirk(obj);
	list_add(&obj->st_link, objects);
}

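/*
 * Fill the GGTT with single-page objects, pinning each one in place, until
 * either allocation fails or binding reports -ENOSPC (the GGTT is full).
 */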
static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	unsigned long count;

	count = 0;
	do {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(ggtt->vm.i915,
						      I915_GTT_PAGE_SIZE);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			i915_gem_object_put(obj);
			if (vma == ERR_PTR(-ENOSPC))
				break;

			return PTR_ERR(vma);
		}

		quirk_add(obj, objects);
		count++;
	} while (1);
	pr_debug("Filled GGTT with %lu pages [%llu total]\n",
		 count, ggtt->vm.total / PAGE_SIZE);

	if (list_empty(&ggtt->vm.bound_list)) {
		pr_err("No objects on the GGTT inactive list!\n");
		return -EINVAL;
	}

	return 0;
}

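/* Drop the pin on every object we tagged, making it evictable again. */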
static void unpin_ggtt(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_gem_object_has_tiling_quirk(vma->obj))
			i915_vma_unpin(vma);
}

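/*
 * Undo quirk_add(): clear our ownership marker, release each object and
 * flush the freed-object worker so the GGTT is really empty again.
 */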
static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, list, st_link) {
		GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_clear_tiling_quirk(obj);
		i915_gem_object_put(obj);
	}

	i915_gem_drain_freed_objects(ggtt->vm.i915);
}

static int igt_evict_something(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict one. */

	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_something(&ggtt->vm, NULL,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(ggtt);

	/* Everything is unpinned, we should be able to evict something */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_something(&ggtt->vm, NULL,
				       I915_GTT_PAGE_SIZE, 0, 0,
				       0, U64_MAX,
				       0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_something failed on an unpinned GGTT with err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

static int igt_overcommit(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/*
	 * Fill the GGTT with pinned objects and then try to pin one more.
	 * We expect it to fail.
	 */

	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;

	obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}

	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	if (vma != ERR_PTR(-ENOSPC)) {
		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n",
		       (int)PTR_ERR_OR_ZERO(vma));
		err = -EINVAL;
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

static int igt_evict_for_vma(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	struct drm_mm_node target = {
		.start = 0,
		.size = 4096,
	};
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict a range. */

	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err != -ENOSPC) {
		pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(ggtt);

	/* Everything is unpinned, we should be able to evict the node */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_for_node returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

static void mock_color_adjust(const struct drm_mm_node *node,
			      unsigned long color,
			      u64 *start,
			      u64 *end)
{
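	/*
	 * Deliberately empty: installing any color_adjust callback is
	 * enough to make i915_vm_has_cache_coloring() report true for
	 * the GGTT, which is all this test needs.
	 */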
}

static int igt_evict_for_cache_color(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	const unsigned long flags = PIN_OFFSET_FIXED;
	struct drm_mm_node target = {
		.start = I915_GTT_PAGE_SIZE * 2,
		.size = I915_GTT_PAGE_SIZE,
		.color = I915_CACHE_LLC,
	};
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	LIST_HEAD(objects);
	int err;

	/*
	 * Currently the use of color_adjust for the GGTT is limited to cache
	 * coloring and guard pages, and so the presence of mm.color_adjust for
	 * the GGTT is assumed to be i915_ggtt_color_adjust, hence using a mock
	 * color adjust will work just fine for our purposes.
	 */
	ggtt->vm.mm.color_adjust = mock_color_adjust;
	GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));

	obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       I915_GTT_PAGE_SIZE | flags);
	if (IS_ERR(vma)) {
		pr_err("[0]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto cleanup;
	}
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
	quirk_add(obj, &objects);

	/* Neighbouring; same colour - should fit */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       (I915_GTT_PAGE_SIZE * 2) | flags);
	if (IS_ERR(vma)) {
		pr_err("[1]i915_gem_object_ggtt_pin failed\n");
		err = PTR_ERR(vma);
		goto cleanup;
	}

	i915_vma_unpin(vma);

	/* Remove just the second vma */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
		goto cleanup;
	}

	/*
	 * Attempt to remove the first *pinned* vma, by removing the (empty)
	 * neighbour -- this should fail.
	 */
	target.color = I915_CACHE_L3_LLC;

	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
	mutex_unlock(&ggtt->vm.mutex);
	if (!err) {
		pr_err("[1]i915_gem_evict_for_node unexpectedly succeeded against a pinned neighbour\n");
		err = -EINVAL;
		goto cleanup;
	}

	err = 0;

cleanup:
	unpin_ggtt(ggtt);
	cleanup_objects(ggtt, &objects);
	ggtt->vm.mm.color_adjust = NULL;
	return err;
}

static int igt_evict_vm(void *arg)
{
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	struct i915_gem_ww_ctx ww;
	LIST_HEAD(objects);
	int err;

	/* Fill the GGTT with pinned objects and try to evict everything. */

	err = populate_ggtt(ggtt, &objects);
	if (err)
		goto cleanup;

	/* Everything is pinned, nothing should happen */
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_evict_vm(&ggtt->vm, NULL);
	mutex_unlock(&ggtt->vm.mutex);
	if (err) {
		pr_err("i915_gem_evict_vm on a full GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

	unpin_ggtt(ggtt);

	for_i915_gem_ww(&ww, err, false) {
		mutex_lock(&ggtt->vm.mutex);
		err = i915_gem_evict_vm(&ggtt->vm, &ww);
		mutex_unlock(&ggtt->vm.mutex);
	}

	if (err) {
		pr_err("i915_gem_evict_vm on an unpinned GGTT returned err=%d\n",
		       err);
		goto cleanup;
	}

cleanup:
	cleanup_objects(ggtt, &objects);
	return err;
}

static int igt_evict_contexts(void *arg)
{
	const u64 PRETEND_GGTT_SIZE = 16ull << 20;
	struct intel_gt *gt = arg;
	struct i915_ggtt *ggtt = gt->ggtt;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct reserved {
		struct drm_mm_node node;
		struct reserved *next;
	} *reserved = NULL;
	intel_wakeref_t wakeref;
	struct drm_mm_node hole;
	unsigned long count;
	int err;

	/*
	 * The purpose of this test is to verify that we will trigger an
	 * eviction in the GGTT when constructing a request that requires
	 * additional space in the GGTT for pinning the context. This space
	 * is not directly tied to the request so reclaiming it requires
	 * extra work.
	 *
	 * As such this test is only meaningful for full-ppgtt environments
	 * where the GTT space of the request is separate from the GGTT
	 * allocation required to build the request.
	 */
	if (!HAS_FULL_PPGTT(i915))
		return 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	/* Reserve a block so that we know we have enough to fit a few rq */
	memset(&hole, 0, sizeof(hole));
	mutex_lock(&ggtt->vm.mutex);
	err = i915_gem_gtt_insert(&ggtt->vm, NULL, &hole,
				  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
				  0, ggtt->vm.total,
				  PIN_NOEVICT);
	if (err)
		goto out_locked;

	/* Make the GGTT appear small by filling it with unevictable nodes */
	count = 0;
	do {
		struct reserved *r;

		mutex_unlock(&ggtt->vm.mutex);
		r = kcalloc(1, sizeof(*r), GFP_KERNEL);
		mutex_lock(&ggtt->vm.mutex);
		if (!r) {
			err = -ENOMEM;
			goto out_locked;
		}

		if (i915_gem_gtt_insert(&ggtt->vm, NULL, &r->node,
					1ul << 20, 0, I915_COLOR_UNEVICTABLE,
					0, ggtt->vm.total,
					PIN_NOEVICT)) {
			kfree(r);
			break;
		}

		r->next = reserved;
		reserved = r;

		count++;
	} while (1);
	drm_mm_remove_node(&hole);
	mutex_unlock(&ggtt->vm.mutex);
	pr_info("Filled GGTT with %lu 1MiB nodes\n", count);

	/* Overfill the GGTT with context objects and so try to evict one. */
	for_each_engine(engine, gt, id) {
		struct i915_sw_fence fence;
		struct i915_request *last = NULL;

		count = 0;
		onstack_fence_init(&fence);
		do {
			struct intel_context *ce;
			struct i915_request *rq;

			ce = intel_context_create(engine);
			if (IS_ERR(ce))
				break;

			/* We will need some GGTT space for the rq's context */
			igt_evict_ctl.fail_if_busy = true;
			rq = intel_context_create_request(ce);
			igt_evict_ctl.fail_if_busy = false;
			intel_context_put(ce);

			if (IS_ERR(rq)) {
				/* When full, fail_if_busy will trigger EBUSY */
				if (PTR_ERR(rq) != -EBUSY) {
					pr_err("Unexpected error from request alloc (on %s): %d\n",
					       engine->name,
					       (int)PTR_ERR(rq));
					err = PTR_ERR(rq);
				}
				break;
			}

			/* Keep every request/ctx pinned until we are full */
			err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
							       &fence,
							       GFP_KERNEL);
			if (err < 0)
				break;

			i915_request_add(rq);
			count++;
			if (last)
				i915_request_put(last);
			last = i915_request_get(rq);
			err = 0;
		} while (1);
		onstack_fence_fini(&fence);
		pr_info("Submitted %lu contexts/requests on %s\n",
			count, engine->name);
		if (err)
			break;
		if (last) {
			if (i915_request_wait(last, 0, HZ) < 0) {
				err = -EIO;
				i915_request_put(last);
				pr_err("Failed waiting for last request (on %s)\n",
				       engine->name);
				break;
			}
			i915_request_put(last);
		}
		err = intel_gt_wait_for_idle(engine->gt, HZ * 3);
		if (err) {
			pr_err("Failed to idle GT (on %s)\n", engine->name);
			break;
		}
	}

	mutex_lock(&ggtt->vm.mutex);
out_locked:
	if (igt_flush_test(i915))
		err = -EIO;
	while (reserved) {
		struct reserved *next = reserved->next;

		drm_mm_remove_node(&reserved->node);
		kfree(reserved);

		reserved = next;
	}
	if (drm_mm_node_allocated(&hole))
		drm_mm_remove_node(&hole);
	mutex_unlock(&ggtt->vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return err;
}

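/*
 * The subtests above only need a GGTT address space to chew on, so they can
 * run against a mock device with no real hardware present.
 */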
int i915_gem_evict_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_something),
		SUBTEST(igt_evict_for_vma),
		SUBTEST(igt_evict_for_cache_color),
		SUBTEST(igt_evict_vm),
		SUBTEST(igt_overcommit),
	};
	struct drm_i915_private *i915;
	intel_wakeref_t wakeref;
	int err = 0;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		err = i915_subtests(tests, to_gt(i915));

	mock_destroy_device(i915);
	return err;
}

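/*
 * igt_evict_contexts submits real requests, so it can only run as a live
 * selftest on actual hardware, and is skipped if the GPU is already wedged.
 */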
int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_evict_contexts),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}