0001
0002
0003
0004
0005
0006 #define pr_fmt(fmt) "drm_mm: " fmt
0007
0008 #include <linux/module.h>
0009 #include <linux/prime_numbers.h>
0010 #include <linux/slab.h>
0011 #include <linux/random.h>
0012 #include <linux/vmalloc.h>
0013 #include <linux/ktime.h>
0014
0015 #include <drm/drm_mm.h>
0016
0017 #include "../lib/drm_random.h"
0018
0019 #define TESTS "drm_mm_selftests.h"
0020 #include "drm_selftest.h"
0021
/* Module parameters: PRNG seed and iteration caps for the stress loops. */
static unsigned int random_seed;
static unsigned int max_iterations = 8192;
static unsigned int max_prime = 128;

/* Indices into insert_modes[] for the drm_mm allocation strategies. */
enum {
	BEST,
	BOTTOMUP,
	TOPDOWN,
	EVICT,
};

/* Each drm_mm_insert_mode under test, paired with a printable name. */
static const struct insert_mode {
	const char *name;
	enum drm_mm_insert_mode mode;
} insert_modes[] = {
	[BEST] = { "best", DRM_MM_INSERT_BEST },
	[BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
	[TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
	[EVICT] = { "evict", DRM_MM_INSERT_EVICT },
	{}	/* sentinel: mode->name == NULL terminates iteration */
}, evict_modes[] = {
	{ "bottom-up", DRM_MM_INSERT_LOW },
	{ "top-down", DRM_MM_INSERT_HIGH },
	{}	/* sentinel */
};
0047
/* Trivial smoke test: proves the selftest harness itself runs. */
static int igt_sanitycheck(void *ignored)
{
	pr_info("%s - ok!\n", __func__);
	return 0;
}
0053
/*
 * Check that @mm is completely full: the hole iterator reports no
 * holes and no node is followed by free space. Used after operations
 * expected to cover the entire address range.
 */
static bool assert_no_holes(const struct drm_mm *mm)
{
	struct drm_mm_node *hole;
	u64 hole_start, __always_unused hole_end;
	unsigned long count;

	count = 0;
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
		count++;
	if (count) {
		pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
		return false;
	}

	/* Cross-check against the per-node hole_follows flag. */
	drm_mm_for_each_node(hole, mm) {
		if (drm_mm_hole_follows(hole)) {
			pr_err("Hole follows node, expected none!\n");
			return false;
		}
	}

	return true;
}
0077
/*
 * Check that @mm contains exactly one hole and that it spans
 * [start, end). A degenerate range (end <= start) trivially passes.
 * All mismatches are reported before returning the combined verdict.
 */
static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	unsigned long count;
	bool ok = true;

	if (end <= start)
		return true;

	count = 0;
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (start != hole_start || end != hole_end) {
			if (ok)	/* only report the first bad hole */
				pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
				       hole_start, hole_end,
				       start, end);
			ok = false;
		}
		count++;
	}
	if (count != 1) {
		pr_err("Expected to find one hole, found %lu instead\n", count);
		ok = false;
	}

	return ok;
}
0106
/*
 * Check that @mm is fully populated by equal-sized nodes laid out
 * back-to-back from address 0: in list order, each of @size, with no
 * trailing holes, and each discoverable via a range lookup.
 */
static bool assert_continuous(const struct drm_mm *mm, u64 size)
{
	struct drm_mm_node *node, *check, *found;
	unsigned long n;
	u64 addr;

	if (!assert_no_holes(mm))
		return false;

	n = 0;
	addr = 0;
	drm_mm_for_each_node(node, mm) {
		if (node->start != addr) {
			pr_err("node[%ld] list out of order, expected %llx found %llx\n",
			       n, addr, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		if (drm_mm_hole_follows(node)) {
			pr_err("node[%ld] is followed by a hole!\n", n);
			return false;
		}

		/* The range lookup must return exactly this node, once. */
		found = NULL;
		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
			if (node != check) {
				pr_err("lookup return wrong node, expected start %llx, found %llx\n",
				       node->start, check->start);
				return false;
			}
			found = check;
		}
		if (!found) {
			pr_err("lookup failed for node %llx + %llx\n",
			       addr, size);
			return false;
		}

		addr += size;
		n++;
	}

	return true;
}
0157
0158 static u64 misalignment(struct drm_mm_node *node, u64 alignment)
0159 {
0160 u64 rem;
0161
0162 if (!alignment)
0163 return 0;
0164
0165 div64_u64_rem(node->start, alignment, &rem);
0166 return rem;
0167 }
0168
/*
 * Validate a freshly inserted node: allocated within @mm, with the
 * requested @size, @alignment and @color. Every mismatch is reported
 * before the combined verdict is returned.
 */
static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
			u64 size, u64 alignment, unsigned long color)
{
	bool ok = true;

	if (!drm_mm_node_allocated(node) || node->mm != mm) {
		pr_err("node not allocated\n");
		ok = false;
	}

	if (node->size != size) {
		pr_err("node has wrong size, found %llu, expected %llu\n",
		       node->size, size);
		ok = false;
	}

	if (misalignment(node, alignment)) {
		pr_err("node is misaligned, start %llx rem %llu, expected alignment %llu\n",
		       node->start, misalignment(node, alignment), alignment);
		ok = false;
	}

	if (node->color != color) {
		pr_err("node has wrong color, found %lu, expected %lu\n",
		       node->color, color);
		ok = false;
	}

	return ok;
}
0199
/* Dump the drm_mm contents via the debug printer, tagged with the caller. */
#define show_mm(mm) do { \
	struct drm_printer __p = drm_debug_printer(__func__); \
	drm_mm_print((mm), &__p); } while (0)
0203
/*
 * Basic lifecycle test: a zeroed drm_mm must not claim to be
 * initialized; a freshly initialized one must be clean with a single
 * hole covering the whole range; reserving then removing a node that
 * spans the entire range must fill and re-empty it exactly.
 */
static int igt_init(void *ignored)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;
	int ret = -EINVAL;

	/* A zeroed struct must not look initialized. */
	memset(&mm, 0, sizeof(mm));
	if (drm_mm_initialized(&mm)) {
		pr_err("zeroed mm claims to be initialized\n");
		return ret;
	}

	/* Poison the struct first: init must survive garbage contents. */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		pr_err("mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* After creation there should be one hole spanning the range. */
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	ret = drm_mm_reserve_node(&mm, &tmp);
	if (ret) {
		pr_err("failed to reserve whole drm_mm\n");
		goto out;
	}

	/* With the range fully reserved, no holes may remain. */
	if (!assert_no_holes(&mm)) {
		ret = -EINVAL;
		goto out;
	}

	/* And removing it again restores the single full-range hole. */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

out:
	if (ret)
		show_mm(&mm);
	drm_mm_takedown(&mm);
	return ret;
}
0264
0265 static int igt_debug(void *ignored)
0266 {
0267 struct drm_mm mm;
0268 struct drm_mm_node nodes[2];
0269 int ret;
0270
0271
0272
0273
0274
0275 drm_mm_init(&mm, 0, 4096);
0276
0277 memset(nodes, 0, sizeof(nodes));
0278 nodes[0].start = 512;
0279 nodes[0].size = 1024;
0280 ret = drm_mm_reserve_node(&mm, &nodes[0]);
0281 if (ret) {
0282 pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
0283 nodes[0].start, nodes[0].size);
0284 return ret;
0285 }
0286
0287 nodes[1].size = 1024;
0288 nodes[1].start = 4096 - 512 - nodes[1].size;
0289 ret = drm_mm_reserve_node(&mm, &nodes[1]);
0290 if (ret) {
0291 pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
0292 nodes[1].start, nodes[1].size);
0293 return ret;
0294 }
0295
0296 show_mm(&mm);
0297 return 0;
0298 }
0299
0300 static struct drm_mm_node *set_node(struct drm_mm_node *node,
0301 u64 start, u64 size)
0302 {
0303 node->start = start;
0304 node->size = size;
0305 return node;
0306 }
0307
0308 static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
0309 {
0310 int err;
0311
0312 err = drm_mm_reserve_node(mm, node);
0313 if (likely(err == -ENOSPC))
0314 return true;
0315
0316 if (!err) {
0317 pr_err("impossible reserve succeeded, node %llu + %llu\n",
0318 node->start, node->size);
0319 drm_mm_remove_node(node);
0320 } else {
0321 pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
0322 err, -ENOSPC, node->start, node->size);
0323 }
0324 return false;
0325 }
0326
/*
 * Feed drm_mm_reserve_node() a table of degenerate and out-of-range
 * [start, size) pairs — zero-sized, wrapping, and past-the-end — and
 * check that every one is rejected. The mm spans [0, count * size).
 */
static bool check_reserve_boundaries(struct drm_mm *mm,
				     unsigned int count,
				     u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;
	} boundaries[] = {
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2*size),
		B(0, -size),
		B(size, -size),
		B(count*size, size),
		B(count*size, -size),
		B(count*size, count*size),
		B(count*size, -count*size),
		B(count*size, -(count+1)*size),
		B((count+1)*size, size),
		B((count+1)*size, -size),
		B((count+1)*size, -2*size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(mm,
					 set_node(&tmp,
						  boundaries[n].start,
						  boundaries[n].size))) {
			pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
			       n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
0371
0372 static int __igt_reserve(unsigned int count, u64 size)
0373 {
0374 DRM_RND_STATE(prng, random_seed);
0375 struct drm_mm mm;
0376 struct drm_mm_node tmp, *nodes, *node, *next;
0377 unsigned int *order, n, m, o = 0;
0378 int ret, err;
0379
0380
0381
0382
0383
0384
0385
0386 DRM_MM_BUG_ON(!count);
0387 DRM_MM_BUG_ON(!size);
0388
0389 ret = -ENOMEM;
0390 order = drm_random_order(count, &prng);
0391 if (!order)
0392 goto err;
0393
0394 nodes = vzalloc(array_size(count, sizeof(*nodes)));
0395 if (!nodes)
0396 goto err_order;
0397
0398 ret = -EINVAL;
0399 drm_mm_init(&mm, 0, count * size);
0400
0401 if (!check_reserve_boundaries(&mm, count, size))
0402 goto out;
0403
0404 for (n = 0; n < count; n++) {
0405 nodes[n].start = order[n] * size;
0406 nodes[n].size = size;
0407
0408 err = drm_mm_reserve_node(&mm, &nodes[n]);
0409 if (err) {
0410 pr_err("reserve failed, step %d, start %llu\n",
0411 n, nodes[n].start);
0412 ret = err;
0413 goto out;
0414 }
0415
0416 if (!drm_mm_node_allocated(&nodes[n])) {
0417 pr_err("reserved node not allocated! step %d, start %llu\n",
0418 n, nodes[n].start);
0419 goto out;
0420 }
0421
0422 if (!expect_reserve_fail(&mm, &nodes[n]))
0423 goto out;
0424 }
0425
0426
0427 if (!assert_continuous(&mm, size))
0428 goto out;
0429
0430
0431 drm_random_reorder(order, count, &prng);
0432 for (n = 0; n < count; n++) {
0433 if (!expect_reserve_fail(&mm,
0434 set_node(&tmp, order[n] * size, 1)))
0435 goto out;
0436
0437
0438 drm_mm_remove_node(&nodes[order[n]]);
0439 err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
0440 if (err) {
0441 pr_err("reserve failed, step %d, start %llu\n",
0442 n, nodes[n].start);
0443 ret = err;
0444 goto out;
0445 }
0446 }
0447
0448 if (!assert_continuous(&mm, size))
0449 goto out;
0450
0451
0452 for (n = 0; n < count; n++) {
0453 if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
0454 goto out;
0455 }
0456 for (n = 0; n < count; n++) {
0457 if (!expect_reserve_fail(&mm,
0458 set_node(&tmp,
0459 size * n,
0460 size * (count - n))))
0461 goto out;
0462 }
0463
0464
0465 for_each_prime_number(n, min(max_prime, count)) {
0466 for (m = 0; m < n; m++) {
0467 node = &nodes[order[(o + m) % count]];
0468 drm_mm_remove_node(node);
0469 }
0470
0471 for (m = 0; m < n; m++) {
0472 node = &nodes[order[(o + m) % count]];
0473 err = drm_mm_reserve_node(&mm, node);
0474 if (err) {
0475 pr_err("reserve failed, step %d/%d, start %llu\n",
0476 m, n, node->start);
0477 ret = err;
0478 goto out;
0479 }
0480 }
0481
0482 o += n;
0483
0484 if (!assert_continuous(&mm, size))
0485 goto out;
0486 }
0487
0488 ret = 0;
0489 out:
0490 drm_mm_for_each_node_safe(node, next, &mm)
0491 drm_mm_remove_node(node);
0492 drm_mm_takedown(&mm);
0493 vfree(nodes);
0494 err_order:
0495 kfree(order);
0496 err:
0497 return ret;
0498 }
0499
0500 static int igt_reserve(void *ignored)
0501 {
0502 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
0503 int n, ret;
0504
0505 for_each_prime_number_from(n, 1, 54) {
0506 u64 size = BIT_ULL(n);
0507
0508 ret = __igt_reserve(count, size - 1);
0509 if (ret)
0510 return ret;
0511
0512 ret = __igt_reserve(count, size);
0513 if (ret)
0514 return ret;
0515
0516 ret = __igt_reserve(count, size + 1);
0517 if (ret)
0518 return ret;
0519
0520 cond_resched();
0521 }
0522
0523 return 0;
0524 }
0525
/*
 * Insert @node into @mm with the given parameters and verify the
 * resulting placement with assert_node(). On any failure the node is
 * left unallocated and false is returned.
 */
static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
			  u64 size, u64 alignment, unsigned long color,
			  const struct insert_mode *mode)
{
	int err;

	err = drm_mm_insert_node_generic(mm, node,
					 size, alignment, color,
					 mode->mode);
	if (err) {
		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
		       size, alignment, color, mode->name, err);
		return false;
	}

	if (!assert_node(node, mm, size, alignment, color)) {
		drm_mm_remove_node(node);
		return false;
	}

	return true;
}
0548
0549 static bool expect_insert_fail(struct drm_mm *mm, u64 size)
0550 {
0551 struct drm_mm_node tmp = {};
0552 int err;
0553
0554 err = drm_mm_insert_node(mm, &tmp, size);
0555 if (likely(err == -ENOSPC))
0556 return true;
0557
0558 if (!err) {
0559 pr_err("impossible insert succeeded, node %llu + %llu\n",
0560 tmp.start, tmp.size);
0561 drm_mm_remove_node(&tmp);
0562 } else {
0563 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
0564 err, -ENOSPC, size);
0565 }
0566 return false;
0567 }
0568
/*
 * Exhaustively exercise drm_mm_insert_node_generic() — and, with
 * @replace set, drm_mm_replace_node() — for every insertion mode on a
 * mm of exactly count * size bytes: fill it, check it is full, then
 * remove and reinsert nodes singly and in prime-sized random batches,
 * asserting the layout stays continuous after every step.
 */
static int __igt_insert(unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			/* when replacing, insert into a scratch node first */
			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(&mm, node, size, 0, n, mode)) {
				pr_err("%s insert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					pr_err("replaced old-node still allocated! step %d\n",
					       n);
					goto out;
				}

				/* the replacement must inherit size/color/placement */
				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
					pr_err("replaced node did not inherit parameters, size %llu step %d\n",
					       size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
					       tmp.start, size,
					       nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After insertion the range should be fully occupied... */
		if (!assert_continuous(&mm, size))
			goto out;

		/* ...so the next insertion must fail. */
		if (!expect_insert_fail(&mm, size))
			goto out;

		/* Remove one node at a time; as the only hole it must refill itself. */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
				pr_err("%s reinsert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(&mm, size))
				goto out;
		}

		/* Remove prime-sized random batches, reinsert, check full. */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node, size, 0, n, mode)) {
					pr_err("%s multiple reinsert failed, size %llu step %d\n",
					       mode->name, size, n);
					goto out;
				}
			}

			o += n;

			if (!assert_continuous(&mm, size))
				goto out;

			if (!expect_insert_fail(&mm, size))
				goto out;
		}

		/* Reset for the next insertion mode. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
0702
0703 static int igt_insert(void *ignored)
0704 {
0705 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
0706 unsigned int n;
0707 int ret;
0708
0709 for_each_prime_number_from(n, 1, 54) {
0710 u64 size = BIT_ULL(n);
0711
0712 ret = __igt_insert(count, size - 1, false);
0713 if (ret)
0714 return ret;
0715
0716 ret = __igt_insert(count, size, false);
0717 if (ret)
0718 return ret;
0719
0720 ret = __igt_insert(count, size + 1, false);
0721 if (ret)
0722 return ret;
0723
0724 cond_resched();
0725 }
0726
0727 return 0;
0728 }
0729
0730 static int igt_replace(void *ignored)
0731 {
0732 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
0733 unsigned int n;
0734 int ret;
0735
0736
0737
0738
0739
0740
0741
0742 for_each_prime_number_from(n, 1, 54) {
0743 u64 size = BIT_ULL(n);
0744
0745 ret = __igt_insert(count, size - 1, true);
0746 if (ret)
0747 return ret;
0748
0749 ret = __igt_insert(count, size, true);
0750 if (ret)
0751 return ret;
0752
0753 ret = __igt_insert(count, size + 1, true);
0754 if (ret)
0755 return ret;
0756
0757 cond_resched();
0758 }
0759
0760 return 0;
0761 }
0762
0763 static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
0764 u64 size, u64 alignment, unsigned long color,
0765 u64 range_start, u64 range_end,
0766 const struct insert_mode *mode)
0767 {
0768 int err;
0769
0770 err = drm_mm_insert_node_in_range(mm, node,
0771 size, alignment, color,
0772 range_start, range_end,
0773 mode->mode);
0774 if (err) {
0775 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
0776 size, alignment, color, mode->name,
0777 range_start, range_end, err);
0778 return false;
0779 }
0780
0781 if (!assert_node(node, mm, size, alignment, color)) {
0782 drm_mm_remove_node(node);
0783 return false;
0784 }
0785
0786 return true;
0787 }
0788
0789 static bool expect_insert_in_range_fail(struct drm_mm *mm,
0790 u64 size,
0791 u64 range_start,
0792 u64 range_end)
0793 {
0794 struct drm_mm_node tmp = {};
0795 int err;
0796
0797 err = drm_mm_insert_node_in_range(mm, &tmp,
0798 size, 0, 0,
0799 range_start, range_end,
0800 0);
0801 if (likely(err == -ENOSPC))
0802 return true;
0803
0804 if (!err) {
0805 pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
0806 tmp.start, tmp.size, range_start, range_end);
0807 drm_mm_remove_node(&tmp);
0808 } else {
0809 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
0810 err, -ENOSPC, size, range_start, range_end);
0811 }
0812
0813 return false;
0814 }
0815
/*
 * Check that [start, end) is completely filled with @size nodes laid
 * out back-to-back at size-aligned addresses, and that nothing was
 * placed outside the range.
 */
static bool assert_contiguous_in_range(struct drm_mm *mm,
				       u64 size,
				       u64 start,
				       u64 end)
{
	struct drm_mm_node *node;
	unsigned int n;

	/* If the range really is full, one more insertion must fail. */
	if (!expect_insert_in_range_fail(mm, size, start, end))
		return false;

	/* first size-aligned slot index inside [start, end) */
	n = div64_u64(start + size - 1, size);
	drm_mm_for_each_node(node, mm) {
		if (node->start < start || node->start + node->size > end) {
			pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
			       n, node->start, node->start + node->size, start, end);
			return false;
		}

		if (node->start != n * size) {
			pr_err("node %d out of order, expected start %llx, found %llx\n",
			       n, n * size, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node %d has wrong size, expected size %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		/* holes are only permitted beyond the end of the range */
		if (drm_mm_hole_follows(node) &&
		    drm_mm_hole_node_end(node) < end) {
			pr_err("node %d is followed by a hole!\n", n);
			return false;
		}

		n++;
	}

	if (start > 0) {
		node = __drm_mm_interval_first(mm, 0, start - 1);
		if (drm_mm_node_allocated(node)) {
			pr_err("node before start: node=%llx+%llu, start=%llx\n",
			       node->start, node->size, start);
			return false;
		}
	}

	if (end < U64_MAX) {
		node = __drm_mm_interval_first(mm, end, U64_MAX);
		if (drm_mm_node_allocated(node)) {
			pr_err("node after end: node=%llx+%llu, end=%llx\n",
			       node->start, node->size, end);
			return false;
		}
	}

	return true;
}
0876
/*
 * Like __igt_insert(), but fill only the sub-range [start, end) of a
 * count * size mm with size-aligned nodes for every insertion mode,
 * check the range is exactly full with nothing outside it, then
 * remove and reinsert each node in place.
 */
static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	ret = -ENOMEM;
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* first and last size-aligned slots wholly inside [start, end) */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
				       mode->name, size, n,
				       start_n, end_n,
				       start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Remove one node at a time; it must refill its own hole. */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Reset for the next insertion mode. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
0964
0965 static int insert_outside_range(void)
0966 {
0967 struct drm_mm mm;
0968 const unsigned int start = 1024;
0969 const unsigned int end = 2048;
0970 const unsigned int size = end - start;
0971
0972 drm_mm_init(&mm, start, size);
0973
0974 if (!expect_insert_in_range_fail(&mm, 1, 0, start))
0975 return -EINVAL;
0976
0977 if (!expect_insert_in_range_fail(&mm, size,
0978 start - size/2, start + (size+1)/2))
0979 return -EINVAL;
0980
0981 if (!expect_insert_in_range_fail(&mm, size,
0982 end - (size+1)/2, end + size/2))
0983 return -EINVAL;
0984
0985 if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
0986 return -EINVAL;
0987
0988 drm_mm_takedown(&mm);
0989 return 0;
0990 }
0991
/*
 * Drive __igt_insert_range() over assorted sub-ranges — full, offset,
 * truncated, halves, and an inner window — for a spread of node
 * sizes, after first checking that out-of-bounds requests fail.
 */
static int igt_insert_range(void *ignored)
{
	const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
	unsigned int n;
	int ret;

	/* Check that requests outside the mm's bounds are rejected. */
	ret = insert_outside_range();
	if (ret)
		return ret;

	for_each_prime_number_from(n, 1, 50) {
		const u64 size = BIT_ULL(n);
		const u64 max = count * size;

		ret = __igt_insert_range(count, size, 0, max);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, 1, max);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, 0, max - 1);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, 0, max/2);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, max/2, max);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
		if (ret)
			return ret;

		cond_resched();
	}

	return 0;
}
1036
1037 static int prepare_igt_frag(struct drm_mm *mm,
1038 struct drm_mm_node *nodes,
1039 unsigned int num_insert,
1040 const struct insert_mode *mode)
1041 {
1042 unsigned int size = 4096;
1043 unsigned int i;
1044
1045 for (i = 0; i < num_insert; i++) {
1046 if (!expect_insert(mm, &nodes[i], size, 0, i,
1047 mode) != 0) {
1048 pr_err("%s insert failed\n", mode->name);
1049 return -EINVAL;
1050 }
1051 }
1052
1053
1054 for (i = 0; i < num_insert; i++) {
1055 if (i % 2 == 0)
1056 drm_mm_remove_node(&nodes[i]);
1057 }
1058
1059 return 0;
1060
1061 }
1062
1063 static u64 get_insert_time(struct drm_mm *mm,
1064 unsigned int num_insert,
1065 struct drm_mm_node *nodes,
1066 const struct insert_mode *mode)
1067 {
1068 unsigned int size = 8192;
1069 ktime_t start;
1070 unsigned int i;
1071
1072 start = ktime_get();
1073 for (i = 0; i < num_insert; i++) {
1074 if (!expect_insert(mm, &nodes[i], size, 0, i, mode) != 0) {
1075 pr_err("%s insert failed\n", mode->name);
1076 return 0;
1077 }
1078 }
1079
1080 return ktime_to_ns(ktime_sub(ktime_get(), start));
1081 }
1082
/*
 * Fragmentation timing check: after prepare_igt_frag() has punched the
 * address space full of holes, inserting twice as many nodes must take
 * no more than scale_factor times as long as inserting once as many —
 * otherwise insertion is degrading badly with fragmentation.
 */
static int igt_frag(void *ignored)
{
	struct drm_mm mm;
	const struct insert_mode *mode;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int insert_size = 10000;
	unsigned int scale_factor = 4;
	int ret = -EINVAL;

	/* room for the fragmentation set plus the two timed batches */
	nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes)));
	if (!nodes)
		return -ENOMEM;

	drm_mm_init(&mm, 1, U64_MAX - 2);
	for (mode = insert_modes; mode->name; mode++) {
		u64 insert_time1, insert_time2;

		/* only exercise the bottom-up and top-down modes */
		if (mode->mode != DRM_MM_INSERT_LOW &&
		    mode->mode != DRM_MM_INSERT_HIGH)
			continue;

		ret = prepare_igt_frag(&mm, nodes, insert_size, mode);
		if (ret)
			goto err;

		insert_time1 = get_insert_time(&mm, insert_size,
					       nodes + insert_size, mode);
		if (insert_time1 == 0)
			goto err;

		insert_time2 = get_insert_time(&mm, (insert_size * 2),
					       nodes + insert_size * 2, mode);
		if (insert_time2 == 0)
			goto err;

		pr_info("%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n",
			mode->name, insert_size, insert_size * 2,
			insert_time1, insert_time2);

		if (insert_time2 > (scale_factor * insert_time1)) {
			pr_err("%s fragmented insert took %llu nsecs more\n",
			       mode->name,
			       insert_time2 - (scale_factor * insert_time1));
			goto err;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
	}

	ret = 0;
err:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);

	return ret;
}
1152
1153 static int igt_align(void *ignored)
1154 {
1155 const struct insert_mode *mode;
1156 const unsigned int max_count = min(8192u, max_prime);
1157 struct drm_mm mm;
1158 struct drm_mm_node *nodes, *node, *next;
1159 unsigned int prime;
1160 int ret = -EINVAL;
1161
1162
1163
1164
1165
1166
1167 nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
1168 if (!nodes)
1169 goto err;
1170
1171 drm_mm_init(&mm, 1, U64_MAX - 2);
1172
1173 for (mode = insert_modes; mode->name; mode++) {
1174 unsigned int i = 0;
1175
1176 for_each_prime_number_from(prime, 1, max_count) {
1177 u64 size = next_prime_number(prime);
1178
1179 if (!expect_insert(&mm, &nodes[i],
1180 size, prime, i,
1181 mode)) {
1182 pr_err("%s insert failed with alignment=%d",
1183 mode->name, prime);
1184 goto out;
1185 }
1186
1187 i++;
1188 }
1189
1190 drm_mm_for_each_node_safe(node, next, &mm)
1191 drm_mm_remove_node(node);
1192 DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1193
1194 cond_resched();
1195 }
1196
1197 ret = 0;
1198 out:
1199 drm_mm_for_each_node_safe(node, next, &mm)
1200 drm_mm_remove_node(node);
1201 drm_mm_takedown(&mm);
1202 vfree(nodes);
1203 err:
1204 return ret;
1205 }
1206
1207 static int igt_align_pot(int max)
1208 {
1209 struct drm_mm mm;
1210 struct drm_mm_node *node, *next;
1211 int bit;
1212 int ret = -EINVAL;
1213
1214
1215
1216 drm_mm_init(&mm, 1, U64_MAX - 2);
1217
1218 for (bit = max - 1; bit; bit--) {
1219 u64 align, size;
1220
1221 node = kzalloc(sizeof(*node), GFP_KERNEL);
1222 if (!node) {
1223 ret = -ENOMEM;
1224 goto out;
1225 }
1226
1227 align = BIT_ULL(bit);
1228 size = BIT_ULL(bit-1) + 1;
1229 if (!expect_insert(&mm, node,
1230 size, align, bit,
1231 &insert_modes[0])) {
1232 pr_err("insert failed with alignment=%llx [%d]",
1233 align, bit);
1234 goto out;
1235 }
1236
1237 cond_resched();
1238 }
1239
1240 ret = 0;
1241 out:
1242 drm_mm_for_each_node_safe(node, next, &mm) {
1243 drm_mm_remove_node(node);
1244 kfree(node);
1245 }
1246 drm_mm_takedown(&mm);
1247 return ret;
1248 }
1249
/* Power-of-two alignment sweep over 32 bits of exponents. */
static int igt_align32(void *ignored)
{
	return igt_align_pot(32);
}
1254
/* Power-of-two alignment sweep over 64 bits of exponents. */
static int igt_align64(void *ignored)
{
	return igt_align_pot(64);
}
1259
/* Debug helper: print the parameters and hit window of an eviction scan. */
static void show_scan(const struct drm_mm_scan *scan)
{
	pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
		scan->hit_start, scan->hit_end,
		scan->size, scan->alignment, scan->color);
}
1266
/*
 * Debug helper: print up to @count holes, each together with its
 * neighbouring allocated nodes (when present).
 */
static void show_holes(const struct drm_mm *mm, int count)
{
	u64 hole_start, hole_end;
	struct drm_mm_node *hole;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		struct drm_mm_node *next = list_next_entry(hole, node_list);
		const char *node1 = NULL, *node2 = NULL;

		if (drm_mm_node_allocated(hole))
			node1 = kasprintf(GFP_KERNEL,
					  "[%llx + %lld, color=%ld], ",
					  hole->start, hole->size, hole->color);

		if (drm_mm_node_allocated(next))
			node2 = kasprintf(GFP_KERNEL,
					  ", [%llx + %lld, color=%ld]",
					  next->start, next->size, next->color);

		/* NOTE(review): node1/node2 may be NULL (either on purpose or
		 * from kasprintf failure); presumably relies on kernel printk
		 * rendering a NULL "%s" as "(null)" — confirm. */
		pr_info("%sHole [%llx - %llx, size %lld]%s\n",
			node1,
			hole_start, hole_end, hole_end - hole_start,
			node2);

		kfree(node2);
		kfree(node1);

		if (!--count)
			break;
	}
}
1298
/* A drm_mm_node plus a list link for tracking eviction candidates. */
struct evict_node {
	struct drm_mm_node node;
	struct list_head link;
};
1303
/*
 * Feed @nodes (optionally permuted by @order) into an eviction @scan
 * until drm_mm reports a suitable set, keep only the blocks the scan
 * actually selected on @evict_list, remove them from the mm, and —
 * when @use_color — also reap any neighbours flagged by
 * drm_mm_scan_color_evict(). Returns false if no eviction set was
 * found.
 */
static bool evict_nodes(struct drm_mm_scan *scan,
			struct evict_node *nodes,
			unsigned int *order,
			unsigned int count,
			bool use_color,
			struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* Drop the blocks the scan decided it does not need. */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
		       scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	if (use_color) {
		struct drm_mm_node *node;

		/* also evict neighbours required by the color constraints */
		while ((node = drm_mm_scan_color_evict(scan))) {
			e = container_of(node, typeof(*e), node);
			drm_mm_remove_node(&e->node);
			list_add(&e->link, evict_list);
		}
	} else {
		if (drm_mm_scan_color_evict(scan)) {
			pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
			return false;
		}
	}

	return true;
}
1350
/*
 * Run a scan over every node but abort it (remove all candidates without
 * evicting anything). The drm_mm must be left untouched: every node still
 * allocated, still linked into the mm, and the space still continuous.
 */
static bool evict_nothing(struct drm_mm *mm,
			  unsigned int total_size,
			  struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}
	/* Abort the scan: remove every candidate again without evicting. */
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			pr_err("node[%d] no longer allocated!\n", n);
			return false;
		}

		/* Clear a marker; re-set below for nodes still in the mm. */
		e->link.next = NULL;
	}

	/* Mark each node that is still reachable via the mm's node list. */
	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			pr_err("node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1397
/*
 * Check that a scan sized to the whole drm_mm evicts every node, leaving
 * one hole spanning the entire range, and that all nodes can afterwards
 * be reserved back into their old positions.
 */
static bool evict_everything(struct drm_mm *mm,
			     unsigned int total_size,
			     struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	unsigned int n;
	int err;

	/* A scan requesting total_size must consume every single node. */
	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &e->node))
			break;
	}

	/* Every node should be marked for eviction; complain only once. */
	err = 0;
	list_for_each_entry(e, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
			if (!err) {
				pr_err("Node %lld not marked for eviction!\n",
				       e->node.start);
				err = -EINVAL;
			}
		}
	}
	if (err)
		return false;

	list_for_each_entry(e, &evict_list, link)
		drm_mm_remove_node(&e->node);

	/* Evicting everything must leave a single full-range hole. */
	if (!assert_one_hole(mm, 0, total_size))
		return false;

	/* Reinsert all nodes at their previous locations. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1446
/*
 * Evict enough nodes (in the given random order) to open a hole of
 * @size/@alignment within [@range_start, @range_end), insert a temporary
 * node into that hole, verify that the insertion exactly fills the hole,
 * then restore the drm_mm by reinserting every evicted node.
 *
 * Returns 0 on success or a negative error code.
 */
static int evict_something(struct drm_mm *mm,
			   u64 range_start, u64 range_end,
			   struct evict_node *nodes,
			   unsigned int *order,
			   unsigned int count,
			   unsigned int size,
			   unsigned int alignment,
			   const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, 0,
				    range_start, range_end,
				    mode->mode);
	if (!evict_nodes(&scan,
			 nodes, order, count, false,
			 &evict_list))
		return -EINVAL;

	/* The freshly evicted hole must accept a matching node. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
		       size, alignment);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	/* The new node must lie entirely within the requested range... */
	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* ...and exactly fill the hole opened by the eviction scan. */
	if (!assert_node(&tmp, mm, size, alignment, 0) ||
	    drm_mm_hole_follows(&tmp)) {
		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment),
		       tmp.start, drm_mm_hole_follows(&tmp));
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put every evicted node back into its old slot. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	if (!assert_continuous(mm, nodes[0].node.size)) {
		pr_err("range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1517
/*
 * Exercise eviction over the full address space: populate the drm_mm with
 * unit-sized nodes, then repeatedly evict random victims to make room for
 * nodes of various sizes/alignments under each eviction mode, verifying
 * the mm is restored to its continuous state afterwards.
 */
static int igt_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/*
	 * Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. After
	 * reinserting the evicted nodes, the drm_mm must be back in its
	 * original continuous state.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(size, sizeof(*nodes)));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm. */
	if (!evict_nothing(&mm, size, nodes)) {
		pr_err("evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(&mm, size, nodes)) {
		pr_err("evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sized holes, no alignment constraint. */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed\n",
				       mode->name, n);
				ret = err;
				goto out;
			}
		}

		/* Half-sized holes with power-of-two alignments. */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, size/2, n);
				ret = err;
				goto out;
			}
		}

		/* Prime alignments, with sizes shrunk to stay feasible. */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				ret = err;
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1627
1628 static int igt_evict_range(void *ignored)
1629 {
1630 DRM_RND_STATE(prng, random_seed);
1631 const unsigned int size = 8192;
1632 const unsigned int range_size = size / 2;
1633 const unsigned int range_start = size / 4;
1634 const unsigned int range_end = range_start + range_size;
1635 const struct insert_mode *mode;
1636 struct drm_mm mm;
1637 struct evict_node *nodes;
1638 struct drm_mm_node *node, *next;
1639 unsigned int *order, n;
1640 int ret, err;
1641
1642
1643
1644
1645
1646 ret = -ENOMEM;
1647 nodes = vzalloc(array_size(size, sizeof(*nodes)));
1648 if (!nodes)
1649 goto err;
1650
1651 order = drm_random_order(size, &prng);
1652 if (!order)
1653 goto err_nodes;
1654
1655 ret = -EINVAL;
1656 drm_mm_init(&mm, 0, size);
1657 for (n = 0; n < size; n++) {
1658 err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
1659 if (err) {
1660 pr_err("insert failed, step %d\n", n);
1661 ret = err;
1662 goto out;
1663 }
1664 }
1665
1666 for (mode = evict_modes; mode->name; mode++) {
1667 for (n = 1; n <= range_size; n <<= 1) {
1668 drm_random_reorder(order, size, &prng);
1669 err = evict_something(&mm, range_start, range_end,
1670 nodes, order, size,
1671 n, 1,
1672 mode);
1673 if (err) {
1674 pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
1675 mode->name, n, range_start, range_end);
1676 goto out;
1677 }
1678 }
1679
1680 for (n = 1; n <= range_size; n <<= 1) {
1681 drm_random_reorder(order, size, &prng);
1682 err = evict_something(&mm, range_start, range_end,
1683 nodes, order, size,
1684 range_size/2, n,
1685 mode);
1686 if (err) {
1687 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1688 mode->name, range_size/2, n, range_start, range_end);
1689 goto out;
1690 }
1691 }
1692
1693 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
1694 unsigned int nsize = (range_size - n + 1) / 2;
1695
1696 DRM_MM_BUG_ON(!nsize);
1697
1698 drm_random_reorder(order, size, &prng);
1699 err = evict_something(&mm, range_start, range_end,
1700 nodes, order, size,
1701 nsize, n,
1702 mode);
1703 if (err) {
1704 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1705 mode->name, nsize, n, range_start, range_end);
1706 goto out;
1707 }
1708 }
1709
1710 cond_resched();
1711 }
1712
1713 ret = 0;
1714 out:
1715 drm_mm_for_each_node_safe(node, next, &mm)
1716 drm_mm_remove_node(node);
1717 drm_mm_takedown(&mm);
1718 kfree(order);
1719 err_nodes:
1720 vfree(nodes);
1721 err:
1722 return ret;
1723 }
1724
/*
 * Map a node back to its slot index. In the topdown/bottomup tests every
 * node in a pass has the same size and is placed size-aligned, so
 * start / size uniquely identifies the slot.
 */
static unsigned int node_index(const struct drm_mm_node *node)
{
	return div64_u64(node->start, node->size);
}
1729
1730 static int igt_topdown(void *ignored)
1731 {
1732 const struct insert_mode *topdown = &insert_modes[TOPDOWN];
1733 DRM_RND_STATE(prng, random_seed);
1734 const unsigned int count = 8192;
1735 unsigned int size;
1736 unsigned long *bitmap;
1737 struct drm_mm mm;
1738 struct drm_mm_node *nodes, *node, *next;
1739 unsigned int *order, n, m, o = 0;
1740 int ret;
1741
1742
1743
1744
1745
1746
1747 ret = -ENOMEM;
1748 nodes = vzalloc(array_size(count, sizeof(*nodes)));
1749 if (!nodes)
1750 goto err;
1751
1752 bitmap = bitmap_zalloc(count, GFP_KERNEL);
1753 if (!bitmap)
1754 goto err_nodes;
1755
1756 order = drm_random_order(count, &prng);
1757 if (!order)
1758 goto err_bitmap;
1759
1760 ret = -EINVAL;
1761 for (size = 1; size <= 64; size <<= 1) {
1762 drm_mm_init(&mm, 0, size*count);
1763 for (n = 0; n < count; n++) {
1764 if (!expect_insert(&mm, &nodes[n],
1765 size, 0, n,
1766 topdown)) {
1767 pr_err("insert failed, size %u step %d\n", size, n);
1768 goto out;
1769 }
1770
1771 if (drm_mm_hole_follows(&nodes[n])) {
1772 pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
1773 n, nodes[n].start, size);
1774 goto out;
1775 }
1776
1777 if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
1778 goto out;
1779 }
1780
1781 if (!assert_continuous(&mm, size))
1782 goto out;
1783
1784 drm_random_reorder(order, count, &prng);
1785 for_each_prime_number_from(n, 1, min(count, max_prime)) {
1786 for (m = 0; m < n; m++) {
1787 node = &nodes[order[(o + m) % count]];
1788 drm_mm_remove_node(node);
1789 __set_bit(node_index(node), bitmap);
1790 }
1791
1792 for (m = 0; m < n; m++) {
1793 unsigned int last;
1794
1795 node = &nodes[order[(o + m) % count]];
1796 if (!expect_insert(&mm, node,
1797 size, 0, 0,
1798 topdown)) {
1799 pr_err("insert failed, step %d/%d\n", m, n);
1800 goto out;
1801 }
1802
1803 if (drm_mm_hole_follows(node)) {
1804 pr_err("hole after topdown insert %d/%d, start=%llx\n",
1805 m, n, node->start);
1806 goto out;
1807 }
1808
1809 last = find_last_bit(bitmap, count);
1810 if (node_index(node) != last) {
1811 pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
1812 m, n, size, last, node_index(node));
1813 goto out;
1814 }
1815
1816 __clear_bit(last, bitmap);
1817 }
1818
1819 DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1820
1821 o += n;
1822 }
1823
1824 drm_mm_for_each_node_safe(node, next, &mm)
1825 drm_mm_remove_node(node);
1826 DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1827 cond_resched();
1828 }
1829
1830 ret = 0;
1831 out:
1832 drm_mm_for_each_node_safe(node, next, &mm)
1833 drm_mm_remove_node(node);
1834 drm_mm_takedown(&mm);
1835 kfree(order);
1836 err_bitmap:
1837 bitmap_free(bitmap);
1838 err_nodes:
1839 vfree(nodes);
1840 err:
1841 return ret;
1842 }
1843
/*
 * Check DRM_MM_INSERT_LOW: every insertion must land in the lowest
 * available hole; after random removal/refill cycles each node must claim
 * the lowest free slot (tracked via @bitmap).
 */
static int igt_bottomup(void *ignored)
{
	const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
	DRM_RND_STATE(prng, random_seed);
	const unsigned int count = 8192;
	unsigned int size;
	unsigned long *bitmap;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/*
	 * Mirror of igt_topdown(): fill the mm bottom-up with nodes of each
	 * power-of-two size, then repeatedly remove a random subset and
	 * check that refilling always picks the lowest hole first.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	bitmap = bitmap_zalloc(count, GFP_KERNEL);
	if (!bitmap)
		goto err_nodes;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_bitmap;

	ret = -EINVAL;
	for (size = 1; size <= 64; size <<= 1) {
		drm_mm_init(&mm, 0, size*count);
		for (n = 0; n < count; n++) {
			if (!expect_insert(&mm, &nodes[n],
					   size, 0, n,
					   bottomup)) {
				pr_err("bottomup insert failed, size %u step %d\n", size, n);
				goto out;
			}

			/* The remaining free space must be one hole above us. */
			if (!assert_one_hole(&mm, size*(n + 1), size*count))
				goto out;
		}

		if (!assert_continuous(&mm, size))
			goto out;

		/* Remove/reinsert random subsets, verifying placement. */
		drm_random_reorder(order, count, &prng);
		for_each_prime_number_from(n, 1, min(count, max_prime)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
				__set_bit(node_index(node), bitmap);
			}

			for (m = 0; m < n; m++) {
				unsigned int first;

				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node,
						   size, 0, 0,
						   bottomup)) {
					pr_err("insert failed, step %d/%d\n", m, n);
					goto out;
				}

				/* Each refill must claim the lowest free slot. */
				first = find_first_bit(bitmap, count);
				if (node_index(node) != first) {
					pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
					       m, n, first, node_index(node));
					goto out;
				}
				__clear_bit(first, bitmap);
			}

			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);

			o += n;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_bitmap:
	bitmap_free(bitmap);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1943
1944 static int __igt_once(unsigned int mode)
1945 {
1946 struct drm_mm mm;
1947 struct drm_mm_node rsvd_lo, rsvd_hi, node;
1948 int err;
1949
1950 drm_mm_init(&mm, 0, 7);
1951
1952 memset(&rsvd_lo, 0, sizeof(rsvd_lo));
1953 rsvd_lo.start = 1;
1954 rsvd_lo.size = 1;
1955 err = drm_mm_reserve_node(&mm, &rsvd_lo);
1956 if (err) {
1957 pr_err("Could not reserve low node\n");
1958 goto err;
1959 }
1960
1961 memset(&rsvd_hi, 0, sizeof(rsvd_hi));
1962 rsvd_hi.start = 5;
1963 rsvd_hi.size = 1;
1964 err = drm_mm_reserve_node(&mm, &rsvd_hi);
1965 if (err) {
1966 pr_err("Could not reserve low node\n");
1967 goto err_lo;
1968 }
1969
1970 if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
1971 pr_err("Expected a hole after lo and high nodes!\n");
1972 err = -EINVAL;
1973 goto err_hi;
1974 }
1975
1976 memset(&node, 0, sizeof(node));
1977 err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
1978 if (err) {
1979 pr_err("Could not insert the node into the available hole!\n");
1980 err = -EINVAL;
1981 goto err_hi;
1982 }
1983
1984 drm_mm_remove_node(&node);
1985 err_hi:
1986 drm_mm_remove_node(&rsvd_hi);
1987 err_lo:
1988 drm_mm_remove_node(&rsvd_lo);
1989 err:
1990 drm_mm_takedown(&mm);
1991 return err;
1992 }
1993
/* Exercise __igt_once() with bottom-up (lowest hole) insertion. */
static int igt_lowest(void *ignored)
{
	return __igt_once(DRM_MM_INSERT_LOW);
}
1998
/* Exercise __igt_once() with top-down (highest hole) insertion. */
static int igt_highest(void *ignored)
{
	return __igt_once(DRM_MM_INSERT_HIGH);
}
2003
/*
 * color_adjust callback: shrink the candidate hole by one unit at each
 * end that touches an allocated node of a different color, so that
 * differently-colored nodes never abut.
 */
static void separate_adjacent_colors(const struct drm_mm_node *node,
				     unsigned long color,
				     u64 *start,
				     u64 *end)
{
	/* Bump the hole start away from a differently-colored neighbour. */
	if (drm_mm_node_allocated(node) && node->color != color)
		++*start;

	/* And likewise pull the hole end away from the node after it. */
	node = list_next_entry(node, node_list);
	if (drm_mm_node_allocated(node) && node->color != color)
		--*end;
}
2016
/*
 * Report (and return true) if @node directly abuts the next allocated
 * node, i.e. no hole separates them. With separate_adjacent_colors()
 * installed and all nodes uniquely colored, any abutment is an error.
 */
static bool colors_abutt(const struct drm_mm_node *node)
{
	if (!drm_mm_hole_follows(node) &&
	    drm_mm_node_allocated(list_next_entry(node, node_list))) {
		pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
		       node->color, node->start, node->size,
		       list_next_entry(node, node_list)->color,
		       list_next_entry(node, node_list)->start,
		       list_next_entry(node, node_list)->size);
		return true;
	}

	return false;
}
2031
/*
 * Check color handling: colors must be stored/retrieved verbatim, and
 * once a color_adjust callback is installed both reservation and
 * insertion must honour the adjusted holes (unit gaps between
 * differently-colored neighbours).
 */
static int igt_color(void *ignored)
{
	const unsigned int count = min(4096u, max_iterations);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *node, *nn;
	unsigned int n;
	int ret = -EINVAL, err;

	/*
	 * Phase 1: insert nodes with color == size and verify the color is
	 * preserved. Phase 2: install separate_adjacent_colors and verify
	 * that reserving a node flush against a differently-colored
	 * neighbour is refused, while correctly-gapped placements succeed,
	 * for every insertion mode.
	 */

	drm_mm_init(&mm, 0, U64_MAX);

	for (n = 1; n <= count; n++) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		/* color == size lets us validate each node's color below */
		if (!expect_insert(&mm, node,
				   n, 0, n,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			kfree(node);
			goto out;
		}
	}

	drm_mm_for_each_node_safe(node, nn, &mm) {
		if (node->color != node->size) {
			pr_err("invalid color stored: expected %lld, found %ld\n",
			       node->size, node->color);

			goto out;
		}

		drm_mm_remove_node(node);
		kfree(node);
	}

	/* Phase 2: apply the color_adjust callback. */
	mm.color_adjust = separate_adjacent_colors;
	for (mode = insert_modes; mode->name; mode++) {
		u64 last;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		node->size = 1 + 2*count;
		node->color = node->size;

		err = drm_mm_reserve_node(&mm, node);
		if (err) {
			pr_err("initial reserve failed!\n");
			ret = err;
			goto out;
		}

		last = node->start + node->size;

		for (n = 1; n <= count; n++) {
			int rem;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			node->start = last;
			node->size = n + count;
			node->color = node->size;

			/* Abutting a differently-colored node must be refused. */
			err = drm_mm_reserve_node(&mm, node);
			if (err != -ENOSPC) {
				pr_err("reserve %d did not report color overlap! err=%d\n",
				       n, err);
				goto out;
			}

			/* Skip past the guard gap and align before retrying. */
			node->start += n + 1;
			rem = misalignment(node, n + count);
			node->start += n + count - rem;

			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve %d failed, err=%d\n", n, err);
				ret = err;
				goto out;
			}

			last = node->start + node->size;
		}

		/* Fill remaining space with aligned, uniquely-colored nodes. */
		for (n = 1; n <= count; n++) {
			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			if (!expect_insert(&mm, node,
					   n, n, n,
					   mode)) {
				pr_err("%s insert failed, step %d\n",
				       mode->name, n);
				kfree(node);
				goto out;
			}
		}

		/* Verify color, separation and alignment of every node. */
		drm_mm_for_each_node_safe(node, nn, &mm) {
			u64 rem;

			if (node->color != node->size) {
				pr_err("%s invalid color stored: expected %lld, found %ld\n",
				       mode->name, node->size, node->color);

				goto out;
			}

			if (colors_abutt(node))
				goto out;

			div64_u64_rem(node->start, node->size, &rem);
			if (rem) {
				pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
				       mode->name, node->start, node->size, rem);
				goto out;
			}

			drm_mm_remove_node(node);
			kfree(node);
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, nn, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	return ret;
}
2188
/*
 * Colored counterpart of evict_something(): evict nodes to make room for
 * a @size/@alignment/@color node within [@range_start, @range_end),
 * insert it, check placement and color separation, then restore the mm by
 * reinserting the evicted nodes.
 *
 * Returns 0 on success or a negative error code.
 */
static int evict_color(struct drm_mm *mm,
		       u64 range_start, u64 range_end,
		       struct evict_node *nodes,
		       unsigned int *order,
		       unsigned int count,
		       unsigned int size,
		       unsigned int alignment,
		       unsigned long color,
		       const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, color,
				    range_start, range_end,
				    mode->mode);
	if (!evict_nodes(&scan,
			 nodes, order, count, true,
			 &evict_list))
		return -EINVAL;

	/* The freshly evicted hole must accept a matching colored node. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
		       size, alignment, color, err);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	/* The new node must lie entirely within the requested range. */
	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* Color separation must still hold around the insertion. */
	if (colors_abutt(&tmp))
		err = -EINVAL;

	if (!assert_node(&tmp, mm, size, alignment, color)) {
		pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put every evicted node back into its old slot. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	cond_resched();
	return 0;
}
2257
2258 static int igt_color_evict(void *ignored)
2259 {
2260 DRM_RND_STATE(prng, random_seed);
2261 const unsigned int total_size = min(8192u, max_iterations);
2262 const struct insert_mode *mode;
2263 unsigned long color = 0;
2264 struct drm_mm mm;
2265 struct evict_node *nodes;
2266 struct drm_mm_node *node, *next;
2267 unsigned int *order, n;
2268 int ret, err;
2269
2270
2271
2272
2273
2274
2275
2276 ret = -ENOMEM;
2277 nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
2278 if (!nodes)
2279 goto err;
2280
2281 order = drm_random_order(total_size, &prng);
2282 if (!order)
2283 goto err_nodes;
2284
2285 ret = -EINVAL;
2286 drm_mm_init(&mm, 0, 2*total_size - 1);
2287 mm.color_adjust = separate_adjacent_colors;
2288 for (n = 0; n < total_size; n++) {
2289 if (!expect_insert(&mm, &nodes[n].node,
2290 1, 0, color++,
2291 &insert_modes[0])) {
2292 pr_err("insert failed, step %d\n", n);
2293 goto out;
2294 }
2295 }
2296
2297 for (mode = evict_modes; mode->name; mode++) {
2298 for (n = 1; n <= total_size; n <<= 1) {
2299 drm_random_reorder(order, total_size, &prng);
2300 err = evict_color(&mm, 0, U64_MAX,
2301 nodes, order, total_size,
2302 n, 1, color++,
2303 mode);
2304 if (err) {
2305 pr_err("%s evict_color(size=%u) failed\n",
2306 mode->name, n);
2307 goto out;
2308 }
2309 }
2310
2311 for (n = 1; n < total_size; n <<= 1) {
2312 drm_random_reorder(order, total_size, &prng);
2313 err = evict_color(&mm, 0, U64_MAX,
2314 nodes, order, total_size,
2315 total_size/2, n, color++,
2316 mode);
2317 if (err) {
2318 pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
2319 mode->name, total_size/2, n);
2320 goto out;
2321 }
2322 }
2323
2324 for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
2325 unsigned int nsize = (total_size - n + 1) / 2;
2326
2327 DRM_MM_BUG_ON(!nsize);
2328
2329 drm_random_reorder(order, total_size, &prng);
2330 err = evict_color(&mm, 0, U64_MAX,
2331 nodes, order, total_size,
2332 nsize, n, color++,
2333 mode);
2334 if (err) {
2335 pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
2336 mode->name, nsize, n);
2337 goto out;
2338 }
2339 }
2340
2341 cond_resched();
2342 }
2343
2344 ret = 0;
2345 out:
2346 if (ret)
2347 show_mm(&mm);
2348 drm_mm_for_each_node_safe(node, next, &mm)
2349 drm_mm_remove_node(node);
2350 drm_mm_takedown(&mm);
2351 kfree(order);
2352 err_nodes:
2353 vfree(nodes);
2354 err:
2355 return ret;
2356 }
2357
2358 static int igt_color_evict_range(void *ignored)
2359 {
2360 DRM_RND_STATE(prng, random_seed);
2361 const unsigned int total_size = 8192;
2362 const unsigned int range_size = total_size / 2;
2363 const unsigned int range_start = total_size / 4;
2364 const unsigned int range_end = range_start + range_size;
2365 const struct insert_mode *mode;
2366 unsigned long color = 0;
2367 struct drm_mm mm;
2368 struct evict_node *nodes;
2369 struct drm_mm_node *node, *next;
2370 unsigned int *order, n;
2371 int ret, err;
2372
2373
2374
2375
2376
2377 ret = -ENOMEM;
2378 nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
2379 if (!nodes)
2380 goto err;
2381
2382 order = drm_random_order(total_size, &prng);
2383 if (!order)
2384 goto err_nodes;
2385
2386 ret = -EINVAL;
2387 drm_mm_init(&mm, 0, 2*total_size - 1);
2388 mm.color_adjust = separate_adjacent_colors;
2389 for (n = 0; n < total_size; n++) {
2390 if (!expect_insert(&mm, &nodes[n].node,
2391 1, 0, color++,
2392 &insert_modes[0])) {
2393 pr_err("insert failed, step %d\n", n);
2394 goto out;
2395 }
2396 }
2397
2398 for (mode = evict_modes; mode->name; mode++) {
2399 for (n = 1; n <= range_size; n <<= 1) {
2400 drm_random_reorder(order, range_size, &prng);
2401 err = evict_color(&mm, range_start, range_end,
2402 nodes, order, total_size,
2403 n, 1, color++,
2404 mode);
2405 if (err) {
2406 pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2407 mode->name, n, range_start, range_end);
2408 goto out;
2409 }
2410 }
2411
2412 for (n = 1; n < range_size; n <<= 1) {
2413 drm_random_reorder(order, total_size, &prng);
2414 err = evict_color(&mm, range_start, range_end,
2415 nodes, order, total_size,
2416 range_size/2, n, color++,
2417 mode);
2418 if (err) {
2419 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2420 mode->name, total_size/2, n, range_start, range_end);
2421 goto out;
2422 }
2423 }
2424
2425 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2426 unsigned int nsize = (range_size - n + 1) / 2;
2427
2428 DRM_MM_BUG_ON(!nsize);
2429
2430 drm_random_reorder(order, total_size, &prng);
2431 err = evict_color(&mm, range_start, range_end,
2432 nodes, order, total_size,
2433 nsize, n, color++,
2434 mode);
2435 if (err) {
2436 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2437 mode->name, nsize, n, range_start, range_end);
2438 goto out;
2439 }
2440 }
2441
2442 cond_resched();
2443 }
2444
2445 ret = 0;
2446 out:
2447 if (ret)
2448 show_mm(&mm);
2449 drm_mm_for_each_node_safe(node, next, &mm)
2450 drm_mm_remove_node(node);
2451 drm_mm_takedown(&mm);
2452 kfree(order);
2453 err_nodes:
2454 vfree(nodes);
2455 err:
2456 return ret;
2457 }
2458
2459 #include "drm_selftest.c"
2460
2461 static int __init test_drm_mm_init(void)
2462 {
2463 int err;
2464
2465 while (!random_seed)
2466 random_seed = get_random_int();
2467
2468 pr_info("Testing DRM range manager (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2469 random_seed, max_iterations, max_prime);
2470 err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2471
2472 return err > 0 ? 0 : err;
2473 }
2474
/* Nothing to clean up; each selftest tears down its own drm_mm. */
static void __exit test_drm_mm_exit(void)
{
}
2478
module_init(test_drm_mm_init);
module_exit(test_drm_mm_exit);

/* Load-time knobs; 0400 = readable by root in sysfs, not writable. */
module_param(random_seed, uint, 0400);
module_param(max_iterations, uint, 0400);
module_param(max_prime, uint, 0400);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");