#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

/**
 * DOC: Overview
 *
 * drm_mm is a simple range allocator that DRM drivers use to manage an
 * address space, for example GPU virtual addresses or space in the
 * graphics aperture. It hands out non-overlapping blocks from the range
 * given to drm_mm_init(); it never touches the underlying memory itself.
 *
 * Allocations are tracked in &struct drm_mm_node, which callers embed in
 * their own objects and pass in preallocated (and cleared to 0), so the
 * allocator itself performs no memory allocations in its fast paths. Free
 * space ("holes") is tracked three ways at once: in an rbtree sorted by
 * hole size (for best-fit searches), in an augmented rbtree sorted by
 * hole address (for bottom-up and top-down searches), and on a stack in
 * most-recently-freed order (for eviction).
 *
 * On top of the basic allocator, drm_mm supports alignment and range
 * restrictions, an opaque per-node "color" with a driver-supplied
 * &drm_mm.color_adjust callback that can shrink candidate holes (e.g. to
 * keep guard pages between conflicting caching domains), several
 * insertion modes (see &enum drm_mm_insert_mode), and an eviction scan
 * helper for picking which allocated nodes to remove when nothing fits.
 *
 * With CONFIG_DRM_DEBUG_MM enabled, the stack trace of every insertion
 * is recorded, and the owners of any nodes still allocated at
 * drm_mm_takedown() time are dumped as leaks.
 */
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		stack_depot_snprint(node->stack, buf, BUFSZ, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (drm_mm_node_allocated(hole_node)) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
}

RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
			 struct drm_mm_node, rb_hole_addr,
			 u64, subtree_max_hole, HOLE_SIZE)

static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_node, *rb_parent = NULL;
	u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
	struct drm_mm_node *parent;

	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
		if (parent->subtree_max_hole < subtree_max_hole)
			parent->subtree_max_hole = subtree_max_hole;
		if (start < HOLE_ADDR(parent))
			link = &parent->rb_hole_addr.rb_left;
		else
			link = &parent->rb_hole_addr.rb_right;
	}

	rb_link_node(&node->rb_hole_addr, rb_parent, link);
	rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
}

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	node->subtree_max_hole = node->hole_size;
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
	insert_hole_addr(&mm->holes_addr, node);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
			   &augment_callbacks);
	node->hole_size = 0;
	node->subtree_max_hole = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
}

static bool usable_hole_addr(struct rb_node *rb, u64 size)
{
	return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
}

static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
{
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		if (!usable_hole_addr(rb, size))
			break;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole_addr(mm, start, size);

	case DRM_MM_INSERT_HIGH:
		return find_hole_addr(mm, end, size);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

/**
 * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
 * @name: name of function to declare
 * @first: first rb member to traverse (either rb_left or rb_right)
 * @last: last rb member to traverse (either rb_right or rb_left)
 *
 * This macro declares a function that returns the next hole in address
 * order from the addresses rb tree, walking in the direction given by
 * @first/@last and skipping subtrees whose subtree_max_hole shows they
 * cannot contain a hole of the requested size.
 */
#define DECLARE_NEXT_HOLE_ADDR(name, first, last)			\
static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size)	\
{									\
	struct rb_node *parent, *node = &entry->rb_hole_addr;		\
									\
	if (!entry || RB_EMPTY_NODE(node))				\
		return NULL;						\
									\
	if (usable_hole_addr(node->first, size)) {			\
		node = node->first;					\
		while (usable_hole_addr(node->last, size))		\
			node = node->last;				\
		return rb_hole_addr_to_node(node);			\
	}								\
									\
	while ((parent = rb_parent(node)) && node == parent->first)	\
		node = parent;						\
									\
	return rb_hole_addr_to_node(parent);				\
}

DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  u64 size,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return next_hole_low_addr(node, size);

	case DRM_MM_INSERT_HIGH:
		return next_hole_high_addr(node, size);

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator
 * with preallocated objects which must be set up before the range
 * allocator can be used, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;
	u64 end;

	end = node->start + node->size;
	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole_addr(mm, node->start, 0);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
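
/*
 * Usage sketch (illustrative, not part of the kernel sources; fw_fb_base
 * and fw_fb_size are hypothetical): drm_mm_reserve_node() is typically
 * used to carve out a range that is already in use before general
 * allocations start, e.g. a firmware-initialized framebuffer:
 *
 *	struct drm_mm_node fb_node = {};
 *	int err;
 *
 *	fb_node.start = fw_fb_base;
 *	fb_node.size = fw_fb_size;
 *	err = drm_mm_reserve_node(&mm, &fb_node);
 *	if (err)
 *		return err;	// -ENOSPC: range occupied or out of bounds
 */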

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start > range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, size, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
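
/*
 * Usage sketch (illustrative; the manager "mm" and the chosen sizes are
 * assumptions): allocate a 64 KiB block aligned to 4 KiB from the low end
 * of the managed range. A range of [0, U64_MAX) covers the whole manager,
 * and DRM_MM_INSERT_LOW biases the search bottom-up:
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *	int err;
 *
 *	if (!node)
 *		return -ENOMEM;
 *	err = drm_mm_insert_node_in_range(&mm, node, SZ_64K, SZ_4K, 0,
 *					  0, U64_MAX, DRM_MM_INSERT_LOW);
 *	if (err) {
 *		kfree(node);
 *		return err;
 *	}
 */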

static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not
 * need to be cleared again before it can be re-used with any of the
 * insertion functions. It is a bug to call this function on an
 * unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of
 * remove and insert with the guarantee that the allocation start, size and
 * color stay the same.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));

	*new = *old;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node_cached(&old->rb_hole_size,
				       &new->rb_hole_size,
				       &mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);
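
/*
 * Usage sketch (illustrative; the vma_node member is hypothetical):
 * because drivers embed &drm_mm_node in their own objects, handing an
 * allocation over to a new embedding object is done with a replace rather
 * than pointer reassignment:
 *
 *	drm_mm_replace_node(&old_obj->vma_node, &new_obj->vma_node);
 *	// old_obj->vma_node is now unallocated; old_obj may be freed
 */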

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need contiguous allocations for a given object. When
 * evicting objects to make room for a new allocation it is therefore not
 * efficient to simply free objects from the tail of an LRU until a big
 * enough hole appears: especially for large objects, or objects with
 * alignment or range constraints, that can evict many more objects than
 * necessary.
 *
 * The scanning interface supports a smarter approach. A scan is set up
 * with drm_mm_scan_init_with_range(), then eviction candidates are added
 * with drm_mm_scan_add_block() (typically by walking the driver's LRU)
 * until it reports that a suitable hole would be created. Afterwards the
 * driver *must* walk all scanned nodes again in exactly the reverse order
 * and call drm_mm_scan_remove_block() on each one to restore the
 * allocator state; only the nodes for which that call returns true
 * actually need to be evicted. While a scan is active no other operations
 * on the drm_mm are allowed.
 *
 * Adding and removing a scan entry is O(1), so the whole scan is linear
 * in the number of objects inspected.
 */

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the search and placement
 *
 * This sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scan
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scan
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan
 * list as they have been added (e.g. using list_add() as they are added
 * and then list_for_each() over that eviction list to remove), otherwise
 * the internal state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed.
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * back correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be next in the list.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
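
/*
 * Usage sketch of a complete eviction scan (illustrative; the LRU list,
 * struct drm_obj, obj->node, obj->evict_link and driver_evict() are all
 * hypothetical driver state). Candidates are added in LRU order until a
 * hole is found, then every scanned node must be removed again in reverse
 * order, and only the nodes for which drm_mm_scan_remove_block() returned
 * true actually get evicted:
 *
 *	struct drm_mm_scan scan;
 *	struct drm_obj *obj, *next;
 *	LIST_HEAD(evict_list);
 *	bool found = false;
 *
 *	drm_mm_scan_init_with_range(&scan, &mm, size, align, 0,
 *				    0, U64_MAX, DRM_MM_INSERT_LOW);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	// evict_list is in reverse scan order thanks to list_add()
 *	list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
 *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_del(&obj->evict_link);
 *	}
 *
 *	if (found)
 *		list_for_each_entry_safe(obj, next, &evict_list, evict_link)
 *			driver_evict(obj);	// frees obj->node
 *
 * When mm.color_adjust is set, drm_mm_scan_color_evict() should then be
 * called in a loop to evict any extra neighbouring nodes it reports.
 */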

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.flags = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;

#ifdef CONFIG_DRM_DEBUG_MM
	stack_depot_init();
#endif
}
EXPORT_SYMBOL(drm_mm_init);
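
/*
 * Usage sketch of the allocator lifecycle (illustrative; the 4 GiB range
 * is an assumption):
 *
 *	struct drm_mm mm = {};	// must be cleared to 0 before drm_mm_init()
 *
 *	drm_mm_init(&mm, 0, SZ_4G);
 *	... drm_mm_insert_node_in_range() / drm_mm_remove_node() calls ...
 *	drm_mm_takedown(&mm);	// all nodes must have been removed by now
 *
 * drm_mm provides no locking of its own; callers must serialize all calls
 * operating on the same manager.
 */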

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
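
/*
 * Usage sketch (illustrative): dumping the allocator state through a
 * &struct drm_printer, e.g. from a debugfs show callback where "m" is the
 * usual &struct seq_file argument:
 *
 *	struct drm_printer p = drm_seq_file_printer(m);
 *
 *	drm_mm_print(&mm, &p);
 */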