#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "ctree.h"
#include "volumes.h"
#include "extent_map.h"
#include "compression.h"

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
        extent_map_cache = kmem_cache_create("btrfs_extent_map",
                        sizeof(struct extent_map), 0,
                        SLAB_MEM_SPREAD, NULL);
        if (!extent_map_cache)
                return -ENOMEM;
        return 0;
}

void __cold extent_map_exit(void)
{
        kmem_cache_destroy(extent_map_cache);
}

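/*
 * Initialize an extent map tree: an empty cached rbtree, an empty list of
 * modified extents and an unlocked rwlock.  Should be called once for each
 * new user of the extent_map interface.
 */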
void extent_map_tree_init(struct extent_map_tree *tree)
{
        tree->map = RB_ROOT_CACHED;
        INIT_LIST_HEAD(&tree->modified_extents);
        rwlock_init(&tree->lock);
}

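/*
 * Allocate a new extent_map structure.  The new structure is returned with a
 * reference count of one and must be released with free_extent_map().
 * Returns NULL if the allocation fails.
 */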
struct extent_map *alloc_extent_map(void)
{
        struct extent_map *em;
        em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
        if (!em)
                return NULL;
        RB_CLEAR_NODE(&em->rb_node);
        em->flags = 0;
        em->compress_type = BTRFS_COMPRESS_NONE;
        em->generation = 0;
        refcount_set(&em->refs, 1);
        INIT_LIST_HEAD(&em->list);
        return em;
}

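/*
 * Drop the reference count on @em by one and free the structure once the
 * count reaches zero.  A NULL @em is tolerated and simply ignored.
 */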
void free_extent_map(struct extent_map *em)
{
        if (!em)
                return;
        WARN_ON(refcount_read(&em->refs) == 0);
        if (refcount_dec_and_test(&em->refs)) {
                WARN_ON(extent_map_in_tree(em));
                WARN_ON(!list_empty(&em->list));
                if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
                        kfree(em->map_lookup);
                kmem_cache_free(extent_map_cache, em);
        }
}

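/* Compute the exclusive end of a range, saturating at (u64)-1 on overflow. */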
static u64 range_end(u64 start, u64 len)
{
        if (start + len < start)
                return (u64)-1;
        return start + len;
}

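/*
 * Link @em into the cached rbtree @root, keyed by em->start.  Returns -EEXIST
 * if any existing extent map overlaps the range of @em.
 */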
static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct extent_map *entry = NULL;
        struct rb_node *orig_parent = NULL;
        u64 end = range_end(em->start, em->len);
        bool leftmost = true;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct extent_map, rb_node);

                if (em->start < entry->start) {
                        p = &(*p)->rb_left;
                } else if (em->start >= extent_map_end(entry)) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return -EEXIST;
                }
        }

        orig_parent = parent;
        while (parent && em->start >= extent_map_end(entry)) {
                parent = rb_next(parent);
                entry = rb_entry(parent, struct extent_map, rb_node);
        }
        if (parent)
                if (end > entry->start && em->start < extent_map_end(entry))
                        return -EEXIST;

        parent = orig_parent;
        entry = rb_entry(parent, struct extent_map, rb_node);
        while (parent && em->start < entry->start) {
                parent = rb_prev(parent);
                entry = rb_entry(parent, struct extent_map, rb_node);
        }
        if (parent)
                if (end > entry->start && em->start < extent_map_end(entry))
                        return -EEXIST;

        rb_link_node(&em->rb_node, orig_parent, p);
        rb_insert_color_cached(&em->rb_node, root, leftmost);
        return 0;
}

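/*
 * Search the tree for an extent_map that covers @offset.  If none can be
 * found, return NULL and report the nearest neighbouring extents through
 * @prev_ret and @next_ret (either may end up NULL).
 */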
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
                                     struct rb_node **prev_ret,
                                     struct rb_node **next_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *orig_prev = NULL;
        struct extent_map *entry;
        struct extent_map *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct extent_map, rb_node);
                prev = n;
                prev_entry = entry;

                if (offset < entry->start)
                        n = n->rb_left;
                else if (offset >= extent_map_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }

        if (prev_ret) {
                orig_prev = prev;
                while (prev && offset >= extent_map_end(prev_entry)) {
                        prev = rb_next(prev);
                        prev_entry = rb_entry(prev, struct extent_map, rb_node);
                }
                *prev_ret = prev;
                prev = orig_prev;
        }

        if (next_ret) {
                prev_entry = rb_entry(prev, struct extent_map, rb_node);
                while (prev && offset < prev_entry->start) {
                        prev = rb_prev(prev);
                        prev_entry = rb_entry(prev, struct extent_map, rb_node);
                }
                *next_ret = prev;
        }
        return NULL;
}

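/*
 * Decide whether two adjacent extent maps (@prev immediately before @next in
 * file offset order) can be merged into a single map.
 */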
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
        if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
                return 0;

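        /* Never merge compressed extents; we need to know their exact size. */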
        if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
                return 0;

        if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
            test_bit(EXTENT_FLAG_LOGGING, &next->flags))
                return 0;

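        /*
         * Don't merge extent maps that are still queued on the tree's
         * modified_extents list; they are waiting to be logged.
         */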
        if (!list_empty(&prev->list) || !list_empty(&next->list))
                return 0;

        ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
               prev->block_start != EXTENT_MAP_DELALLOC);

        if (prev->map_lookup || next->map_lookup)
                ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
                       test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));

        if (extent_map_end(prev) == next->start &&
            prev->flags == next->flags &&
            prev->map_lookup == next->map_lookup &&
            ((next->block_start == EXTENT_MAP_HOLE &&
              prev->block_start == EXTENT_MAP_HOLE) ||
             (next->block_start == EXTENT_MAP_INLINE &&
              prev->block_start == EXTENT_MAP_INLINE) ||
             (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
              next->block_start == extent_map_block_end(prev)))) {
                return 1;
        }
        return 0;
}

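/*
 * Try to merge @em with the extent maps immediately before and after it in
 * the tree.  Any map that gets absorbed is removed from the tree and freed.
 */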
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
        struct extent_map *merge = NULL;
        struct rb_node *rb;

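        /*
         * The tree holds one reference and the caller holds another, so a
         * reference count above two means some other task is still using this
         * extent map and it is not safe to modify it here.
         */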
        if (refcount_read(&em->refs) > 2)
                return;

        if (em->start != 0) {
                rb = rb_prev(&em->rb_node);
                if (rb)
                        merge = rb_entry(rb, struct extent_map, rb_node);
                if (rb && mergable_maps(merge, em)) {
                        em->start = merge->start;
                        em->orig_start = merge->orig_start;
                        em->len += merge->len;
                        em->block_len += merge->block_len;
                        em->block_start = merge->block_start;
                        em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
                        em->mod_start = merge->mod_start;
                        em->generation = max(em->generation, merge->generation);
                        set_bit(EXTENT_FLAG_MERGED, &em->flags);

                        rb_erase_cached(&merge->rb_node, &tree->map);
                        RB_CLEAR_NODE(&merge->rb_node);
                        free_extent_map(merge);
                }
        }

        rb = rb_next(&em->rb_node);
        if (rb)
                merge = rb_entry(rb, struct extent_map, rb_node);
        if (rb && mergable_maps(em, merge)) {
                em->len += merge->len;
                em->block_len += merge->block_len;
                rb_erase_cached(&merge->rb_node, &tree->map);
                RB_CLEAR_NODE(&merge->rb_node);
                em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
                em->generation = max(em->generation, merge->generation);
                set_bit(EXTENT_FLAG_MERGED, &em->flags);
                free_extent_map(merge);
        }
}

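/*
 * Unpin the extent map covering [start, start + len) and record @gen as its
 * generation.  Called once the extent has been safely written to disk; the
 * generation lets fsync know whether the extent still needs to be logged.
 */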
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
                       u64 gen)
{
        int ret = 0;
        struct extent_map *em;
        bool prealloc = false;

        write_lock(&tree->lock);
        em = lookup_extent_mapping(tree, start, len);

        WARN_ON(!em || em->start != start);

        if (!em)
                goto out;

        em->generation = gen;
        clear_bit(EXTENT_FLAG_PINNED, &em->flags);
        em->mod_start = em->start;
        em->mod_len = em->len;

        if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
                prealloc = true;
                clear_bit(EXTENT_FLAG_FILLING, &em->flags);
        }

        try_merge_map(tree, em);

        if (prealloc) {
                em->mod_start = em->start;
                em->mod_len = em->len;
        }

        free_extent_map(em);
out:
        write_unlock(&tree->lock);
        return ret;
}

void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
        clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
        if (extent_map_in_tree(em))
                try_merge_map(tree, em);
}

static inline void setup_extent_mapping(struct extent_map_tree *tree,
                                        struct extent_map *em,
                                        int modified)
{
        refcount_inc(&em->refs);
        em->mod_start = em->start;
        em->mod_len = em->len;

        if (modified)
                list_move(&em->list, &tree->modified_extents);
        else
                try_merge_map(tree, em);
}

static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
{
        struct map_lookup *map = em->map_lookup;
        u64 stripe_size = em->orig_block_len;
        int i;

        for (i = 0; i < map->num_stripes; i++) {
                struct btrfs_io_stripe *stripe = &map->stripes[i];
                struct btrfs_device *device = stripe->dev;

                set_extent_bits_nowait(&device->alloc_state, stripe->physical,
                                       stripe->physical + stripe_size - 1, bits);
        }
}

static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
{
        struct map_lookup *map = em->map_lookup;
        u64 stripe_size = em->orig_block_len;
        int i;

        for (i = 0; i < map->num_stripes; i++) {
                struct btrfs_io_stripe *stripe = &map->stripes[i];
                struct btrfs_device *device = stripe->dev;

                __clear_extent_bit(&device->alloc_state, stripe->physical,
                                   stripe->physical + stripe_size - 1, bits,
                                   0, 0, NULL, GFP_NOWAIT, NULL);
        }
}

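/*
 * Insert @em into @tree, or merge it with neighbouring mappings when
 * possible.  The caller must hold the tree's write lock.  An extra reference
 * is taken on @em while it is linked in the tree.  @modified selects whether
 * the map is queued on the tree's modified_extents list (because it still
 * needs to be logged) instead of being merged.  Returns 0 on success or
 * -EEXIST if the range overlaps an existing extent map.
 */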
int add_extent_mapping(struct extent_map_tree *tree,
                       struct extent_map *em, int modified)
{
        int ret = 0;

        lockdep_assert_held_write(&tree->lock);

        ret = tree_insert(&tree->map, em);
        if (ret)
                goto out;

        setup_extent_mapping(tree, em, modified);
        if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
                extent_map_device_set_bits(em, CHUNK_ALLOCATED);
                extent_map_device_clear_bits(em, CHUNK_TRIMMED);
        }
out:
        return ret;
}

static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
                        u64 start, u64 len, int strict)
{
        struct extent_map *em;
        struct rb_node *rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *next = NULL;
        u64 end = range_end(start, len);

        rb_node = __tree_search(&tree->map.rb_root, start, &prev, &next);
        if (!rb_node) {
                if (prev)
                        rb_node = prev;
                else if (next)
                        rb_node = next;
                else
                        return NULL;
        }

        em = rb_entry(rb_node, struct extent_map, rb_node);

        if (strict && !(end > em->start && start < extent_map_end(em)))
                return NULL;

        refcount_inc(&em->refs);
        return em;
}

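/*
 * Find and return an extent_map in @tree that intersects the
 * [start, start + len) range, or NULL if there is none.  More than one map
 * may intersect the range, so check the returned map carefully.  The
 * returned map has an elevated reference count.
 */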
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
                                         u64 start, u64 len)
{
        return __lookup_extent_mapping(tree, start, len, 1);
}

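/*
 * Like lookup_extent_mapping(), but if no extent map intersects the
 * [start, start + len) range, a nearby map is returned instead of NULL.
 * The returned map has an elevated reference count.
 */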
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
                                         u64 start, u64 len)
{
        return __lookup_extent_mapping(tree, start, len, 0);
}

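/*
 * Remove @em from @tree.  No reference counts are dropped and no checks are
 * done to see whether the range is still in use.  The caller must hold the
 * tree's write lock and the extent map must not be pinned.
 */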
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
        lockdep_assert_held_write(&tree->lock);

        WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
        rb_erase_cached(&em->rb_node, &tree->map);
        if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
                list_del_init(&em->list);
        if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
                extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
        RB_CLEAR_NODE(&em->rb_node);
}

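/*
 * Replace @cur with @new in @tree, reusing @cur's slot in the rbtree.  The
 * caller must hold the tree's write lock and @cur must not be pinned.
 */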
void replace_extent_mapping(struct extent_map_tree *tree,
                            struct extent_map *cur,
                            struct extent_map *new,
                            int modified)
{
        lockdep_assert_held_write(&tree->lock);

        WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
        ASSERT(extent_map_in_tree(cur));
        if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
                list_del_init(&cur->list);
        rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
        RB_CLEAR_NODE(&cur->rb_node);

        setup_extent_mapping(tree, new, modified);
}

static struct extent_map *next_extent_map(struct extent_map *em)
{
        struct rb_node *next;

        next = rb_next(&em->rb_node);
        if (!next)
                return NULL;
        return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
        struct rb_node *prev;

        prev = rb_prev(&em->rb_node);
        if (!prev)
                return NULL;
        return container_of(prev, struct extent_map, rb_node);
}

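/*
 * Helper for btrfs_add_extent_mapping().  Given @existing, the extent map in
 * the tree nearest to @map_start, trim the new extent map @em so that it
 * fits into the free space around @existing and insert the trimmed map.
 */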
static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
                                         struct extent_map *existing,
                                         struct extent_map *em,
                                         u64 map_start)
{
        struct extent_map *prev;
        struct extent_map *next;
        u64 start;
        u64 end;
        u64 start_diff;

        BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

        if (existing->start > map_start) {
                next = existing;
                prev = prev_extent_map(next);
        } else {
                prev = existing;
                next = next_extent_map(prev);
        }

        start = prev ? extent_map_end(prev) : em->start;
        start = max_t(u64, start, em->start);
        end = next ? next->start : extent_map_end(em);
        end = min_t(u64, end, extent_map_end(em));
        start_diff = start - em->start;
        em->start = start;
        em->len = end - start;
        if (em->block_start < EXTENT_MAP_LAST_BYTE &&
            !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
                em->block_start += start_diff;
                em->block_len = em->len;
        }
        return add_extent_mapping(em_tree, em, 0);
}

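/*
 * Insert *em_in into @em_tree.  If the insert fails with -EEXIST because the
 * range overlaps an existing extent map, handle it by either:
 *
 * a) returning the existing map through @em_in, when @start falls inside the
 *    existing map, or
 * b) trimming *em_in to the free space around the existing map and inserting
 *    the trimmed map.
 *
 * @start and @len describe the logical range the caller is interested in,
 * which may differ from the range of *em_in but must overlap it.
 *
 * Returns 0 on success, or -EEXIST if the trimmed map still could not be
 * inserted, in which case *em_in is set to NULL and the new map is freed.
 */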
int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
                             struct extent_map_tree *em_tree,
                             struct extent_map **em_in, u64 start, u64 len)
{
        int ret;
        struct extent_map *em = *em_in;

        ret = add_extent_mapping(em_tree, em, 0);

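        /*
         * -EEXIST means some extent map already overlaps the range we tried
         * to insert; look it up so we can reuse it or merge around it.
         */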
        if (ret == -EEXIST) {
                struct extent_map *existing;

                ret = 0;

                existing = search_extent_mapping(em_tree, start, len);

                trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);

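                /*
                 * existing is never NULL here: there must be an overlapping
                 * extent map to have caused the -EEXIST.  If it covers
                 * @start, hand it back instead of the map we failed to insert.
                 */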
                if (start >= existing->start &&
                    start < extent_map_end(existing)) {
                        free_extent_map(em);
                        *em_in = existing;
                        ret = 0;
                } else {
                        u64 orig_start = em->start;
                        u64 orig_len = em->len;

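                        /*
                         * The existing map does not cover @start; trim the
                         * new map to the space around it and try the insert
                         * again.
                         */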
                        ret = merge_extent_mapping(em_tree, existing,
                                                   em, start);
                        if (ret) {
                                free_extent_map(em);
                                *em_in = NULL;
                                WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
                                          ret, existing->start, existing->len,
                                          orig_start, orig_len);
                        }
                        free_extent_map(existing);
                }
        }

        ASSERT(ret == 0 || ret == -EEXIST);
        return ret;
}