0001
0002
0003
0004
0005
0006 #include <linux/mm.h>
0007 #include <linux/rbtree.h>
0008 #include <trace/events/btrfs.h>
0009 #include "ctree.h"
0010 #include "disk-io.h"
0011 #include "backref.h"
0012 #include "ulist.h"
0013 #include "transaction.h"
0014 #include "delayed-ref.h"
0015 #include "locking.h"
0016 #include "misc.h"
0017 #include "tree-mod-log.h"
0018
0019
0020 #define BACKREF_FOUND_SHARED 6
0021
/*
 * One (inode, file offset) reference to an extent.  Elements are chained
 * into a singly linked list so a single extent can be reported at many
 * inode positions.
 */
struct extent_inode_elem {
	u64 inum;			/* objectid of the referencing inode */
	u64 offset;			/* logical file offset of the reference */
	struct extent_inode_elem *next;	/* next element, or NULL at list end */
};
0027
/*
 * Check whether the file extent item @fi (belonging to @key in leaf @eb)
 * covers @extent_item_pos inside the extent it points to, and if so queue a
 * new extent_inode_elem on the *@eie list recording the inode and the file
 * offset of the reference.
 *
 * Returns 0 when an element was queued, 1 when @extent_item_pos is outside
 * the byte range this item references, -ENOMEM on allocation failure.
 */
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	/*
	 * Only translate the extent offset into a file offset for plain
	 * (uncompressed, unencrypted, default-encoded) extents; for any
	 * other encoding the on-disk byte range does not map 1:1 to file
	 * bytes, so the range check is skipped and offset stays 0.
	 */
	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	/* Push onto the head of the caller's list. */
	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}
0065
0066 static void free_inode_elem_list(struct extent_inode_elem *eie)
0067 {
0068 struct extent_inode_elem *eie_next;
0069
0070 for (; eie; eie = eie_next) {
0071 eie_next = eie->next;
0072 kfree(eie);
0073 }
0074 }
0075
/*
 * Walk every EXTENT_DATA item in leaf @eb and, for each one whose disk
 * bytenr equals @wanted_disk_byte, record the (inode, offset) reference on
 * the *@eie list via check_extent_in_eb().
 *
 * Returns 0 on success, <0 on error (elements queued so far are left on
 * the list for the caller to free).
 */
static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		/* Inline extents have no disk bytenr to match against. */
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}
0115
/* A cached rbtree of prelim_refs together with the number of inserted nodes. */
struct preftree {
	struct rb_root_cached root;
	unsigned int count;	/* trace purposes: number of refs inserted */
};

#define PREFTREE_INIT { .root = RB_ROOT_CACHED, .count = 0 }

/*
 * The three trees preliminary refs are sorted into while collecting
 * backrefs (see add_direct_ref()/add_indirect_ref()):
 *  - direct: refs that already carry a parent bytenr (shared refs)
 *  - indirect: refs identified by (root, key) that still need resolving
 *  - indirect_missing_keys: indirect refs whose key must first be read
 *    from the referenced tree block (see add_missing_keys())
 */
struct preftrees {
	struct preftree direct;
	struct preftree indirect;
	struct preftree indirect_missing_keys;
};
0128
0129
0130
0131
0132
0133
0134
0135
0136
/*
 * Context for checking whether an extent is shared, threaded through the
 * backref walk.  share_count is updated as refs are added/merged (see
 * update_share_count()); more than one ref means the extent is shared.
 */
struct share_check {
	u64 root_objectid;	/* root the caller is interested in */
	u64 inum;		/* inode number the caller is interested in */
	int share_count;	/* running count of distinct refs seen */
};
0142
0143 static inline int extent_is_shared(struct share_check *sc)
0144 {
0145 return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
0146 }
0147
/* Slab cache backing all struct prelim_ref allocations in this file. */
static struct kmem_cache *btrfs_prelim_ref_cache;

/*
 * Create the prelim_ref slab cache at module init time.
 * Returns 0 on success, -ENOMEM if the cache could not be created.
 */
int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}
0161
/* Destroy the prelim_ref slab cache (kmem_cache_destroy accepts NULL). */
void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}
0166
/* Return a prelim_ref to the slab cache. */
static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}
0171
0172
0173
0174
0175
0176
0177 static int prelim_ref_compare(struct prelim_ref *ref1,
0178 struct prelim_ref *ref2)
0179 {
0180 if (ref1->level < ref2->level)
0181 return -1;
0182 if (ref1->level > ref2->level)
0183 return 1;
0184 if (ref1->root_id < ref2->root_id)
0185 return -1;
0186 if (ref1->root_id > ref2->root_id)
0187 return 1;
0188 if (ref1->key_for_search.type < ref2->key_for_search.type)
0189 return -1;
0190 if (ref1->key_for_search.type > ref2->key_for_search.type)
0191 return 1;
0192 if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
0193 return -1;
0194 if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
0195 return 1;
0196 if (ref1->key_for_search.offset < ref2->key_for_search.offset)
0197 return -1;
0198 if (ref1->key_for_search.offset > ref2->key_for_search.offset)
0199 return 1;
0200 if (ref1->parent < ref2->parent)
0201 return -1;
0202 if (ref1->parent > ref2->parent)
0203 return 1;
0204
0205 return 0;
0206 }
0207
0208 static void update_share_count(struct share_check *sc, int oldcount,
0209 int newcount)
0210 {
0211 if ((!sc) || (oldcount == 0 && newcount < 1))
0212 return;
0213
0214 if (oldcount > 0 && newcount < 1)
0215 sc->share_count--;
0216 else if (oldcount < 1 && newcount > 0)
0217 sc->share_count++;
0218 }
0219
0220
0221
0222
0223
0224
/*
 * Add @newref to the given preftree, merging it into an existing node when
 * an equal ref (per prelim_ref_compare()) is already present.  On a merge
 * the new ref's inode list is appended to the existing one, the counts are
 * summed and @newref is freed.  Callers must hold whatever lock protects
 * the tree; ownership of @newref always passes to this function.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical ref found: merge newref into it. */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}
0281
0282
0283
0284
0285
/*
 * Release the entire preftree, freeing every ref.  A postorder traversal
 * is used so nodes can be freed without any rebalancing; the root is then
 * reset wholesale.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode)
		free_pref(ref);

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}
0297
0298
0299
0300
0301
0302
0303
0304
0305
0306
0307
0308
0309
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
/*
 * Allocate a prelim_ref, fill it from the arguments and insert it into
 * @preftree (which takes ownership, possibly merging - see
 * prelim_ref_insert()).  Refs against the data reloc tree are ignored.
 *
 * @count may be negative for delayed drop refs.  When @key is NULL the
 * search key is zeroed; such refs belong in the missing-keys tree.
 *
 * Returns 0, -ENOMEM, or BACKREF_FOUND_SHARED when the share check has
 * already seen more than one ref.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}
0365
0366
/* Direct refs (a known parent bytenr) go into the direct tree. */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}
0375
0376
0377 static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
0378 struct preftrees *preftrees, u64 root_id,
0379 const struct btrfs_key *key, int level,
0380 u64 wanted_disk_byte, int count,
0381 struct share_check *sc, gfp_t gfp_mask)
0382 {
0383 struct preftree *tree = &preftrees->indirect;
0384
0385 if (!key)
0386 tree = &preftrees->indirect_missing_keys;
0387 return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
0388 wanted_disk_byte, count, sc, gfp_mask);
0389 }
0390
0391 static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
0392 {
0393 struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
0394 struct rb_node *parent = NULL;
0395 struct prelim_ref *ref = NULL;
0396 struct prelim_ref target = {};
0397 int result;
0398
0399 target.parent = bytenr;
0400
0401 while (*p) {
0402 parent = *p;
0403 ref = rb_entry(parent, struct prelim_ref, rbnode);
0404 result = prelim_ref_compare(ref, &target);
0405
0406 if (result < 0)
0407 p = &(*p)->rb_left;
0408 else if (result > 0)
0409 p = &(*p)->rb_right;
0410 else
0411 return 1;
0412 }
0413 return 0;
0414 }
0415
/*
 * Starting from the position @path points at, collect every leaf (and its
 * inode references, when @extent_item_pos is given) that holds a file
 * extent item pointing at ref->wanted_disk_byte, and add their bytenrs to
 * @parents.  For non-leaf levels the node at @level is recorded directly.
 *
 * Returns 0 on success (including "walked off the matching items"), <0 on
 * error; on error any partially built inode list is freed.
 */
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing
	 *    to the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for normal backref but bytenr of this leaf
	 *    matches shared data backref
	 * 3. The leaf owner is not equal to the root we are searching
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for normal backref but bytenr of this
		 * leaf matches shared data backref, OR the leaf owner is
		 * not equal to the root we are searching for.
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == BTRFS_SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			/* Only count refs at the offset the backref names. */
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			/* Leaf already known: append eie to its old list. */
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
0530
0531
0532
0533
0534
/*
 * Resolve an indirect reference: walk the tree of ref->root_id down to
 * ref->level at the given time sequence and collect all parent bytenrs of
 * the referenced extent into @parents via add_all_parents().
 *
 * Returns 0 on success, -ENOENT when the root is unusable (being deleted,
 * or under the sanity-test fs), other <0 on error.  The path is always
 * released and lowest_level reset before returning.
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * Grab a reference on the root; commit-root walks use the variant
	 * that also records the commit root generation in the path.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == BTRFS_SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	/* Ref points above the root of this tree: nothing to resolve. */
	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.  Searching with such a key would not find anything,
	 * so clamp the offset to 0 and filter in add_all_parents() instead.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == BTRFS_SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	/* Descend to the highest populated node at or below @level. */
	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}
0641
0642 static struct extent_inode_elem *
0643 unode_aux_to_inode_list(struct ulist_node *node)
0644 {
0645 if (!node)
0646 return NULL;
0647 return (struct extent_inode_elem *)(uintptr_t)node->aux;
0648 }
0649
0650
0651
0652
0653
0654
0655
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665
/*
 * Drain the indirect preftree: resolve each ref to one or more parent
 * bytenrs and move the resulting direct ref(s) into the direct tree.  A
 * ref that resolves to N parents produces the original ref (first parent)
 * plus N-1 copies.  Refs whose root has vanished (-ENOENT) are kept with
 * parent == 0 so missing references don't abort the whole walk.
 *
 * Returns 0 on success, BACKREF_FOUND_SHARED when a share check trips on a
 * foreign root, other <0 on error.  On success the indirect tree is empty.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating the
	 * indirect tree in parallel. Instead we pull refs off the tree one
	 * by one, resolve them and move the resulting direct refs over.
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		/* Adds and drops cancelled out: nothing to resolve. */
		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   ignore_offset);
		/*
		 * we can only tolerate ENOENT,otherwise,we should catch error
		 * and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	ulist_free(parents);
	return ret;
}
0767
0768
0769
0770
/*
 * Drain the indirect_missing_keys tree: for each ref, read the referenced
 * tree block from disk, take its first key as the ref's search key and
 * re-insert the ref into the regular indirect tree.  @lock selects whether
 * the extent buffer is read-locked while the key is copied.
 *
 * Returns 0 on success, <0 on read error (the current ref is freed; refs
 * still in the tree are left for the caller to release).
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		/* Only tree-block refs without a key end up in this tree. */
		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte,
				     ref->root_id, 0, ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		}
		if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}

		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}
0813
0814
0815
0816
0817
/*
 * Add all currently queued delayed refs of @head (up to sequence @seq) to
 * the preftrees.  ADD refs contribute a positive count, DROP refs a
 * negative one, so they cancel against on-disk refs when merged.
 *
 * Runs under head->lock.  Returns 0 on success, BACKREF_FOUND_SHARED when
 * the share check detects a foreign inode or more than one ref, <0 on
 * error.
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	/* Key set only when extent_op updates it; used by tree block refs. */
	struct btrfs_key tmp_op_key;
	struct rb_node *n;
	int count;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &tmp_op_key, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * Found a inum that doesn't match our known inum, we
			 * know it's shared.
			 */
			if (sc && sc->inum && ref->objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);
out:
	spin_unlock(&head->lock);
	return ret;
}
0927
0928
0929
0930
0931
0932
/*
 * Add all inline backrefs of the extent item @path points at to the
 * preftrees.  Also extracts the tree block level into *@info_level for
 * metadata extents.
 *
 * Returns 0 on success, BACKREF_FOUND_SHARED when the share check sees a
 * foreign inode, -EUCLEAN on a corrupted inline ref, other <0 on error.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		/* Non-skinny metadata: level lives in a tree_block_info. */
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		/* Skinny metadata: level is encoded in the key offset. */
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			/* A ref against a foreign inode means "shared". */
			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}
1048
1049
1050
1051
1052
1053
/*
 * Add all non-inline (keyed) backrefs of the extent @bytenr to the
 * preftrees by walking the extent tree items that follow the extent item
 * @path currently points at.
 *
 * Returns 0 on success, BACKREF_FOUND_SHARED when the share check sees a
 * foreign inode, <0 on error.
 */
static int add_keyed_refs(struct btrfs_root *extent_root,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			/* No more items: done without error. */
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			/* A ref against a foreign inode means "shared". */
			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
/*
 * The core backref walk: resolve all references to the extent @bytenr at
 * time @time_seq, adding parent bytenrs to @refs (with inode element lists
 * as aux data when @extent_item_pos is given) and, if @roots is non-NULL,
 * the owning root ids to @roots.
 *
 * Gathers refs from three sources: pending delayed refs of a running
 * transaction, inline refs of the extent item, and keyed backref items -
 * then resolves missing keys and indirect refs.
 *
 * When @trans is NULL the walk runs against the commit roots without
 * locking.  With @sc set, returns BACKREF_FOUND_SHARED as soon as sharing
 * is detected.  Returns 0 on success, <0 on error.
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	/*
	 * Search for the extent item with offset -1 so the search lands
	 * just past it; the preceding slot is then the extent item itself.
	 */
	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == BTRFS_SEQ_LAST)
		path->skip_locking = 1;

again:
	head = NULL;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0) {
		/* An exact match on offset -1 should be impossible. */
		ASSERT(ret != 0);
		ret = -EUCLEAN;
		goto out;
	}

	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != BTRFS_SEQ_LAST) {
		/*
		 * We have a live transaction: also look through the delayed
		 * refs of this extent's head, as those modifications are not
		 * yet visible in the extent tree.
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended: wait for the holder
				 * (lock then unlock immediately) and retry
				 * the whole search.
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		/* Step back to the extent item found by the offset -1 search. */
		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(root, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, sc, ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			/*
			 * Leaf reached via a shared backref: the inode list
			 * was not filled during resolution, scan the leaf
			 * for matching file extent items now.
			 */
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     0, ref->level, NULL);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				}
				if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking)
					btrfs_tree_read_lock(eb);
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie, ignore_offset);
				if (!path->skip_locking)
					btrfs_tree_read_unlock(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * We've recorded that parent, so we must extend
				 * its inode list here.
				 *
				 * However if there was corruption we may not
				 * have found an eie, return an error in this
				 * case.
				 */
				ASSERT(eie);
				if (!eie) {
					ret = -EUCLEAN;
					goto out;
				}
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
1397
1398 static void free_leaf_list(struct ulist *blocks)
1399 {
1400 struct ulist_node *node = NULL;
1401 struct extent_inode_elem *eie;
1402 struct ulist_iterator uiter;
1403
1404 ULIST_ITER_INIT(&uiter);
1405 while ((node = ulist_next(blocks, &uiter))) {
1406 if (!node->aux)
1407 continue;
1408 eie = unode_aux_to_inode_list(node);
1409 free_inode_elem_list(eie);
1410 node->aux = 0;
1411 }
1412
1413 ulist_free(blocks);
1414 }
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424 int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1425 struct btrfs_fs_info *fs_info, u64 bytenr,
1426 u64 time_seq, struct ulist **leafs,
1427 const u64 *extent_item_pos, bool ignore_offset)
1428 {
1429 int ret;
1430
1431 *leafs = ulist_alloc(GFP_NOFS);
1432 if (!*leafs)
1433 return -ENOMEM;
1434
1435 ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1436 *leafs, NULL, extent_item_pos, NULL, ignore_offset);
1437 if (ret < 0 && ret != -ENOENT) {
1438 free_leaf_list(*leafs);
1439 return ret;
1440 }
1441
1442 return 0;
1443 }
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458 static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
1459 struct btrfs_fs_info *fs_info, u64 bytenr,
1460 u64 time_seq, struct ulist **roots,
1461 bool ignore_offset)
1462 {
1463 struct ulist *tmp;
1464 struct ulist_node *node = NULL;
1465 struct ulist_iterator uiter;
1466 int ret;
1467
1468 tmp = ulist_alloc(GFP_NOFS);
1469 if (!tmp)
1470 return -ENOMEM;
1471 *roots = ulist_alloc(GFP_NOFS);
1472 if (!*roots) {
1473 ulist_free(tmp);
1474 return -ENOMEM;
1475 }
1476
1477 ULIST_ITER_INIT(&uiter);
1478 while (1) {
1479 ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1480 tmp, *roots, NULL, NULL, ignore_offset);
1481 if (ret < 0 && ret != -ENOENT) {
1482 ulist_free(tmp);
1483 ulist_free(*roots);
1484 *roots = NULL;
1485 return ret;
1486 }
1487 node = ulist_next(tmp, &uiter);
1488 if (!node)
1489 break;
1490 bytenr = node->val;
1491 cond_resched();
1492 }
1493
1494 ulist_free(tmp);
1495 return 0;
1496 }
1497
1498 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1499 struct btrfs_fs_info *fs_info, u64 bytenr,
1500 u64 time_seq, struct ulist **roots,
1501 bool skip_commit_root_sem)
1502 {
1503 int ret;
1504
1505 if (!trans && !skip_commit_root_sem)
1506 down_read(&fs_info->commit_root_sem);
1507 ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
1508 time_seq, roots, false);
1509 if (!trans && !skip_commit_root_sem)
1510 up_read(&fs_info->commit_root_sem);
1511 return ret;
1512 }
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
/*
 * Check whether the data extent at @bytenr, referenced by inode @inum in
 * @root, is shared (reachable from another root or another inode).
 *
 * Returns: 0 if the extent is exclusively owned, 1 if it is shared,
 *          < 0 on error.
 *
 * @roots and @tmp are caller-provided scratch ulists (so repeated calls,
 * e.g. from fiemap, avoid reallocation); both are released before return.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		struct ulist *roots, struct ulist *tmp)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->root_key.objectid,
		.inum = inum,
		.share_count = 0,
	};

	ulist_init(roots);
	ulist_init(tmp);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		/* No running/writable transaction: walk the commit roots. */
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	/*
	 * Walk up from the extent: each pass resolves the current bytenr's
	 * backrefs; @tmp collects parent tree blocks to visit next.
	 * find_parent_nodes() stops early with BACKREF_FOUND_SHARED the
	 * moment a foreign reference is detected.
	 */
	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* The only condition under which we return 1. */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		/* Reset the per-level counter for the next tree block. */
		shared.share_count = 0;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_release(roots);
	ulist_release(tmp);
	return ret;
}
1595
/*
 * Find the first INODE_EXTREF item of @inode_objectid whose key offset is
 * >= @start_off.
 *
 * On success (0) *ret_extref points into path->nodes[0] — the caller must
 * hold the path while using it — and *found_off (if non-NULL) receives the
 * item's key offset.  Returns -ENOENT when no further extref exists,
 * < 0 on other errors.
 */
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			u64 start_off, struct btrfs_path *path,
			struct btrfs_inode_extref **ret_extref,
			u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be just past the last item in the
			 * leaf, so we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have a different objectid
		 * or type then there are no more to be found in the tree and
		 * we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
/*
 * Turn a name (from an inode ref/extref) into a full filesystem path by
 * walking INODE_REF items up towards the subvolume root.
 *
 * The path is built backwards from the end of @dest: components are
 * separated by '/' and the result is NUL-terminated.  It never starts with
 * '/' since it is relative to the current filesystem.  If @dest is too
 * small, bytes_left goes negative (nothing more is written) so the caller
 * can compute the required size.
 *
 * Returns a pointer to the start of the path inside @dest, or an ERR_PTR.
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					name_off, name_len);
		/* Drop the previous iteration's cloned/locked buffer. */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* Regular exit ahead: a self-referencing parent is the root. */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* Make sure we can use eb after releasing the path. */
		if (eb != eb_in) {
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
1743
1744
1745
1746
1747
1748
/*
 * Find the extent item covering @logical and leave @path pointing at it.
 *
 * On success returns 0, fills *found_key with the extent item's key and
 * sets *flags_ret to either BTRFS_EXTENT_FLAG_TREE_BLOCK or
 * BTRFS_EXTENT_FLAG_DATA.  Returns -ENOENT if no extent covers @logical.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	/* Search past @logical, then step back to the covering item. */
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	/* For metadata items the extent length is implicitly one node. */
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			"logical %llu is not within any extent", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
		logical, logical - found_key->objectid, found_key->objectid,
		found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG();
		return 0;
	}

	return -EIO;
}
1817
1818
1819
1820
1821
1822
1823
1824
1825
/*
 * Helper to iterate the inline refs of an extent item.
 *
 * *ptr must be 0 on the first call; it is used as the iteration cursor and
 * is advanced on every call.  Returns 0 if more refs follow (pass the
 * updated *ptr back in), 1 after the last ref was returned, and < 0 on
 * error (-ENOENT if there are no inline refs at all, -EUCLEAN on a
 * corrupted ref type).
 */
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* First call: locate where the inline refs start. */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* Skinny metadata: no tree_block_info. */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EUCLEAN;

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last item, no more refs afterwards */

	return 0;
}
1874
1875
1876
1877
1878
1879
1880
1881
/*
 * Read the next tree block backref of an extent item.  The tree level and
 * root/parent objectid are returned through @out_level and @out_root.
 *
 * *ptr must be 0 on the first call and is used as iteration state (see
 * get_extent_inline_ref()); it is set to (unsigned long)-1 once the last
 * ref has been consumed.  Returns 0 if a ref was provided, 1 if there was
 * no more data, < 0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	/* Skip non-tree-block refs until we find one (or run out). */
	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* We can treat both ref types equally here. */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		/* Skinny metadata stores the level in the key offset. */
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
1925
1926 static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1927 struct extent_inode_elem *inode_list,
1928 u64 root, u64 extent_item_objectid,
1929 iterate_extent_inodes_t *iterate, void *ctx)
1930 {
1931 struct extent_inode_elem *eie;
1932 int ret = 0;
1933
1934 for (eie = inode_list; eie; eie = eie->next) {
1935 btrfs_debug(fs_info,
1936 "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
1937 extent_item_objectid, eie->inum,
1938 eie->offset, root);
1939 ret = iterate(eie->inum, eie->offset, root, ctx);
1940 if (ret) {
1941 btrfs_debug(fs_info,
1942 "stopping iteration for %llu due to ret=%d",
1943 extent_item_objectid, ret);
1944 break;
1945 }
1946 }
1947
1948 return ret;
1949 }
1950
1951
1952
1953
1954
1955
/*
 * Call @iterate for every inode (in every root) that references the extent
 * @extent_item_objectid.  @extent_item_pos is the byte offset of interest
 * inside the extent, used to filter file extent items that don't cover it.
 *
 * When @search_commit_root is set (or no transaction can be attached), the
 * commit roots are walked under commit_root_sem; otherwise a tree mod log
 * sequence is held so the live trees can be rewound consistently.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid, u64 extent_item_pos,
				int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_attach_transaction(fs_info->tree_root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);
			/* No transaction running: fall back to commit roots. */
			trans = NULL;
		}
	}

	if (trans)
		btrfs_get_tree_mod_seq(fs_info, &seq_elem);
	else
		down_read(&fs_info->commit_root_sem);

	/* First resolve the extent to the set of leaves referencing it... */
	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   seq_elem.seq, &refs,
				   &extent_item_pos, ignore_offset);
	if (ret)
		goto out;

	/* ...then resolve each leaf to the roots that reference it. */
	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						seq_elem.seq, &roots,
						ignore_offset);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			/* aux carries the extent_inode_elem list built by
			 * find_parent_nodes(). */
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &seq_elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}
2030
2031 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
2032 {
2033 struct btrfs_data_container *inodes = ctx;
2034 const size_t c = 3 * sizeof(u64);
2035
2036 if (inodes->bytes_left >= c) {
2037 inodes->bytes_left -= c;
2038 inodes->val[inodes->elem_cnt] = inum;
2039 inodes->val[inodes->elem_cnt + 1] = offset;
2040 inodes->val[inodes->elem_cnt + 2] = root;
2041 inodes->elem_cnt += 3;
2042 } else {
2043 inodes->bytes_missing += c - inodes->bytes_left;
2044 inodes->bytes_left = 0;
2045 inodes->elem_missed += 3;
2046 }
2047
2048 return 0;
2049 }
2050
2051 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2052 struct btrfs_path *path,
2053 void *ctx, bool ignore_offset)
2054 {
2055 int ret;
2056 u64 extent_item_pos;
2057 u64 flags = 0;
2058 struct btrfs_key found_key;
2059 int search_commit_root = path->search_commit_root;
2060
2061 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2062 btrfs_release_path(path);
2063 if (ret < 0)
2064 return ret;
2065 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2066 return -EINVAL;
2067
2068 extent_item_pos = logical - found_key.objectid;
2069 ret = iterate_extent_inodes(fs_info, found_key.objectid,
2070 extent_item_pos, search_commit_root,
2071 build_ino_list, ctx, ignore_offset);
2072
2073 return ret;
2074 }
2075
2076 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2077 struct extent_buffer *eb, struct inode_fs_paths *ipath);
2078
/*
 * Iterate all INODE_REF items of inode @inum and feed every contained name
 * to inode_to_path(), which appends the full path to @ipath.
 *
 * Returns 0 on success, -ENOENT if no refs were found at all, or a
 * negative error.
 */
static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct btrfs_root *fs_root = ipath->fs_root;
	struct btrfs_path *path = ipath->btrfs_path;
	struct extent_buffer *eb;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		/* Resume after the last parent dir we processed. */
		ret = btrfs_find_item(fs_root, path, inum,
				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				&found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		/* Clone so the path can be released before inode_to_path(),
		 * which searches the tree itself. */
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		/* One item may pack several refs (hard links in one dir). */
		for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);

			btrfs_debug(fs_root->fs_info,
				"following ref at offset %u for inode %llu in tree %llu",
				cur, found_key.objectid,
				fs_root->root_key.objectid);
			ret = inode_to_path(parent, name_len,
				      (unsigned long)(iref + 1), eb, ipath);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}
2139
/*
 * Iterate all INODE_EXTREF items of inode @inum and feed every contained
 * (parent, name) to inode_to_path(), which appends the full path to @ipath.
 *
 * Returns 0 on success, -ENOENT if no extrefs were found at all, or a
 * negative error.
 */
static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct btrfs_root *fs_root = ipath->fs_root;
	struct btrfs_path *path = ipath->btrfs_path;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		/* Clone so the path can be released before inode_to_path(),
		 * which searches the tree itself. */
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item_size = btrfs_item_size(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		/* One item may pack several extrefs; walk them all. */
		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = inode_to_path(parent, name_len,
				      (unsigned long)&extref->name, eb, ipath);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		free_extent_buffer(eb);

		/* Next search starts past this item's key offset. */
		offset++;
	}

	btrfs_release_path(path);

	return ret;
}
2201
2202
2203
2204
2205
/*
 * Dump the full path (from the name up to the subvolume root) for one ref
 * into the next free region of ipath->fspath, recording a pointer to it in
 * the val[] array.  Paths are packed backwards from the end of the buffer.
 *
 * Returns 0 if the path could be stored (possibly just accounted as
 * missing when the buffer is full), < 0 on error.
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, struct inode_fs_paths *ipath)
{
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	/* Reserve room for one more pointer slot in val[]. */
	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		/* Path didn't fit; track how much space was missing. */
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2248 {
2249 int ret;
2250 int found_refs = 0;
2251
2252 ret = iterate_inode_refs(inum, ipath);
2253 if (!ret)
2254 ++found_refs;
2255 else if (ret != -ENOENT)
2256 return ret;
2257
2258 ret = iterate_inode_extrefs(inum, ipath);
2259 if (ret == -ENOENT && found_refs)
2260 return 0;
2261
2262 return ret;
2263 }
2264
2265 struct btrfs_data_container *init_data_container(u32 total_bytes)
2266 {
2267 struct btrfs_data_container *data;
2268 size_t alloc_bytes;
2269
2270 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2271 data = kvmalloc(alloc_bytes, GFP_KERNEL);
2272 if (!data)
2273 return ERR_PTR(-ENOMEM);
2274
2275 if (total_bytes >= sizeof(*data)) {
2276 data->bytes_left = total_bytes - sizeof(*data);
2277 data->bytes_missing = 0;
2278 } else {
2279 data->bytes_missing = sizeof(*data) - total_bytes;
2280 data->bytes_left = 0;
2281 }
2282
2283 data->elem_cnt = 0;
2284 data->elem_missed = 0;
2285
2286 return data;
2287 }
2288
2289
2290
2291
2292
2293
2294
2295 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2296 struct btrfs_path *path)
2297 {
2298 struct inode_fs_paths *ifp;
2299 struct btrfs_data_container *fspath;
2300
2301 fspath = init_data_container(total_bytes);
2302 if (IS_ERR(fspath))
2303 return ERR_CAST(fspath);
2304
2305 ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2306 if (!ifp) {
2307 kvfree(fspath);
2308 return ERR_PTR(-ENOMEM);
2309 }
2310
2311 ifp->btrfs_path = path;
2312 ifp->fspath = fspath;
2313 ifp->fs_root = fs_root;
2314
2315 return ifp;
2316 }
2317
2318 void free_ipath(struct inode_fs_paths *ipath)
2319 {
2320 if (!ipath)
2321 return;
2322 kvfree(ipath->fspath);
2323 kfree(ipath);
2324 }
2325
2326 struct btrfs_backref_iter *btrfs_backref_iter_alloc(
2327 struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
2328 {
2329 struct btrfs_backref_iter *ret;
2330
2331 ret = kzalloc(sizeof(*ret), gfp_flag);
2332 if (!ret)
2333 return NULL;
2334
2335 ret->path = btrfs_alloc_path();
2336 if (!ret->path) {
2337 kfree(ret);
2338 return NULL;
2339 }
2340
2341
2342 ret->path->search_commit_root = 1;
2343 ret->path->skip_locking = 1;
2344 ret->fs_info = fs_info;
2345
2346 return ret;
2347 }
2348
/*
 * Position the backref iterator at the first backref of the tree block at
 * @bytenr: either the first inline ref inside the extent item, or the
 * first keyed SHARED_BLOCK/TREE_BLOCK ref item if there are no inline
 * refs.  Data extents are rejected with -ENOTSUPP.
 *
 * On error the path is released via btrfs_backref_iter_release().
 */
int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = iter->fs_info;
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	int ret;

	/* Search just past (bytenr, METADATA_ITEM) then step back one slot. */
	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	iter->bytenr = bytenr;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		/* Exact match of (bytenr, -1) should be impossible. */
		ret = -EUCLEAN;
		goto release;
	}
	if (path->slots[0] == 0) {
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		ret = -EUCLEAN;
		goto release;
	}
	path->slots[0]--;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
		ret = -ENOENT;
		goto release;
	}
	memcpy(&iter->cur_key, &key, sizeof(key));
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->end_ptr = (u32)(iter->item_ptr +
			btrfs_item_size(path->nodes[0], path->slots[0]));
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_extent_item);

	/*
	 * Only support iteration on tree backrefs yet.
	 *
	 * This is an extra precaution for non skinny-metadata filesystems,
	 * where EXTENT_ITEM is also used for tree blocks, so that we can
	 * only rely on the extent flags to tell data and tree blocks apart.
	 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref. */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(extent_root, path);

		/* No inline nor keyed ref. */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				path->slots[0]);
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
				      path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
/*
 * Advance the backref iterator to the next backref item.
 *
 * Returns 0 if the iterator now points at a valid backref (inline or
 * keyed), 1 if there are no more backrefs for this tree block, < 0 on
 * error.  Inline refs are consumed first; afterwards we move on to keyed
 * SHARED_BLOCK/TREE_BLOCK ref items.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
	struct btrfs_root *extent_root;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	u32 size;

	if (btrfs_backref_iter_is_inline_ref(iter)) {
		/* We're still inside the inline refs. */
		ASSERT(iter->cur_ptr < iter->end_ptr);

		if (btrfs_backref_has_tree_block_info(iter)) {
			/* First tree block info: skip the fixed header. */
			size = sizeof(struct btrfs_tree_block_info);
		} else {
			/* Use inline ref type to determine the size. */
			int type;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_extent_inline_ref_type(eb, iref);

			size = btrfs_extent_inline_ref_size(type);
		}
		iter->cur_ptr += size;
		if (iter->cur_ptr < iter->end_ptr)
			return 0;

		/* All inline items iterated, fall through to keyed refs. */
	}

	/* We're at keyed items, there is no inline ref, go to next item. */
	extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
	ret = btrfs_next_item(extent_root, iter->path);
	if (ret)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
	if (iter->cur_key.objectid != iter->bytenr ||
	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
		return 1;
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);
	iter->cur_ptr = iter->item_ptr;
	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
						path->slots[0]);
	return 0;
}
2498
2499 void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2500 struct btrfs_backref_cache *cache, int is_reloc)
2501 {
2502 int i;
2503
2504 cache->rb_root = RB_ROOT;
2505 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2506 INIT_LIST_HEAD(&cache->pending[i]);
2507 INIT_LIST_HEAD(&cache->changed);
2508 INIT_LIST_HEAD(&cache->detached);
2509 INIT_LIST_HEAD(&cache->leaves);
2510 INIT_LIST_HEAD(&cache->pending_edge);
2511 INIT_LIST_HEAD(&cache->useless_node);
2512 cache->fs_info = fs_info;
2513 cache->is_reloc = is_reloc;
2514 }
2515
2516 struct btrfs_backref_node *btrfs_backref_alloc_node(
2517 struct btrfs_backref_cache *cache, u64 bytenr, int level)
2518 {
2519 struct btrfs_backref_node *node;
2520
2521 ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2522 node = kzalloc(sizeof(*node), GFP_NOFS);
2523 if (!node)
2524 return node;
2525
2526 INIT_LIST_HEAD(&node->list);
2527 INIT_LIST_HEAD(&node->upper);
2528 INIT_LIST_HEAD(&node->lower);
2529 RB_CLEAR_NODE(&node->rb_node);
2530 cache->nr_nodes++;
2531 node->level = level;
2532 node->bytenr = bytenr;
2533
2534 return node;
2535 }
2536
2537 struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2538 struct btrfs_backref_cache *cache)
2539 {
2540 struct btrfs_backref_edge *edge;
2541
2542 edge = kzalloc(sizeof(*edge), GFP_NOFS);
2543 if (edge)
2544 cache->nr_edges++;
2545 return edge;
2546 }
2547
2548
2549
2550
2551
2552
2553
2554
/*
 * Drop one backref node from the cache, detaching and freeing all edges
 * towards its upper-level nodes.  Cleanup happens bottom-up, so the node
 * must either be a lowest node or already detached (enforced below).
 */
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		btrfs_backref_free_edge(cache, edge);

		/*
		 * If the upper node just lost its last child, it becomes a
		 * lowest node itself and moves onto the leaves list.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	btrfs_backref_drop_node(cache, node);
}
2585
2586
2587
2588
/*
 * Release all nodes and edges from the backref cache, then assert that
 * every list is empty and no node/edge allocation leaked.
 */
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int i;

	/* Detached nodes first, then drain the leaves bottom-up. */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct btrfs_backref_node, lower);
		btrfs_backref_cleanup_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->pending_edge));
	ASSERT(list_empty(&cache->useless_node));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
/*
 * Handle a direct (SHARED_BLOCK_REF) tree backref for @cur: link it to the
 * parent tree block named by ref_key->offset, allocating/caching the upper
 * node as needed.  A self-referencing key means @cur is a reloc root.
 *
 * Returns 0 on success, < 0 on error.
 */
static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
				      struct btrfs_key *ref_key,
				      struct btrfs_backref_node *cur)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *upper;
	struct rb_node *rb_node;

	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);

	/* Only reloc root uses a backref pointing to itself. */
	if (ref_key->objectid == ref_key->offset) {
		struct btrfs_root *root;

		cur->is_reloc_root = 1;
		/* Only reloc backref cache cares about a specific root. */
		if (cache->is_reloc) {
			root = find_reloc_root(cache->fs_info, cur->bytenr);
			if (!root)
				return -ENOENT;
			cur->root = root;
		} else {
			/*
			 * For a generic-purpose backref cache, a reloc root
			 * node is useless.
			 */
			list_add(&cur->list, &cache->useless_node);
		}
		return 0;
	}

	edge = btrfs_backref_alloc_edge(cache);
	if (!edge)
		return -ENOMEM;

	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
	if (!rb_node) {
		/* Parent node not yet cached */
		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
					   cur->level + 1);
		if (!upper) {
			btrfs_backref_free_edge(cache, edge);
			return -ENOMEM;
		}

		/*
		 * Backrefs for the upper level block aren't cached, add the
		 * block to the pending list.
		 */
		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		/* Parent node already cached */
		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		ASSERT(upper->checked);
		INIT_LIST_HEAD(&edge->list[UPPER]);
	}
	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
	return 0;
}
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703 static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
2704 struct btrfs_path *path,
2705 struct btrfs_key *ref_key,
2706 struct btrfs_key *tree_key,
2707 struct btrfs_backref_node *cur)
2708 {
2709 struct btrfs_fs_info *fs_info = cache->fs_info;
2710 struct btrfs_backref_node *upper;
2711 struct btrfs_backref_node *lower;
2712 struct btrfs_backref_edge *edge;
2713 struct extent_buffer *eb;
2714 struct btrfs_root *root;
2715 struct rb_node *rb_node;
2716 int level;
2717 bool need_check = true;
2718 int ret;
2719
2720 root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2721 if (IS_ERR(root))
2722 return PTR_ERR(root);
2723 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2724 cur->cowonly = 1;
2725
2726 if (btrfs_root_level(&root->root_item) == cur->level) {
2727
2728 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2740 btrfs_put_root(root);
2741 list_add(&cur->list, &cache->useless_node);
2742 } else {
2743 cur->root = root;
2744 }
2745 return 0;
2746 }
2747
2748 level = cur->level + 1;
2749
2750
2751 path->search_commit_root = 1;
2752 path->skip_locking = 1;
2753 path->lowest_level = level;
2754 ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
2755 path->lowest_level = 0;
2756 if (ret < 0) {
2757 btrfs_put_root(root);
2758 return ret;
2759 }
2760 if (ret > 0 && path->slots[level] > 0)
2761 path->slots[level]--;
2762
2763 eb = path->nodes[level];
2764 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2765 btrfs_err(fs_info,
2766 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
2767 cur->bytenr, level - 1, root->root_key.objectid,
2768 tree_key->objectid, tree_key->type, tree_key->offset);
2769 btrfs_put_root(root);
2770 ret = -ENOENT;
2771 goto out;
2772 }
2773 lower = cur;
2774
2775
2776 for (; level < BTRFS_MAX_LEVEL; level++) {
2777 if (!path->nodes[level]) {
2778 ASSERT(btrfs_root_bytenr(&root->root_item) ==
2779 lower->bytenr);
2780
2781 if (btrfs_should_ignore_reloc_root(root) &&
2782 cache->is_reloc) {
2783 btrfs_put_root(root);
2784 list_add(&lower->list, &cache->useless_node);
2785 } else {
2786 lower->root = root;
2787 }
2788 break;
2789 }
2790
2791 edge = btrfs_backref_alloc_edge(cache);
2792 if (!edge) {
2793 btrfs_put_root(root);
2794 ret = -ENOMEM;
2795 goto out;
2796 }
2797
2798 eb = path->nodes[level];
2799 rb_node = rb_simple_search(&cache->rb_root, eb->start);
2800 if (!rb_node) {
2801 upper = btrfs_backref_alloc_node(cache, eb->start,
2802 lower->level + 1);
2803 if (!upper) {
2804 btrfs_put_root(root);
2805 btrfs_backref_free_edge(cache, edge);
2806 ret = -ENOMEM;
2807 goto out;
2808 }
2809 upper->owner = btrfs_header_owner(eb);
2810 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2811 upper->cowonly = 1;
2812
2813
2814
2815
2816
2817 if (btrfs_block_can_be_shared(root, eb))
2818 upper->checked = 0;
2819 else
2820 upper->checked = 1;
2821
2822
2823
2824
2825
2826
2827 if (!upper->checked && need_check) {
2828 need_check = false;
2829 list_add_tail(&edge->list[UPPER],
2830 &cache->pending_edge);
2831 } else {
2832 if (upper->checked)
2833 need_check = true;
2834 INIT_LIST_HEAD(&edge->list[UPPER]);
2835 }
2836 } else {
2837 upper = rb_entry(rb_node, struct btrfs_backref_node,
2838 rb_node);
2839 ASSERT(upper->checked);
2840 INIT_LIST_HEAD(&edge->list[UPPER]);
2841 if (!upper->owner)
2842 upper->owner = btrfs_header_owner(eb);
2843 }
2844 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2845
2846 if (rb_node) {
2847 btrfs_put_root(root);
2848 break;
2849 }
2850 lower = upper;
2851 upper = NULL;
2852 }
2853 out:
2854 btrfs_release_path(path);
2855 return ret;
2856 }
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
/*
 * Add backref node @cur into @cache.
 *
 * NOTE: Even when this returns 0, @cur is not fully cached yet, as its upper
 *	 links aren't bi-directional; finish them with
 *	 btrfs_backref_finish_upper_links().
 *
 * @path:	Released path, used for indirect tree backref lookups
 * @iter:	Released backref iter, used for extent tree search
 * @node_key:	The first key of the tree block @cur represents
 * @cur:	The backref node being filled in
 *
 * Return 0 on success, a negative errno on failure.
 */
int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
				struct btrfs_path *path,
				struct btrfs_backref_iter *iter,
				struct btrfs_key *node_key,
				struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *exist;
	int ret;

	ret = btrfs_backref_iter_start(iter, cur->bytenr);
	if (ret < 0)
		return ret;
	/*
	 * Skip the leading btrfs_tree_block_info: we don't use the key stored
	 * in it, we fetch the key from the tree block instead.
	 */
	if (btrfs_backref_has_tree_block_info(iter)) {
		ret = btrfs_backref_iter_next(iter);
		if (ret < 0)
			goto out;
		/* No extra backref means the tree block is corrupted */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * A backref edge was already added when processing a
		 * BTRFS_TREE_BLOCK_REF_KEY for the upper block.
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * Queue the upper level block if we still need to check its
		 * backrefs.
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		exist = NULL;
	}

	/* Iterate all backref items of this extent */
	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		struct extent_buffer *eb;
		struct btrfs_key key;
		int type;

		cond_resched();
		eb = btrfs_backref_get_eb(iter);

		key.objectid = iter->bytenr;
		if (btrfs_backref_iter_is_inline_ref(iter)) {
			struct btrfs_extent_inline_ref *iref;

			/* Convert the inline backref into key form */
			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				ret = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
		} else {
			key.type = iter->cur_key.type;
			key.offset = iter->cur_key.offset;
		}

		/*
		 * A parent node was found and matches the current backref; no
		 * need to rebuild this node for this ref.
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			continue;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ret = handle_direct_tree_backref(cache, &key, cur);
			if (ret < 0)
				goto out;
			continue;
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			/* Obsolete v0 backrefs are unsupported */
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto out;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			/* Not a tree block backref, ignore */
			continue;
		}

		/*
		 * TREE_BLOCK_REF: key.offset is the root objectid; a tree
		 * search is needed to find the parent bytenr(s).
		 */
		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
						   cur);
		if (ret < 0)
			goto out;
	}
	ret = 0;
	cur->checked = 1;
	WARN_ON(exist);
out:
	btrfs_backref_iter_release(iter);
	return ret;
}
2991
2992
2993
2994
/*
 * Finish the upwards linkage created by btrfs_backref_add_tree_node(),
 * making all edges reachable from @start bi-directional and inserting the
 * non-COW-only nodes into the cache rb tree.
 *
 * Return 0 on success, -EUCLEAN if the cache state is inconsistent.
 */
int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start)
{
	struct list_head *useless_node = &cache->useless_node;
	struct btrfs_backref_edge *edge;
	struct rb_node *rb_node;
	LIST_HEAD(pending_edge);

	ASSERT(start->checked);

	/* Insert this node into the cache if it's not COW-only */
	if (!start->cowonly) {
		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
					   &start->rb_node);
		if (rb_node)
			btrfs_backref_panic(cache->fs_info, start->bytenr,
					    -EEXIST);
		list_add_tail(&start->lower, &cache->leaves);
	}

	/*
	 * Breadth-first search over all related edges.
	 *
	 * The starting points are all the upper edges of @start.
	 */
	list_for_each_entry(edge, &start->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &pending_edge);

	while (!list_empty(&pending_edge)) {
		struct btrfs_backref_node *upper;
		struct btrfs_backref_node *lower;

		edge = list_first_entry(&pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		lower = edge->node[LOWER];

		/* Parent is detached, no need to keep any edges */
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			btrfs_backref_free_edge(cache, edge);

			/* Lower node has become an orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
			continue;
		}

		/*
		 * Nodes newly added during the current backref build haven't
		 * been linked into the cache rb tree yet, so a populated
		 * upper->rb_node means a cache hit: only the edge needs
		 * linking, everything above @upper is already linked.
		 */
		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			/* The upper node gained a lower child, it is no
			 * longer a lowest node */
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		/* Sanity check: we shouldn't have any unchecked nodes */
		if (!upper->checked) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Sanity check: COW-only status must match along the chain */
		if (start->cowonly != upper->cowonly) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Only cache non-COW-only (subvolume tree) blocks */
		if (!upper->cowonly) {
			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
						   &upper->rb_node);
			if (rb_node) {
				btrfs_backref_panic(cache->fs_info,
						upper->bytenr, -EEXIST);
				return -EUCLEAN;
			}
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		/*
		 * Also queue all the parent edges of this uncached node to
		 * finish the upper linkage.
		 */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &pending_edge);
	}
	return 0;
}
3095
/*
 * Clean up the backref cache after an error during backref tree building:
 * drop all pending edges, all useless nodes, and finally @node itself (if it
 * wasn't already dropped as a useless node).
 */
void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *lower;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	/*
	 * Empty the useless_node list first; nodes that are truly orphaned
	 * are re-collected by the edge walk below before being dropped.
	 */
	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
				   struct btrfs_backref_node, list);
		list_del_init(&lower->list);
	}
	/* Free every pending edge, collecting orphaned nodes on the way */
	while (!list_empty(&cache->pending_edge)) {
		edge = list_first_entry(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		list_del(&edge->list[UPPER]);
		list_del(&edge->list[LOWER]);
		lower = edge->node[LOWER];
		upper = edge->node[UPPER];
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Lower is no longer linked to any upper backref nodes and
		 * isn't in the cache, so we can free it ourselves.
		 */
		if (list_empty(&lower->upper) &&
		    RB_EMPTY_NODE(&lower->rb_node))
			list_add(&lower->list, &cache->useless_node);

		/* Upper is cached, keep it */
		if (!RB_EMPTY_NODE(&upper->rb_node))
			continue;

		/* Add this node's upper edges to the list to process */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER],
				      &cache->pending_edge);
		if (list_empty(&upper->upper))
			list_add(&upper->list, &cache->useless_node);
	}

	/* Drop every collected useless node */
	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
				   struct btrfs_backref_node, list);
		list_del_init(&lower->list);
		/* Don't double-drop @node below if it was in the list */
		if (lower == node)
			node = NULL;
		btrfs_backref_drop_node(cache, lower);
	}

	btrfs_backref_cleanup_node(cache, node);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
}