// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"
#include "tree-mod-log.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
    u64 inum;
    u64 offset;
    struct extent_inode_elem *next;
};

static int check_extent_in_eb(const struct btrfs_key *key,
                  const struct extent_buffer *eb,
                  const struct btrfs_file_extent_item *fi,
                  u64 extent_item_pos,
                  struct extent_inode_elem **eie,
                  bool ignore_offset)
{
    u64 offset = 0;
    struct extent_inode_elem *e;

    if (!ignore_offset &&
        !btrfs_file_extent_compression(eb, fi) &&
        !btrfs_file_extent_encryption(eb, fi) &&
        !btrfs_file_extent_other_encoding(eb, fi)) {
        u64 data_offset;
        u64 data_len;

        data_offset = btrfs_file_extent_offset(eb, fi);
        data_len = btrfs_file_extent_num_bytes(eb, fi);

        if (extent_item_pos < data_offset ||
            extent_item_pos >= data_offset + data_len)
            return 1;
        offset = extent_item_pos - data_offset;
    }

    e = kmalloc(sizeof(*e), GFP_NOFS);
    if (!e)
        return -ENOMEM;

    e->next = *eie;
    e->inum = key->objectid;
    e->offset = key->offset + offset;
    *eie = e;

    return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
    struct extent_inode_elem *eie_next;

    for (; eie; eie = eie_next) {
        eie_next = eie->next;
        kfree(eie);
    }
}
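
/*
 * Illustrative sketch, not kernel code: the extent_inode_elem list built by
 * check_extent_in_eb() is a simple singly linked list, grown by prepending
 * and torn down by free_inode_elem_list().  A hypothetical caller collecting
 * two hits would see (key1/key2, fi1/fi2 and pos are placeholder names):
 *
 *    struct extent_inode_elem *eie = NULL;
 *
 *    check_extent_in_eb(&key1, eb, fi1, pos, &eie, false);
 *    check_extent_in_eb(&key2, eb, fi2, pos, &eie, false);
 *    // eie -> key2 element -> key1 element -> NULL
 *    free_inode_elem_list(eie);
 */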

static int find_extent_in_eb(const struct extent_buffer *eb,
                 u64 wanted_disk_byte, u64 extent_item_pos,
                 struct extent_inode_elem **eie,
                 bool ignore_offset)
{
    u64 disk_byte;
    struct btrfs_key key;
    struct btrfs_file_extent_item *fi;
    int slot;
    int nritems;
    int extent_type;
    int ret;

    /*
     * From the shared data ref, we only have the leaf but we need
     * the key.  Thus, we must look at all items and check whether we
     * find one (or more) with a reference to our extent item.
     */
    nritems = btrfs_header_nritems(eb);
    for (slot = 0; slot < nritems; ++slot) {
        btrfs_item_key_to_cpu(eb, &key, slot);
        if (key.type != BTRFS_EXTENT_DATA_KEY)
            continue;
        fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
        extent_type = btrfs_file_extent_type(eb, fi);
        if (extent_type == BTRFS_FILE_EXTENT_INLINE)
            continue;
        /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
        disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
        if (disk_byte != wanted_disk_byte)
            continue;

        ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
        if (ret < 0)
            return ret;
    }

    return 0;
}

struct preftree {
    struct rb_root_cached root;
    unsigned int count;
};

#define PREFTREE_INIT   { .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
    struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
    struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
    struct preftree indirect_missing_keys;
};

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
    u64 root_objectid;
    u64 inum;
    int share_count;
};

static inline int extent_is_shared(struct share_check *sc)
{
    return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
    btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
                    sizeof(struct prelim_ref),
                    0,
                    SLAB_MEM_SPREAD,
                    NULL);
    if (!btrfs_prelim_ref_cache)
        return -ENOMEM;
    return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
    kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
    kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
                  struct prelim_ref *ref2)
{
    if (ref1->level < ref2->level)
        return -1;
    if (ref1->level > ref2->level)
        return 1;
    if (ref1->root_id < ref2->root_id)
        return -1;
    if (ref1->root_id > ref2->root_id)
        return 1;
    if (ref1->key_for_search.type < ref2->key_for_search.type)
        return -1;
    if (ref1->key_for_search.type > ref2->key_for_search.type)
        return 1;
    if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
        return -1;
    if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
        return 1;
    if (ref1->key_for_search.offset < ref2->key_for_search.offset)
        return -1;
    if (ref1->key_for_search.offset > ref2->key_for_search.offset)
        return 1;
    if (ref1->parent < ref2->parent)
        return -1;
    if (ref1->parent > ref2->parent)
        return 1;

    return 0;
}

static void update_share_count(struct share_check *sc, int oldcount,
                   int newcount)
{
    if ((!sc) || (oldcount == 0 && newcount < 1))
        return;

    if (oldcount > 0 && newcount < 1)
        sc->share_count--;
    else if (oldcount < 1 && newcount > 0)
        sc->share_count++;
}
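
/*
 * Sketch of the transitions above, assuming a share_check sc starting at
 * share_count == 0 (the counts are made-up values for illustration):
 *
 *    update_share_count(&sc, 0, 1);    // crosses zero: share_count == 1
 *    update_share_count(&sc, 1, 2);    // stays positive: share_count == 1
 *    update_share_count(&sc, 2, 0);    // crosses back: share_count == 0
 *
 * Only transitions across zero move the counter, so a ref whose count is
 * later merged down to zero gives back exactly the contribution it made.
 */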

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
                  struct preftree *preftree,
                  struct prelim_ref *newref,
                  struct share_check *sc)
{
    struct rb_root_cached *root;
    struct rb_node **p;
    struct rb_node *parent = NULL;
    struct prelim_ref *ref;
    int result;
    bool leftmost = true;

    root = &preftree->root;
    p = &root->rb_root.rb_node;

    while (*p) {
        parent = *p;
        ref = rb_entry(parent, struct prelim_ref, rbnode);
        result = prelim_ref_compare(ref, newref);
        if (result < 0) {
            p = &(*p)->rb_left;
        } else if (result > 0) {
            p = &(*p)->rb_right;
            leftmost = false;
        } else {
            /* Identical refs, merge them and free @newref */
            struct extent_inode_elem *eie = ref->inode_list;

            while (eie && eie->next)
                eie = eie->next;

            if (!eie)
                ref->inode_list = newref->inode_list;
            else
                eie->next = newref->inode_list;
            trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
                             preftree->count);
            /*
             * A delayed ref can have newref->count < 0.
             * The ref->count is updated to follow any
             * BTRFS_[ADD|DROP]_DELAYED_REF actions.
             */
            update_share_count(sc, ref->count,
                       ref->count + newref->count);
            ref->count += newref->count;
            free_pref(newref);
            return;
        }
    }

    update_share_count(sc, 0, newref->count);
    preftree->count++;
    trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
    rb_link_node(&newref->rbnode, parent, p);
    rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
    struct prelim_ref *ref, *next_ref;

    rbtree_postorder_for_each_entry_safe(ref, next_ref,
                         &preftree->root.rb_root, rbnode)
        free_pref(ref);

    preftree->root = RB_ROOT_CACHED;
    preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
              struct preftree *preftree, u64 root_id,
              const struct btrfs_key *key, int level, u64 parent,
              u64 wanted_disk_byte, int count,
              struct share_check *sc, gfp_t gfp_mask)
{
    struct prelim_ref *ref;

    if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
        return 0;

    ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
    if (!ref)
        return -ENOMEM;

    ref->root_id = root_id;
    if (key)
        ref->key_for_search = *key;
    else
        memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

    ref->inode_list = NULL;
    ref->level = level;
    ref->count = count;
    ref->parent = parent;
    ref->wanted_disk_byte = wanted_disk_byte;
    prelim_ref_insert(fs_info, preftree, ref, sc);
    return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
              struct preftrees *preftrees, int level, u64 parent,
              u64 wanted_disk_byte, int count,
              struct share_check *sc, gfp_t gfp_mask)
{
    return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
                  parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
                struct preftrees *preftrees, u64 root_id,
                const struct btrfs_key *key, int level,
                u64 wanted_disk_byte, int count,
                struct share_check *sc, gfp_t gfp_mask)
{
    struct preftree *tree = &preftrees->indirect;

    if (!key)
        tree = &preftrees->indirect_missing_keys;
    return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
                  wanted_disk_byte, count, sc, gfp_mask);
}
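
/*
 * For reference, how the walkers below map backref types onto these two
 * helpers (a summary of add_inline_refs(), add_keyed_refs() and
 * add_delayed_refs(), not new behaviour):
 *
 *    BTRFS_SHARED_BLOCK_REF_KEY -> add_direct_ref(level + 1, parent)
 *    BTRFS_SHARED_DATA_REF_KEY  -> add_direct_ref(0, parent)
 *    BTRFS_TREE_BLOCK_REF_KEY   -> add_indirect_ref(root, key or NULL)
 *    BTRFS_EXTENT_DATA_REF_KEY  -> add_indirect_ref(root, &key, level 0)
 */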

static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
    struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
    struct rb_node *parent = NULL;
    struct prelim_ref *ref = NULL;
    struct prelim_ref target = {};
    int result;

    target.parent = bytenr;

    while (*p) {
        parent = *p;
        ref = rb_entry(parent, struct prelim_ref, rbnode);
        result = prelim_ref_compare(ref, &target);

        if (result < 0)
            p = &(*p)->rb_left;
        else if (result > 0)
            p = &(*p)->rb_right;
        else
            return 1;
    }
    return 0;
}
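
/*
 * Note on the lookup above: the zeroed @target compares equal only to refs
 * that were added with root_id == 0, a NULL key and level == 0, i.e. the
 * shared data refs inserted through add_direct_ref().  Shared tree block
 * refs sit at level > 0 and can never match.
 */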

static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
               struct ulist *parents,
               struct preftrees *preftrees, struct prelim_ref *ref,
               int level, u64 time_seq, const u64 *extent_item_pos,
               bool ignore_offset)
{
    int ret = 0;
    int slot;
    struct extent_buffer *eb;
    struct btrfs_key key;
    struct btrfs_key *key_for_search = &ref->key_for_search;
    struct btrfs_file_extent_item *fi;
    struct extent_inode_elem *eie = NULL, *old = NULL;
    u64 disk_byte;
    u64 wanted_disk_byte = ref->wanted_disk_byte;
    u64 count = 0;
    u64 data_offset;

    if (level != 0) {
        eb = path->nodes[level];
        ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
        if (ret < 0)
            return ret;
        return 0;
    }

    /*
     * 1. We normally enter this function with the path already pointing to
     *    the first item to check.  But sometimes, we may enter it with
     *    slot == nritems.
     * 2. We are searching for a normal backref but the bytenr of this leaf
     *    matches a shared data backref.
     * 3. The leaf owner is not equal to the root we are searching for.
     *
     * For these cases, go to the next leaf before we continue.
     */
    eb = path->nodes[0];
    if (path->slots[0] >= btrfs_header_nritems(eb) ||
        is_shared_data_backref(preftrees, eb->start) ||
        ref->root_id != btrfs_header_owner(eb)) {
        if (time_seq == BTRFS_SEQ_LAST)
            ret = btrfs_next_leaf(root, path);
        else
            ret = btrfs_next_old_leaf(root, path, time_seq);
    }

    while (!ret && count < ref->count) {
        eb = path->nodes[0];
        slot = path->slots[0];

        btrfs_item_key_to_cpu(eb, &key, slot);

        if (key.objectid != key_for_search->objectid ||
            key.type != BTRFS_EXTENT_DATA_KEY)
            break;

        /*
         * We are searching for a normal backref but the bytenr of this
         * leaf matches a shared data backref, OR
         * the leaf owner is not equal to the root we are searching for.
         */
        if (slot == 0 &&
            (is_shared_data_backref(preftrees, eb->start) ||
             ref->root_id != btrfs_header_owner(eb))) {
            if (time_seq == BTRFS_SEQ_LAST)
                ret = btrfs_next_leaf(root, path);
            else
                ret = btrfs_next_old_leaf(root, path, time_seq);
            continue;
        }
        fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
        disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
        data_offset = btrfs_file_extent_offset(eb, fi);

        if (disk_byte == wanted_disk_byte) {
            eie = NULL;
            old = NULL;
            if (ref->key_for_search.offset == key.offset - data_offset)
                count++;
            else
                goto next;
            if (extent_item_pos) {
                ret = check_extent_in_eb(&key, eb, fi,
                        *extent_item_pos,
                        &eie, ignore_offset);
                if (ret < 0)
                    break;
            }
            if (ret > 0)
                goto next;
            ret = ulist_add_merge_ptr(parents, eb->start,
                          eie, (void **)&old, GFP_NOFS);
            if (ret < 0)
                break;
            if (!ret && extent_item_pos) {
                while (old->next)
                    old = old->next;
                old->next = eie;
            }
            eie = NULL;
        }
next:
        if (time_seq == BTRFS_SEQ_LAST)
            ret = btrfs_next_item(root, path);
        else
            ret = btrfs_next_old_item(root, path, time_seq);
    }

    if (ret > 0)
        ret = 0;
    else if (ret < 0)
        free_inode_elem_list(eie);
    return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                struct btrfs_path *path, u64 time_seq,
                struct preftrees *preftrees,
                struct prelim_ref *ref, struct ulist *parents,
                const u64 *extent_item_pos, bool ignore_offset)
{
    struct btrfs_root *root;
    struct extent_buffer *eb;
    int ret = 0;
    int root_level;
    int level = ref->level;
    struct btrfs_key search_key = ref->key_for_search;

    /*
     * If we're search_commit_root we could possibly be holding locks on
     * other tree nodes.  This happens when qgroups do backref walks when
     * adding new delayed refs.  To deal with this we need to look in cache
     * for the root, and if we don't find it then we need to search the
     * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
     * here.
     */
    if (path->search_commit_root)
        root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
    else
        root = btrfs_get_fs_root(fs_info, ref->root_id, false);
    if (IS_ERR(root)) {
        ret = PTR_ERR(root);
        goto out_free;
    }

    if (!path->search_commit_root &&
        test_bit(BTRFS_ROOT_DELETING, &root->state)) {
        ret = -ENOENT;
        goto out;
    }

    if (btrfs_is_testing(fs_info)) {
        ret = -ENOENT;
        goto out;
    }

    if (path->search_commit_root)
        root_level = btrfs_header_level(root->commit_root);
    else if (time_seq == BTRFS_SEQ_LAST)
        root_level = btrfs_header_level(root->node);
    else
        root_level = btrfs_old_root_level(root, time_seq);

    if (root_level + 1 == level)
        goto out;

    /*
     * We can often find data backrefs with an offset that is too large
     * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
     * subtracting a file's offset with the data offset of its
     * corresponding extent data item. This can happen for example in the
     * clone ioctl.
     *
     * So if we detect such a case we set the search key's offset to zero
     * to make sure we will find the matching file extent item at
     * add_all_parents(), otherwise we will miss it because the offset
     * taken from the backref is much larger than the offset of the file
     * extent item. This can make us scan a very large number of file
     * extent items, but at least it will not make us miss any.
     *
     * This is an ugly workaround for a behaviour that should have never
     * existed, but it does and a fix for the clone ioctl would touch a lot
     * of places, cause backwards incompatibility and would not fix the
     * problem for extents cloned with older kernels.
     */
    if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
        search_key.offset >= LLONG_MAX)
        search_key.offset = 0;
    path->lowest_level = level;
    if (time_seq == BTRFS_SEQ_LAST)
        ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
    else
        ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

    btrfs_debug(fs_info,
        "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
         ref->root_id, level, ref->count, ret,
         ref->key_for_search.objectid, ref->key_for_search.type,
         ref->key_for_search.offset);
    if (ret < 0)
        goto out;

    eb = path->nodes[level];
    while (!eb) {
        if (WARN_ON(!level)) {
            ret = 1;
            goto out;
        }
        level--;
        eb = path->nodes[level];
    }

    ret = add_all_parents(root, path, parents, preftrees, ref, level,
                  time_seq, extent_item_pos, ignore_offset);
out:
    btrfs_put_root(root);
out_free:
    path->lowest_level = 0;
    btrfs_release_path(path);
    return ret;
}

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
    if (!node)
        return NULL;
    return (struct extent_inode_elem *)(uintptr_t)node->aux;
}
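
/*
 * The aux round trip, for reference: add_all_parents() stashes the inode
 * list in a ulist node's aux member via
 *
 *    ulist_add_merge_ptr(parents, eb->start, eie, (void **)&old, GFP_NOFS);
 *
 * and resolve_indirect_refs() recovers it with
 *
 *    ref->inode_list = unode_aux_to_inode_list(node);
 *
 * so the u64 aux field is used purely as an opaque slot for the pointer.
 */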

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
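
/*
 * The order of operations described above, as find_parent_nodes() drives it
 * later in this file (a roadmap, not new code):
 *
 *    add_delayed_refs(...);        // queued delayed refs -> preftrees
 *    add_inline_refs(...);         // inline on-disk refs -> preftrees
 *    add_keyed_refs(...);          // keyed on-disk refs -> preftrees
 *    add_missing_keys(...);        // indirect_missing_keys -> indirect
 *    resolve_indirect_refs(...);   // indirect -> direct
 *    // finally walk preftrees.direct and fill the refs/roots ulists
 */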
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
                 struct btrfs_path *path, u64 time_seq,
                 struct preftrees *preftrees,
                 const u64 *extent_item_pos,
                 struct share_check *sc, bool ignore_offset)
{
    int err;
    int ret = 0;
    struct ulist *parents;
    struct ulist_node *node;
    struct ulist_iterator uiter;
    struct rb_node *rnode;

    parents = ulist_alloc(GFP_NOFS);
    if (!parents)
        return -ENOMEM;

    /*
     * We could trade memory usage for performance here by iterating
     * the tree, allocating new refs for each insertion, and then
     * freeing the entire indirect tree when we're done.  In some test
     * cases, the tree can grow quite large (~200k objects).
     */
    while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
        struct prelim_ref *ref;

        ref = rb_entry(rnode, struct prelim_ref, rbnode);
        if (WARN(ref->parent,
             "BUG: direct ref found in indirect tree")) {
            ret = -EINVAL;
            goto out;
        }

        rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
        preftrees->indirect.count--;

        if (ref->count == 0) {
            free_pref(ref);
            continue;
        }

        if (sc && sc->root_objectid &&
            ref->root_id != sc->root_objectid) {
            free_pref(ref);
            ret = BACKREF_FOUND_SHARED;
            goto out;
        }
        err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
                       ref, parents, extent_item_pos,
                       ignore_offset);
        /*
         * We can only tolerate ENOENT; any other error should be
         * caught and returned directly.
         */
        if (err == -ENOENT) {
            prelim_ref_insert(fs_info, &preftrees->direct, ref,
                      NULL);
            continue;
        } else if (err) {
            free_pref(ref);
            ret = err;
            goto out;
        }

        /* we put the first parent into the ref at hand */
        ULIST_ITER_INIT(&uiter);
        node = ulist_next(parents, &uiter);
        ref->parent = node ? node->val : 0;
        ref->inode_list = unode_aux_to_inode_list(node);

        /* Add a prelim_ref(s) for any other parent(s). */
        while ((node = ulist_next(parents, &uiter))) {
            struct prelim_ref *new_ref;

            new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
                           GFP_NOFS);
            if (!new_ref) {
                free_pref(ref);
                ret = -ENOMEM;
                goto out;
            }
            memcpy(new_ref, ref, sizeof(*ref));
            new_ref->parent = node->val;
            new_ref->inode_list = unode_aux_to_inode_list(node);
            prelim_ref_insert(fs_info, &preftrees->direct,
                      new_ref, NULL);
        }

        /*
         * Now it's a direct ref, put it in the direct tree. We must
         * do this last because the ref could be merged/freed here.
         */
        prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

        ulist_reinit(parents);
        cond_resched();
    }
out:
    ulist_free(parents);
    return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
                struct preftrees *preftrees, bool lock)
{
    struct prelim_ref *ref;
    struct extent_buffer *eb;
    struct preftree *tree = &preftrees->indirect_missing_keys;
    struct rb_node *node;

    while ((node = rb_first_cached(&tree->root))) {
        ref = rb_entry(node, struct prelim_ref, rbnode);
        rb_erase_cached(node, &tree->root);

        BUG_ON(ref->parent);    /* should not be a direct ref */
        BUG_ON(ref->key_for_search.type);
        BUG_ON(!ref->wanted_disk_byte);

        eb = read_tree_block(fs_info, ref->wanted_disk_byte,
                     ref->root_id, 0, ref->level - 1, NULL);
        if (IS_ERR(eb)) {
            free_pref(ref);
            return PTR_ERR(eb);
        }
        if (!extent_buffer_uptodate(eb)) {
            free_pref(ref);
            free_extent_buffer(eb);
            return -EIO;
        }

        if (lock)
            btrfs_tree_read_lock(eb);
        if (btrfs_header_level(eb) == 0)
            btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
        else
            btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
        if (lock)
            btrfs_tree_read_unlock(eb);
        free_extent_buffer(eb);
        prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
        cond_resched();
    }
    return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
                struct btrfs_delayed_ref_head *head, u64 seq,
                struct preftrees *preftrees, struct share_check *sc)
{
    struct btrfs_delayed_ref_node *node;
    struct btrfs_delayed_extent_op *extent_op = head->extent_op;
    struct btrfs_key key;
    struct btrfs_key tmp_op_key;
    struct rb_node *n;
    int count;
    int ret = 0;

    if (extent_op && extent_op->update_key)
        btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);

    spin_lock(&head->lock);
    for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
        node = rb_entry(n, struct btrfs_delayed_ref_node,
                ref_node);
        if (node->seq > seq)
            continue;

        switch (node->action) {
        case BTRFS_ADD_DELAYED_EXTENT:
        case BTRFS_UPDATE_DELAYED_HEAD:
            WARN_ON(1);
            continue;
        case BTRFS_ADD_DELAYED_REF:
            count = node->ref_mod;
            break;
        case BTRFS_DROP_DELAYED_REF:
            count = node->ref_mod * -1;
            break;
        default:
            BUG();
        }
        switch (node->type) {
        case BTRFS_TREE_BLOCK_REF_KEY: {
            /* NORMAL INDIRECT METADATA backref */
            struct btrfs_delayed_tree_ref *ref;

            ref = btrfs_delayed_node_to_tree_ref(node);
            ret = add_indirect_ref(fs_info, preftrees, ref->root,
                           &tmp_op_key, ref->level + 1,
                           node->bytenr, count, sc,
                           GFP_ATOMIC);
            break;
        }
        case BTRFS_SHARED_BLOCK_REF_KEY: {
            /* SHARED DIRECT METADATA backref */
            struct btrfs_delayed_tree_ref *ref;

            ref = btrfs_delayed_node_to_tree_ref(node);

            ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
                         ref->parent, node->bytenr, count,
                         sc, GFP_ATOMIC);
            break;
        }
        case BTRFS_EXTENT_DATA_REF_KEY: {
            /* NORMAL INDIRECT DATA backref */
            struct btrfs_delayed_data_ref *ref;
            ref = btrfs_delayed_node_to_data_ref(node);

            key.objectid = ref->objectid;
            key.type = BTRFS_EXTENT_DATA_KEY;
            key.offset = ref->offset;

            /*
             * Found an inum that doesn't match our known inum, we
             * know it's shared.
             */
            if (sc && sc->inum && ref->objectid != sc->inum) {
                ret = BACKREF_FOUND_SHARED;
                goto out;
            }

            ret = add_indirect_ref(fs_info, preftrees, ref->root,
                           &key, 0, node->bytenr, count, sc,
                           GFP_ATOMIC);
            break;
        }
        case BTRFS_SHARED_DATA_REF_KEY: {
            /* SHARED DIRECT FULL backref */
            struct btrfs_delayed_data_ref *ref;

            ref = btrfs_delayed_node_to_data_ref(node);

            ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
                         node->bytenr, count, sc,
                         GFP_ATOMIC);
            break;
        }
        default:
            WARN_ON(1);
        }
        /*
         * We must ignore BACKREF_FOUND_SHARED until all delayed
         * refs have been checked.
         */
        if (ret && (ret != BACKREF_FOUND_SHARED))
            break;
    }
    if (!ret)
        ret = extent_is_shared(sc);
out:
    spin_unlock(&head->lock);
    return ret;
}
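
/*
 * Note on signed counts: a BTRFS_DROP_DELAYED_REF contributes a negative
 * count above, so an on-disk ref with a pending drop merges down to
 * ref->count == 0 in prelim_ref_insert().  Both resolve_indirect_refs()
 * and the final walk in find_parent_nodes() ignore zero-count refs.
 */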

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
               struct btrfs_path *path, u64 bytenr,
               int *info_level, struct preftrees *preftrees,
               struct share_check *sc)
{
    int ret = 0;
    int slot;
    struct extent_buffer *leaf;
    struct btrfs_key key;
    struct btrfs_key found_key;
    unsigned long ptr;
    unsigned long end;
    struct btrfs_extent_item *ei;
    u64 flags;
    u64 item_size;

    /*
     * enumerate all inline refs
     */
    leaf = path->nodes[0];
    slot = path->slots[0];

    item_size = btrfs_item_size(leaf, slot);
    BUG_ON(item_size < sizeof(*ei));

    ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
    flags = btrfs_extent_flags(leaf, ei);
    btrfs_item_key_to_cpu(leaf, &found_key, slot);

    ptr = (unsigned long)(ei + 1);
    end = (unsigned long)ei + item_size;

    if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
        flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
        struct btrfs_tree_block_info *info;

        info = (struct btrfs_tree_block_info *)ptr;
        *info_level = btrfs_tree_block_level(leaf, info);
        ptr += sizeof(struct btrfs_tree_block_info);
        BUG_ON(ptr > end);
    } else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
        *info_level = found_key.offset;
    } else {
        BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
    }

    while (ptr < end) {
        struct btrfs_extent_inline_ref *iref;
        u64 offset;
        int type;

        iref = (struct btrfs_extent_inline_ref *)ptr;
        type = btrfs_get_extent_inline_ref_type(leaf, iref,
                            BTRFS_REF_TYPE_ANY);
        if (type == BTRFS_REF_TYPE_INVALID)
            return -EUCLEAN;

        offset = btrfs_extent_inline_ref_offset(leaf, iref);

        switch (type) {
        case BTRFS_SHARED_BLOCK_REF_KEY:
            ret = add_direct_ref(fs_info, preftrees,
                         *info_level + 1, offset,
                         bytenr, 1, NULL, GFP_NOFS);
            break;
        case BTRFS_SHARED_DATA_REF_KEY: {
            struct btrfs_shared_data_ref *sdref;
            int count;

            sdref = (struct btrfs_shared_data_ref *)(iref + 1);
            count = btrfs_shared_data_ref_count(leaf, sdref);

            ret = add_direct_ref(fs_info, preftrees, 0, offset,
                         bytenr, count, sc, GFP_NOFS);
            break;
        }
        case BTRFS_TREE_BLOCK_REF_KEY:
            ret = add_indirect_ref(fs_info, preftrees, offset,
                           NULL, *info_level + 1,
                           bytenr, 1, NULL, GFP_NOFS);
            break;
        case BTRFS_EXTENT_DATA_REF_KEY: {
            struct btrfs_extent_data_ref *dref;
            int count;
            u64 root;

            dref = (struct btrfs_extent_data_ref *)(&iref->offset);
            count = btrfs_extent_data_ref_count(leaf, dref);
            key.objectid = btrfs_extent_data_ref_objectid(leaf,
                                      dref);
            key.type = BTRFS_EXTENT_DATA_KEY;
            key.offset = btrfs_extent_data_ref_offset(leaf, dref);

            if (sc && sc->inum && key.objectid != sc->inum) {
                ret = BACKREF_FOUND_SHARED;
                break;
            }

            root = btrfs_extent_data_ref_root(leaf, dref);

            ret = add_indirect_ref(fs_info, preftrees, root,
                           &key, 0, bytenr, count,
                           sc, GFP_NOFS);
            break;
        }
        default:
            WARN_ON(1);
        }
        if (ret)
            return ret;
        ptr += btrfs_extent_inline_ref_size(type);
    }

    return 0;
}
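
/*
 * The inline layout parsed above, for reference (simplified; the per-type
 * size comes from btrfs_extent_inline_ref_size()):
 *
 *    struct btrfs_extent_item          refs / generation / flags
 *    [struct btrfs_tree_block_info]    non-skinny tree blocks only
 *    inline ref: u8 type + u64 offset, or a type-specific payload
 *                (btrfs_extent_data_ref overlays the offset field,
 *                 btrfs_shared_data_ref follows it)
 *    ... repeated until the end of the item (ptr == end).
 */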

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_root *extent_root,
              struct btrfs_path *path, u64 bytenr,
              int info_level, struct preftrees *preftrees,
              struct share_check *sc)
{
    struct btrfs_fs_info *fs_info = extent_root->fs_info;
    int ret;
    int slot;
    struct extent_buffer *leaf;
    struct btrfs_key key;

    while (1) {
        ret = btrfs_next_item(extent_root, path);
        if (ret < 0)
            break;
        if (ret) {
            ret = 0;
            break;
        }

        slot = path->slots[0];
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, slot);

        if (key.objectid != bytenr)
            break;
        if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
            continue;
        if (key.type > BTRFS_SHARED_DATA_REF_KEY)
            break;

        switch (key.type) {
        case BTRFS_SHARED_BLOCK_REF_KEY:
            /* SHARED DIRECT METADATA backref */
            ret = add_direct_ref(fs_info, preftrees,
                         info_level + 1, key.offset,
                         bytenr, 1, NULL, GFP_NOFS);
            break;
        case BTRFS_SHARED_DATA_REF_KEY: {
            /* SHARED DIRECT FULL backref */
            struct btrfs_shared_data_ref *sdref;
            int count;

            sdref = btrfs_item_ptr(leaf, slot,
                          struct btrfs_shared_data_ref);
            count = btrfs_shared_data_ref_count(leaf, sdref);
            ret = add_direct_ref(fs_info, preftrees, 0,
                         key.offset, bytenr, count,
                         sc, GFP_NOFS);
            break;
        }
        case BTRFS_TREE_BLOCK_REF_KEY:
            /* NORMAL INDIRECT METADATA backref */
            ret = add_indirect_ref(fs_info, preftrees, key.offset,
                           NULL, info_level + 1, bytenr,
                           1, NULL, GFP_NOFS);
            break;
        case BTRFS_EXTENT_DATA_REF_KEY: {
            /* NORMAL INDIRECT DATA backref */
            struct btrfs_extent_data_ref *dref;
            int count;
            u64 root;

            dref = btrfs_item_ptr(leaf, slot,
                          struct btrfs_extent_data_ref);
            count = btrfs_extent_data_ref_count(leaf, dref);
            key.objectid = btrfs_extent_data_ref_objectid(leaf,
                                      dref);
            key.type = BTRFS_EXTENT_DATA_KEY;
            key.offset = btrfs_extent_data_ref_offset(leaf, dref);

            if (sc && sc->inum && key.objectid != sc->inum) {
                ret = BACKREF_FOUND_SHARED;
                break;
            }

            root = btrfs_extent_data_ref_root(leaf, dref);
            ret = add_indirect_ref(fs_info, preftrees, root,
                           &key, 0, bytenr, count,
                           sc, GFP_NOFS);
            break;
        }
        default:
            WARN_ON(1);
        }
        if (ret)
            return ret;
    }

    return ret;
}

/*
 * This adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
 *
 * If time_seq is set to BTRFS_SEQ_LAST, it will not search delayed_refs, and
 * will behave much like the trans == NULL case; the only difference is that
 * it will not use the commit root.
 * The special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned.  If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
                 struct btrfs_fs_info *fs_info, u64 bytenr,
                 u64 time_seq, struct ulist *refs,
                 struct ulist *roots, const u64 *extent_item_pos,
                 struct share_check *sc, bool ignore_offset)
{
    struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);
    struct btrfs_key key;
    struct btrfs_path *path;
    struct btrfs_delayed_ref_root *delayed_refs = NULL;
    struct btrfs_delayed_ref_head *head;
    int info_level = 0;
    int ret;
    struct prelim_ref *ref;
    struct rb_node *node;
    struct extent_inode_elem *eie = NULL;
    struct preftrees preftrees = {
        .direct = PREFTREE_INIT,
        .indirect = PREFTREE_INIT,
        .indirect_missing_keys = PREFTREE_INIT
    };

    key.objectid = bytenr;
    key.offset = (u64)-1;
    if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
        key.type = BTRFS_METADATA_ITEM_KEY;
    else
        key.type = BTRFS_EXTENT_ITEM_KEY;

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;
    if (!trans) {
        path->search_commit_root = 1;
        path->skip_locking = 1;
    }

    if (time_seq == BTRFS_SEQ_LAST)
        path->skip_locking = 1;

again:
    head = NULL;

    ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
    if (ret < 0)
        goto out;
    if (ret == 0) {
        /* This shouldn't happen, indicates a bug or fs corruption. */
        ASSERT(ret != 0);
        ret = -EUCLEAN;
        goto out;
    }

    if (trans && likely(trans->type != __TRANS_DUMMY) &&
        time_seq != BTRFS_SEQ_LAST) {
        /*
         * We have a specific time_seq we care about and trans which
         * means we have the path lock, we need to grab the ref head and
         * lock it so we have a consistent view of the refs at the given
         * time.
         */
        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
        if (head) {
            if (!mutex_trylock(&head->mutex)) {
                refcount_inc(&head->refs);
                spin_unlock(&delayed_refs->lock);

                btrfs_release_path(path);

                /*
                 * Mutex was contended, block until it's
                 * released and try again
                 */
                mutex_lock(&head->mutex);
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref_head(head);
                goto again;
            }
            spin_unlock(&delayed_refs->lock);
            ret = add_delayed_refs(fs_info, head, time_seq,
                           &preftrees, sc);
            mutex_unlock(&head->mutex);
            if (ret)
                goto out;
        } else {
            spin_unlock(&delayed_refs->lock);
        }
    }

    if (path->slots[0]) {
        struct extent_buffer *leaf;
        int slot;

        path->slots[0]--;
        leaf = path->nodes[0];
        slot = path->slots[0];
        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid == bytenr &&
            (key.type == BTRFS_EXTENT_ITEM_KEY ||
             key.type == BTRFS_METADATA_ITEM_KEY)) {
            ret = add_inline_refs(fs_info, path, bytenr,
                          &info_level, &preftrees, sc);
            if (ret)
                goto out;
            ret = add_keyed_refs(root, path, bytenr, info_level,
                         &preftrees, sc);
            if (ret)
                goto out;
        }
    }

    btrfs_release_path(path);

    ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
    if (ret)
        goto out;

    WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

    ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
                    extent_item_pos, sc, ignore_offset);
    if (ret)
        goto out;

    WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

    /*
     * This walks the tree of merged and resolved refs. Tree blocks are
     * read in as needed. Unique entries are added to the ulist, and
     * the list of found roots is updated.
     *
     * We release the entire tree in one go before returning.
     */
    node = rb_first_cached(&preftrees.direct.root);
    while (node) {
        ref = rb_entry(node, struct prelim_ref, rbnode);
        node = rb_next(&ref->rbnode);
        /*
         * ref->count < 0 can happen here if there are delayed
         * refs with a node->action of BTRFS_DROP_DELAYED_REF.
         * prelim_ref_insert() relies on this when merging
         * identical refs to keep the overall count correct.
         * prelim_ref_insert() will merge only those refs
         * which compare identically.  Any refs having
         * e.g. different offsets would not be merged,
         * and would retain their original ref->count < 0.
         */
        if (roots && ref->count && ref->root_id && ref->parent == 0) {
            if (sc && sc->root_objectid &&
                ref->root_id != sc->root_objectid) {
                ret = BACKREF_FOUND_SHARED;
                goto out;
            }

            /* no parent == root of tree */
            ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
            if (ret < 0)
                goto out;
        }
        if (ref->count && ref->parent) {
            if (extent_item_pos && !ref->inode_list &&
                ref->level == 0) {
                struct extent_buffer *eb;

                eb = read_tree_block(fs_info, ref->parent, 0,
                             0, ref->level, NULL);
                if (IS_ERR(eb)) {
                    ret = PTR_ERR(eb);
                    goto out;
                }
                if (!extent_buffer_uptodate(eb)) {
                    free_extent_buffer(eb);
                    ret = -EIO;
                    goto out;
                }

                if (!path->skip_locking)
                    btrfs_tree_read_lock(eb);
                ret = find_extent_in_eb(eb, bytenr,
                            *extent_item_pos, &eie, ignore_offset);
                if (!path->skip_locking)
                    btrfs_tree_read_unlock(eb);
                free_extent_buffer(eb);
                if (ret < 0)
                    goto out;
                ref->inode_list = eie;
            }
            ret = ulist_add_merge_ptr(refs, ref->parent,
                          ref->inode_list,
                          (void **)&eie, GFP_NOFS);
            if (ret < 0)
                goto out;
            if (!ret && extent_item_pos) {
                /*
                 * We've recorded that parent, so we must extend
                 * its inode list here.
                 *
                 * However if there was corruption we may not
                 * have found an eie, return an error in this
                 * case.
                 */
                ASSERT(eie);
                if (!eie) {
                    ret = -EUCLEAN;
                    goto out;
                }
                while (eie->next)
                    eie = eie->next;
                eie->next = ref->inode_list;
            }
            eie = NULL;
        }
        cond_resched();
    }

out:
    btrfs_free_path(path);

    prelim_release(&preftrees.direct);
    prelim_release(&preftrees.indirect);
    prelim_release(&preftrees.indirect_missing_keys);

    if (ret < 0)
        free_inode_elem_list(eie);
    return ret;
}

static void free_leaf_list(struct ulist *blocks)
{
    struct ulist_node *node = NULL;
    struct extent_inode_elem *eie;
    struct ulist_iterator uiter;

    ULIST_ITER_INIT(&uiter);
    while ((node = ulist_next(blocks, &uiter))) {
        if (!node->aux)
            continue;
        eie = unode_aux_to_inode_list(node);
        free_inode_elem_list(eie);
        node->aux = 0;
    }

    ulist_free(blocks);
}

/*
 * Finds all leaves with a reference to the specified combination of bytenr
 * and offset. The leaves will be stored in the leafs ulist, which must be
 * freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
             struct btrfs_fs_info *fs_info, u64 bytenr,
             u64 time_seq, struct ulist **leafs,
             const u64 *extent_item_pos, bool ignore_offset)
{
    int ret;

    *leafs = ulist_alloc(GFP_NOFS);
    if (!*leafs)
        return -ENOMEM;

    ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
                *leafs, NULL, extent_item_pos, NULL, ignore_offset);
    if (ret < 0 && ret != -ENOENT) {
        free_leaf_list(*leafs);
        return ret;
    }

    return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
                     struct btrfs_fs_info *fs_info, u64 bytenr,
                     u64 time_seq, struct ulist **roots,
                     bool ignore_offset)
{
    struct ulist *tmp;
    struct ulist_node *node = NULL;
    struct ulist_iterator uiter;
    int ret;

    tmp = ulist_alloc(GFP_NOFS);
    if (!tmp)
        return -ENOMEM;
    *roots = ulist_alloc(GFP_NOFS);
    if (!*roots) {
        ulist_free(tmp);
        return -ENOMEM;
    }

    ULIST_ITER_INIT(&uiter);
    while (1) {
        ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
                    tmp, *roots, NULL, NULL, ignore_offset);
        if (ret < 0 && ret != -ENOENT) {
            ulist_free(tmp);
            ulist_free(*roots);
            *roots = NULL;
            return ret;
        }
        node = ulist_next(tmp, &uiter);
        if (!node)
            break;
        bytenr = node->val;
        cond_resched();
    }

    ulist_free(tmp);
    return 0;
}

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
             struct btrfs_fs_info *fs_info, u64 bytenr,
             u64 time_seq, struct ulist **roots,
             bool skip_commit_root_sem)
{
    int ret;

    if (!trans && !skip_commit_root_sem)
        down_read(&fs_info->commit_root_sem);
    ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
                    time_seq, roots, false);
    if (!trans && !skip_commit_root_sem)
        up_read(&fs_info->commit_root_sem);
    return ret;
}
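
/*
 * Typical use, sketched with a hypothetical caller (handle_root() is a
 * placeholder; error handling trimmed; trans may be NULL to walk commit
 * roots instead):
 *
 *    struct ulist *roots = NULL;
 *    struct ulist_iterator uiter;
 *    struct ulist_node *node;
 *
 *    ret = btrfs_find_all_roots(trans, fs_info, bytenr, seq, &roots, false);
 *    ULIST_ITER_INIT(&uiter);
 *    while ((node = ulist_next(roots, &uiter)))
 *        handle_root(node->val);    // node->val is a root objectid
 *    ulist_free(roots);
 */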

/**
 * Check if an extent is shared or not
 *
 * @root:   the root the inode belongs to
 * @inum:   inode number of the inode whose extent we are checking
 * @bytenr: logical bytenr of the extent we are checking
 * @roots:  list of roots this extent is shared among
 * @tmp:    temporary list used for iteration
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
1534 int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
1535         struct ulist *roots, struct ulist *tmp)
1536 {
1537     struct btrfs_fs_info *fs_info = root->fs_info;
1538     struct btrfs_trans_handle *trans;
1539     struct ulist_iterator uiter;
1540     struct ulist_node *node;
1541     struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
1542     int ret = 0;
1543     struct share_check shared = {
1544         .root_objectid = root->root_key.objectid,
1545         .inum = inum,
1546         .share_count = 0,
1547     };
1548 
1549     ulist_init(roots);
1550     ulist_init(tmp);
1551 
1552     trans = btrfs_join_transaction_nostart(root);
1553     if (IS_ERR(trans)) {
1554         if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1555             ret = PTR_ERR(trans);
1556             goto out;
1557         }
1558         trans = NULL;
1559         down_read(&fs_info->commit_root_sem);
1560     } else {
1561         btrfs_get_tree_mod_seq(fs_info, &elem);
1562     }
1563 
1564     ULIST_ITER_INIT(&uiter);
1565     while (1) {
1566         ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1567                     roots, NULL, &shared, false);
1568         if (ret == BACKREF_FOUND_SHARED) {
1569             /* this is the only condition under which we return 1 */
1570             ret = 1;
1571             break;
1572         }
1573         if (ret < 0 && ret != -ENOENT)
1574             break;
1575         ret = 0;
1576         node = ulist_next(tmp, &uiter);
1577         if (!node)
1578             break;
1579         bytenr = node->val;
1580         shared.share_count = 0;
1581         cond_resched();
1582     }
1583 
1584     if (trans) {
1585         btrfs_put_tree_mod_seq(fs_info, &elem);
1586         btrfs_end_transaction(trans);
1587     } else {
1588         up_read(&fs_info->commit_root_sem);
1589     }
1590 out:
1591     ulist_release(roots);
1592     ulist_release(tmp);
1593     return ret;
1594 }
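
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a fiemap-style sharedness check. The two ulists are allocated by
 * the caller so repeated checks can reuse them; btrfs_check_shared() only
 * releases their contents on each call. example_extent_is_shared() is an
 * assumed name.
 */
static int example_extent_is_shared(struct btrfs_root *root, u64 ino,
                    u64 disk_bytenr)
{
    struct ulist *roots, *tmp;
    int ret;

    roots = ulist_alloc(GFP_NOFS);
    tmp = ulist_alloc(GFP_NOFS);
    if (!roots || !tmp) {
        ret = -ENOMEM;
        goto out;
    }

    /* 0: not shared, 1: shared, < 0: error */
    ret = btrfs_check_shared(root, ino, disk_bytenr, roots, tmp);
out:
    ulist_free(roots);
    ulist_free(tmp);
    return ret;
}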
1595 
1596 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1597               u64 start_off, struct btrfs_path *path,
1598               struct btrfs_inode_extref **ret_extref,
1599               u64 *found_off)
1600 {
1601     int ret, slot;
1602     struct btrfs_key key;
1603     struct btrfs_key found_key;
1604     struct btrfs_inode_extref *extref;
1605     const struct extent_buffer *leaf;
1606     unsigned long ptr;
1607 
1608     key.objectid = inode_objectid;
1609     key.type = BTRFS_INODE_EXTREF_KEY;
1610     key.offset = start_off;
1611 
1612     ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1613     if (ret < 0)
1614         return ret;
1615 
1616     while (1) {
1617         leaf = path->nodes[0];
1618         slot = path->slots[0];
1619         if (slot >= btrfs_header_nritems(leaf)) {
1620             /*
1621              * If the item at offset is not found,
1622              * btrfs_search_slot will point us to the slot
1623              * where it should be inserted. In our case
1624              * that will be the slot directly before the
1625              * next INODE_EXTREF_KEY item. In the case
1626              * that we're pointing to the last slot in a
1627              * leaf, we must move one leaf over.
1628              */
1629             ret = btrfs_next_leaf(root, path);
1630             if (ret) {
1631                 if (ret >= 1)
1632                     ret = -ENOENT;
1633                 break;
1634             }
1635             continue;
1636         }
1637 
1638         btrfs_item_key_to_cpu(leaf, &found_key, slot);
1639 
1640         /*
1641          * Check that we're still looking at an extended ref key for
1642          * this particular objectid. If we have a different
1643          * objectid or type then there are no more to be found
1644          * in the tree and we can exit.
1645          */
1646         ret = -ENOENT;
1647         if (found_key.objectid != inode_objectid)
1648             break;
1649         if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1650             break;
1651 
1652         ret = 0;
1653         ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1654         extref = (struct btrfs_inode_extref *)ptr;
1655         *ret_extref = extref;
1656         if (found_off)
1657             *found_off = found_key.offset;
1658         break;
1659     }
1660 
1661     return ret;
1662 }
1663 
1664 /*
1665  * this iterates to turn a name (from iref/extref) into a full filesystem path.
1666  * Elements of the path are separated by '/' and the path is guaranteed to be
1667  * 0-terminated. the path is only given within the current file system.
1668  * Therefore, it never starts with a '/'. the caller is responsible for providing
1669  * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
1670  * the start point of the resulting string is returned. this pointer is within
1671  * dest, normally.
1672  * in case the path buffer would overflow, the pointer is decremented further
1673  * as if output was written to the buffer, though no more output is actually
1674  * generated. that way, the caller can determine how much space would be
1675  * required for the path to fit into the buffer. in that case, the returned
1676  * value will be smaller than dest. callers must check this!
1677  */
1678 char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1679             u32 name_len, unsigned long name_off,
1680             struct extent_buffer *eb_in, u64 parent,
1681             char *dest, u32 size)
1682 {
1683     int slot;
1684     u64 next_inum;
1685     int ret;
1686     s64 bytes_left = ((s64)size) - 1;
1687     struct extent_buffer *eb = eb_in;
1688     struct btrfs_key found_key;
1689     struct btrfs_inode_ref *iref;
1690 
1691     if (bytes_left >= 0)
1692         dest[bytes_left] = '\0';
1693 
1694     while (1) {
1695         bytes_left -= name_len;
1696         if (bytes_left >= 0)
1697             read_extent_buffer(eb, dest + bytes_left,
1698                        name_off, name_len);
1699         if (eb != eb_in) {
1700             if (!path->skip_locking)
1701                 btrfs_tree_read_unlock(eb);
1702             free_extent_buffer(eb);
1703         }
1704         ret = btrfs_find_item(fs_root, path, parent, 0,
1705                 BTRFS_INODE_REF_KEY, &found_key);
1706         if (ret > 0)
1707             ret = -ENOENT;
1708         if (ret)
1709             break;
1710 
1711         next_inum = found_key.offset;
1712 
1713         /* regular exit ahead */
1714         if (parent == next_inum)
1715             break;
1716 
1717         slot = path->slots[0];
1718         eb = path->nodes[0];
1719         /* make sure we can use eb after releasing the path */
1720         if (eb != eb_in) {
1721             path->nodes[0] = NULL;
1722             path->locks[0] = 0;
1723         }
1724         btrfs_release_path(path);
1725         iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1726 
1727         name_len = btrfs_inode_ref_name_len(eb, iref);
1728         name_off = (unsigned long)(iref + 1);
1729 
1730         parent = next_inum;
1731         --bytes_left;
1732         if (bytes_left >= 0)
1733             dest[bytes_left] = '/';
1734     }
1735 
1736     btrfs_release_path(path);
1737 
1738     if (ret)
1739         return ERR_PTR(ret);
1740 
1741     return dest + bytes_left;
1742 }
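
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * interpreting the pointer returned by btrfs_ref_to_path(). When the buffer
 * was too small the returned pointer lies below @dest, and the distance is
 * the number of missing bytes, mirroring the accounting in inode_to_path()
 * further down. example_check_ref_path() is an assumed name.
 */
static void example_check_ref_path(char *dest, char *fspath)
{
    if (IS_ERR(fspath))
        return;        /* real callers propagate PTR_ERR(fspath) */

    if (fspath >= dest)
        pr_debug("resolved path: %s\n", fspath);
    else
        pr_debug("buffer too small, %zu more bytes needed\n",
             (size_t)(dest - fspath));
}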
1743 
1744 /*
1745  * this makes the path point to (logical EXTENT_ITEM *)
1746  * on success, returns 0 and stores BTRFS_EXTENT_FLAG_DATA (data) or
1747  * BTRFS_EXTENT_FLAG_TREE_BLOCK (tree block) in *flags_ret; returns <0 on error.
1748  */
1749 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1750             struct btrfs_path *path, struct btrfs_key *found_key,
1751             u64 *flags_ret)
1752 {
1753     struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
1754     int ret;
1755     u64 flags;
1756     u64 size = 0;
1757     u32 item_size;
1758     const struct extent_buffer *eb;
1759     struct btrfs_extent_item *ei;
1760     struct btrfs_key key;
1761 
1762     if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1763         key.type = BTRFS_METADATA_ITEM_KEY;
1764     else
1765         key.type = BTRFS_EXTENT_ITEM_KEY;
1766     key.objectid = logical;
1767     key.offset = (u64)-1;
1768 
1769     ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1770     if (ret < 0)
1771         return ret;
1772 
1773     ret = btrfs_previous_extent_item(extent_root, path, 0);
1774     if (ret) {
1775         if (ret > 0)
1776             ret = -ENOENT;
1777         return ret;
1778     }
1779     btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1780     if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1781         size = fs_info->nodesize;
1782     else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1783         size = found_key->offset;
1784 
1785     if (found_key->objectid > logical ||
1786         found_key->objectid + size <= logical) {
1787         btrfs_debug(fs_info,
1788             "logical %llu is not within any extent", logical);
1789         return -ENOENT;
1790     }
1791 
1792     eb = path->nodes[0];
1793     item_size = btrfs_item_size(eb, path->slots[0]);
1794     BUG_ON(item_size < sizeof(*ei));
1795 
1796     ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1797     flags = btrfs_extent_flags(eb, ei);
1798 
1799     btrfs_debug(fs_info,
1800         "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1801          logical, logical - found_key->objectid, found_key->objectid,
1802          found_key->offset, flags, item_size);
1803 
1804     WARN_ON(!flags_ret);
1805     if (flags_ret) {
1806         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1807             *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1808         else if (flags & BTRFS_EXTENT_FLAG_DATA)
1809             *flags_ret = BTRFS_EXTENT_FLAG_DATA;
1810         else
1811             BUG();
1812         return 0;
1813     }
1814 
1815     return -EIO;
1816 }
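
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): looking up the extent item covering a logical address and branching
 * on the returned flags, similar to iterate_inodes_from_logical() below.
 * example_classify_logical() is an assumed name.
 */
static int example_classify_logical(struct btrfs_fs_info *fs_info, u64 logical)
{
    struct btrfs_path *path;
    struct btrfs_key found_key;
    u64 flags = 0;
    int ret;

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;

    ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
    btrfs_release_path(path);
    if (ret < 0)
        goto out;

    if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
        btrfs_debug(fs_info, "%llu belongs to a tree block", logical);
    else
        btrfs_debug(fs_info, "%llu is data, offset %llu within its extent",
                logical, logical - found_key.objectid);
out:
    btrfs_free_path(path);
    return ret;
}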
1817 
1818 /*
1819  * helper function to iterate extent inline refs. ptr must point to a 0 value
1820  * for the first call and may be modified. it is used to track state.
1821  * if more refs exist, 0 is returned and the next call to
1822  * get_extent_inline_ref must pass the modified ptr parameter to get the
1823  * next ref. after the last ref was processed, 1 is returned.
1824  * returns <0 on error
1825  */
1826 static int get_extent_inline_ref(unsigned long *ptr,
1827                  const struct extent_buffer *eb,
1828                  const struct btrfs_key *key,
1829                  const struct btrfs_extent_item *ei,
1830                  u32 item_size,
1831                  struct btrfs_extent_inline_ref **out_eiref,
1832                  int *out_type)
1833 {
1834     unsigned long end;
1835     u64 flags;
1836     struct btrfs_tree_block_info *info;
1837 
1838     if (!*ptr) {
1839         /* first call */
1840         flags = btrfs_extent_flags(eb, ei);
1841         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1842             if (key->type == BTRFS_METADATA_ITEM_KEY) {
1843                 /* a skinny metadata extent */
1844                 *out_eiref =
1845                      (struct btrfs_extent_inline_ref *)(ei + 1);
1846             } else {
1847                 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1848                 info = (struct btrfs_tree_block_info *)(ei + 1);
1849                 *out_eiref =
1850                    (struct btrfs_extent_inline_ref *)(info + 1);
1851             }
1852         } else {
1853             *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1854         }
1855         *ptr = (unsigned long)*out_eiref;
1856         if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1857             return -ENOENT;
1858     }
1859 
1860     end = (unsigned long)ei + item_size;
1861     *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1862     *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
1863                              BTRFS_REF_TYPE_ANY);
1864     if (*out_type == BTRFS_REF_TYPE_INVALID)
1865         return -EUCLEAN;
1866 
1867     *ptr += btrfs_extent_inline_ref_size(*out_type);
1868     WARN_ON(*ptr > end);
1869     if (*ptr == end)
1870         return 1; /* last */
1871 
1872     return 0;
1873 }
1874 
1875 /*
1876  * reads the tree block backref for an extent. tree level and root are returned
1877  * through out_level and out_root. ptr must point to a 0 value for the first
1878  * call and may be modified (see get_extent_inline_ref comment).
1879  * returns 0 if data was provided, 1 if there was no more data to provide or
1880  * <0 on error.
1881  */
1882 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1883                 struct btrfs_key *key, struct btrfs_extent_item *ei,
1884                 u32 item_size, u64 *out_root, u8 *out_level)
1885 {
1886     int ret;
1887     int type;
1888     struct btrfs_extent_inline_ref *eiref;
1889 
1890     if (*ptr == (unsigned long)-1)
1891         return 1;
1892 
1893     while (1) {
1894         ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
1895                           &eiref, &type);
1896         if (ret < 0)
1897             return ret;
1898 
1899         if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1900             type == BTRFS_SHARED_BLOCK_REF_KEY)
1901             break;
1902 
1903         if (ret == 1)
1904             return 1;
1905     }
1906 
1907     /* we can treat both ref types equally here */
1908     *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1909 
1910     if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1911         struct btrfs_tree_block_info *info;
1912 
1913         info = (struct btrfs_tree_block_info *)(ei + 1);
1914         *out_level = btrfs_tree_block_level(eb, info);
1915     } else {
1916         ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1917         *out_level = (u8)key->offset;
1918     }
1919 
1920     if (ret == 1)
1921         *ptr = (unsigned long)-1;
1922 
1923     return 0;
1924 }
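
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): the calling pattern for tree_backref_for_extent(). @ptr starts at 0
 * and is advanced internally; a return of 1 means all backrefs were consumed
 * and a negative return is an error. The eb/key/ei/item_size arguments are
 * assumed to come from a prior extent item lookup, and the example_ names
 * are assumptions.
 */
static void example_walk_tree_backrefs(struct btrfs_fs_info *fs_info,
                       struct extent_buffer *eb,
                       struct btrfs_key *key,
                       struct btrfs_extent_item *ei,
                       u32 item_size)
{
    unsigned long ptr = 0;
    u64 root;
    u8 level;
    int ret;

    while (1) {
        ret = tree_backref_for_extent(&ptr, eb, key, ei, item_size,
                          &root, &level);
        if (ret < 0)
            break;        /* error */
        if (ret > 0)
            break;        /* no more tree backrefs */
        btrfs_debug(fs_info, "tree block is owned by root %llu at level %u",
                root, level);
    }
}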
1925 
1926 static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1927                  struct extent_inode_elem *inode_list,
1928                  u64 root, u64 extent_item_objectid,
1929                  iterate_extent_inodes_t *iterate, void *ctx)
1930 {
1931     struct extent_inode_elem *eie;
1932     int ret = 0;
1933 
1934     for (eie = inode_list; eie; eie = eie->next) {
1935         btrfs_debug(fs_info,
1936                 "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
1937                 extent_item_objectid, eie->inum,
1938                 eie->offset, root);
1939         ret = iterate(eie->inum, eie->offset, root, ctx);
1940         if (ret) {
1941             btrfs_debug(fs_info,
1942                     "stopping iteration for %llu due to ret=%d",
1943                     extent_item_objectid, ret);
1944             break;
1945         }
1946     }
1947 
1948     return ret;
1949 }
1950 
1951 /*
1952  * calls iterate() for every inode that references the extent identified by
1953  * the given parameters.
1954  * when the iterator function returns a non-zero value, iteration stops.
1955  */
1956 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1957                 u64 extent_item_objectid, u64 extent_item_pos,
1958                 int search_commit_root,
1959                 iterate_extent_inodes_t *iterate, void *ctx,
1960                 bool ignore_offset)
1961 {
1962     int ret;
1963     struct btrfs_trans_handle *trans = NULL;
1964     struct ulist *refs = NULL;
1965     struct ulist *roots = NULL;
1966     struct ulist_node *ref_node = NULL;
1967     struct ulist_node *root_node = NULL;
1968     struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
1969     struct ulist_iterator ref_uiter;
1970     struct ulist_iterator root_uiter;
1971 
1972     btrfs_debug(fs_info, "resolving all inodes for extent %llu",
1973             extent_item_objectid);
1974 
1975     if (!search_commit_root) {
1976         trans = btrfs_attach_transaction(fs_info->tree_root);
1977         if (IS_ERR(trans)) {
1978             if (PTR_ERR(trans) != -ENOENT &&
1979                 PTR_ERR(trans) != -EROFS)
1980                 return PTR_ERR(trans);
1981             trans = NULL;
1982         }
1983     }
1984 
1985     if (trans)
1986         btrfs_get_tree_mod_seq(fs_info, &seq_elem);
1987     else
1988         down_read(&fs_info->commit_root_sem);
1989 
1990     ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
1991                    seq_elem.seq, &refs,
1992                    &extent_item_pos, ignore_offset);
1993     if (ret)
1994         goto out;
1995 
1996     ULIST_ITER_INIT(&ref_uiter);
1997     while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
1998         ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
1999                         seq_elem.seq, &roots,
2000                         ignore_offset);
2001         if (ret)
2002             break;
2003         ULIST_ITER_INIT(&root_uiter);
2004         while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
2005             btrfs_debug(fs_info,
2006                     "root %llu references leaf %llu, data list %#llx",
2007                     root_node->val, ref_node->val,
2008                     ref_node->aux);
2009             ret = iterate_leaf_refs(fs_info,
2010                         (struct extent_inode_elem *)
2011                         (uintptr_t)ref_node->aux,
2012                         root_node->val,
2013                         extent_item_objectid,
2014                         iterate, ctx);
2015         }
2016         ulist_free(roots);
2017     }
2018 
2019     free_leaf_list(refs);
2020 out:
2021     if (trans) {
2022         btrfs_put_tree_mod_seq(fs_info, &seq_elem);
2023         btrfs_end_transaction(trans);
2024     } else {
2025         up_read(&fs_info->commit_root_sem);
2026     }
2027 
2028     return ret;
2029 }
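
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a minimal iterate_extent_inodes_t callback plus the call driving
 * it, counting how many (inode, offset, root) triples reference a data
 * extent. Returning non-zero from the callback stops the iteration early.
 * The example_ names, the extent position of 0 and search_commit_root = 0
 * are assumptions.
 */
static int example_count_one_ref(u64 inum, u64 offset, u64 root, void *ctx)
{
    u64 *count = ctx;

    (*count)++;
    return 0;    /* keep iterating */
}

static int example_count_extent_refs(struct btrfs_fs_info *fs_info,
                     u64 extent_bytenr, u64 *count)
{
    *count = 0;
    return iterate_extent_inodes(fs_info, extent_bytenr, 0, 0,
                     example_count_one_ref, count, false);
}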
2030 
2031 static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
2032 {
2033     struct btrfs_data_container *inodes = ctx;
2034     const size_t c = 3 * sizeof(u64);
2035 
2036     if (inodes->bytes_left >= c) {
2037         inodes->bytes_left -= c;
2038         inodes->val[inodes->elem_cnt] = inum;
2039         inodes->val[inodes->elem_cnt + 1] = offset;
2040         inodes->val[inodes->elem_cnt + 2] = root;
2041         inodes->elem_cnt += 3;
2042     } else {
2043         inodes->bytes_missing += c - inodes->bytes_left;
2044         inodes->bytes_left = 0;
2045         inodes->elem_missed += 3;
2046     }
2047 
2048     return 0;
2049 }
2050 
2051 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2052                 struct btrfs_path *path,
2053                 void *ctx, bool ignore_offset)
2054 {
2055     int ret;
2056     u64 extent_item_pos;
2057     u64 flags = 0;
2058     struct btrfs_key found_key;
2059     int search_commit_root = path->search_commit_root;
2060 
2061     ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2062     btrfs_release_path(path);
2063     if (ret < 0)
2064         return ret;
2065     if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2066         return -EINVAL;
2067 
2068     extent_item_pos = logical - found_key.objectid;
2069     ret = iterate_extent_inodes(fs_info, found_key.objectid,
2070                     extent_item_pos, search_commit_root,
2071                     build_ino_list, ctx, ignore_offset);
2072 
2073     return ret;
2074 }
2075 
2076 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2077              struct extent_buffer *eb, struct inode_fs_paths *ipath);
2078 
2079 static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
2080 {
2081     int ret = 0;
2082     int slot;
2083     u32 cur;
2084     u32 len;
2085     u32 name_len;
2086     u64 parent = 0;
2087     int found = 0;
2088     struct btrfs_root *fs_root = ipath->fs_root;
2089     struct btrfs_path *path = ipath->btrfs_path;
2090     struct extent_buffer *eb;
2091     struct btrfs_inode_ref *iref;
2092     struct btrfs_key found_key;
2093 
2094     while (!ret) {
2095         ret = btrfs_find_item(fs_root, path, inum,
2096                 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2097                 &found_key);
2098 
2099         if (ret < 0)
2100             break;
2101         if (ret) {
2102             ret = found ? 0 : -ENOENT;
2103             break;
2104         }
2105         ++found;
2106 
2107         parent = found_key.offset;
2108         slot = path->slots[0];
2109         eb = btrfs_clone_extent_buffer(path->nodes[0]);
2110         if (!eb) {
2111             ret = -ENOMEM;
2112             break;
2113         }
2114         btrfs_release_path(path);
2115 
2116         iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2117 
2118         for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
2119             name_len = btrfs_inode_ref_name_len(eb, iref);
2120             /* path must be released before calling iterate()! */
2121             btrfs_debug(fs_root->fs_info,
2122                 "following ref at offset %u for inode %llu in tree %llu",
2123                 cur, found_key.objectid,
2124                 fs_root->root_key.objectid);
2125             ret = inode_to_path(parent, name_len,
2126                       (unsigned long)(iref + 1), eb, ipath);
2127             if (ret)
2128                 break;
2129             len = sizeof(*iref) + name_len;
2130             iref = (struct btrfs_inode_ref *)((char *)iref + len);
2131         }
2132         free_extent_buffer(eb);
2133     }
2134 
2135     btrfs_release_path(path);
2136 
2137     return ret;
2138 }
2139 
2140 static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
2141 {
2142     int ret;
2143     int slot;
2144     u64 offset = 0;
2145     u64 parent;
2146     int found = 0;
2147     struct btrfs_root *fs_root = ipath->fs_root;
2148     struct btrfs_path *path = ipath->btrfs_path;
2149     struct extent_buffer *eb;
2150     struct btrfs_inode_extref *extref;
2151     u32 item_size;
2152     u32 cur_offset;
2153     unsigned long ptr;
2154 
2155     while (1) {
2156         ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2157                         &offset);
2158         if (ret < 0)
2159             break;
2160         if (ret) {
2161             ret = found ? 0 : -ENOENT;
2162             break;
2163         }
2164         ++found;
2165 
2166         slot = path->slots[0];
2167         eb = btrfs_clone_extent_buffer(path->nodes[0]);
2168         if (!eb) {
2169             ret = -ENOMEM;
2170             break;
2171         }
2172         btrfs_release_path(path);
2173 
2174         item_size = btrfs_item_size(eb, slot);
2175         ptr = btrfs_item_ptr_offset(eb, slot);
2176         cur_offset = 0;
2177 
2178         while (cur_offset < item_size) {
2179             u32 name_len;
2180 
2181             extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2182             parent = btrfs_inode_extref_parent(eb, extref);
2183             name_len = btrfs_inode_extref_name_len(eb, extref);
2184             ret = inode_to_path(parent, name_len,
2185                       (unsigned long)&extref->name, eb, ipath);
2186             if (ret)
2187                 break;
2188 
2189             cur_offset += btrfs_inode_extref_name_len(eb, extref);
2190             cur_offset += sizeof(*extref);
2191         }
2192         free_extent_buffer(eb);
2193 
2194         offset++;
2195     }
2196 
2197     btrfs_release_path(path);
2198 
2199     return ret;
2200 }
2201 
2202 /*
2203  * returns 0 if the path could be dumped (possibly truncated)
2204  * returns <0 in case of an error
2205  */
2206 static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2207              struct extent_buffer *eb, struct inode_fs_paths *ipath)
2208 {
2209     char *fspath;
2210     char *fspath_min;
2211     int i = ipath->fspath->elem_cnt;
2212     const int s_ptr = sizeof(char *);
2213     u32 bytes_left;
2214 
2215     bytes_left = ipath->fspath->bytes_left > s_ptr ?
2216                     ipath->fspath->bytes_left - s_ptr : 0;
2217 
2218     fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2219     fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2220                    name_off, eb, inum, fspath_min, bytes_left);
2221     if (IS_ERR(fspath))
2222         return PTR_ERR(fspath);
2223 
2224     if (fspath > fspath_min) {
2225         ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2226         ++ipath->fspath->elem_cnt;
2227         ipath->fspath->bytes_left = fspath - fspath_min;
2228     } else {
2229         ++ipath->fspath->elem_missed;
2230         ipath->fspath->bytes_missing += fspath_min - fspath;
2231         ipath->fspath->bytes_left = 0;
2232     }
2233 
2234     return 0;
2235 }
2236 
2237 /*
2238  * this dumps all file system paths to the inode into the ipath struct, provided
2239  * it has been created large enough. each path is zero-terminated and accessed
2240  * from ipath->fspath->val[i].
2241  * when it returns, there are ipath->fspath->elem_cnt number of paths available
2242  * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2243  * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2244  * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2245  * have been needed to return all paths.
2246  */
2247 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2248 {
2249     int ret;
2250     int found_refs = 0;
2251 
2252     ret = iterate_inode_refs(inum, ipath);
2253     if (!ret)
2254         ++found_refs;
2255     else if (ret != -ENOENT)
2256         return ret;
2257 
2258     ret = iterate_inode_extrefs(inum, ipath);
2259     if (ret == -ENOENT && found_refs)
2260         return 0;
2261 
2262     return ret;
2263 }
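
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): resolving all paths of an inode, roughly what the
 * BTRFS_IOC_INO_PATHS ioctl handler does. The 4096-byte container size and
 * the example_ names are assumptions; real callers size the container from
 * user input and copy the result back to user space.
 */
static int example_print_inode_paths(struct btrfs_root *fs_root, u64 inum)
{
    struct btrfs_path *path;
    struct inode_fs_paths *ipath;
    u32 i;
    int ret;

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;

    ipath = init_ipath(4096, fs_root, path);
    if (IS_ERR(ipath)) {
        ret = PTR_ERR(ipath);
        goto out_path;
    }

    ret = paths_from_inode(inum, ipath);
    if (ret < 0)
        goto out_ipath;

    /* each val[i] points to a zero-terminated path inside the container */
    for (i = 0; i < ipath->fspath->elem_cnt; i++)
        btrfs_debug(fs_root->fs_info, "path %u: %s", i,
                (char *)(unsigned long)ipath->fspath->val[i]);

out_ipath:
    free_ipath(ipath);
out_path:
    btrfs_free_path(path);
    return ret;
}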
2264 
2265 struct btrfs_data_container *init_data_container(u32 total_bytes)
2266 {
2267     struct btrfs_data_container *data;
2268     size_t alloc_bytes;
2269 
2270     alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2271     data = kvmalloc(alloc_bytes, GFP_KERNEL);
2272     if (!data)
2273         return ERR_PTR(-ENOMEM);
2274 
2275     if (total_bytes >= sizeof(*data)) {
2276         data->bytes_left = total_bytes - sizeof(*data);
2277         data->bytes_missing = 0;
2278     } else {
2279         data->bytes_missing = sizeof(*data) - total_bytes;
2280         data->bytes_left = 0;
2281     }
2282 
2283     data->elem_cnt = 0;
2284     data->elem_missed = 0;
2285 
2286     return data;
2287 }
2288 
2289 /*
2290  * allocates space to return multiple file system paths for an inode.
2291  * total_bytes to allocate are passed, note that space usable for actual path
2292  * information will be total_bytes - sizeof(struct btrfs_data_container).
2293  * the returned pointer must eventually be freed with free_ipath().
2294  */
2295 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2296                     struct btrfs_path *path)
2297 {
2298     struct inode_fs_paths *ifp;
2299     struct btrfs_data_container *fspath;
2300 
2301     fspath = init_data_container(total_bytes);
2302     if (IS_ERR(fspath))
2303         return ERR_CAST(fspath);
2304 
2305     ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2306     if (!ifp) {
2307         kvfree(fspath);
2308         return ERR_PTR(-ENOMEM);
2309     }
2310 
2311     ifp->btrfs_path = path;
2312     ifp->fspath = fspath;
2313     ifp->fs_root = fs_root;
2314 
2315     return ifp;
2316 }
2317 
2318 void free_ipath(struct inode_fs_paths *ipath)
2319 {
2320     if (!ipath)
2321         return;
2322     kvfree(ipath->fspath);
2323     kfree(ipath);
2324 }
2325 
2326 struct btrfs_backref_iter *btrfs_backref_iter_alloc(
2327         struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
2328 {
2329     struct btrfs_backref_iter *ret;
2330 
2331     ret = kzalloc(sizeof(*ret), gfp_flag);
2332     if (!ret)
2333         return NULL;
2334 
2335     ret->path = btrfs_alloc_path();
2336     if (!ret->path) {
2337         kfree(ret);
2338         return NULL;
2339     }
2340 
2341     /* Current backref iterator only supports iteration in commit root */
2342     ret->path->search_commit_root = 1;
2343     ret->path->skip_locking = 1;
2344     ret->fs_info = fs_info;
2345 
2346     return ret;
2347 }
2348 
2349 int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2350 {
2351     struct btrfs_fs_info *fs_info = iter->fs_info;
2352     struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
2353     struct btrfs_path *path = iter->path;
2354     struct btrfs_extent_item *ei;
2355     struct btrfs_key key;
2356     int ret;
2357 
2358     key.objectid = bytenr;
2359     key.type = BTRFS_METADATA_ITEM_KEY;
2360     key.offset = (u64)-1;
2361     iter->bytenr = bytenr;
2362 
2363     ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2364     if (ret < 0)
2365         return ret;
2366     if (ret == 0) {
2367         ret = -EUCLEAN;
2368         goto release;
2369     }
2370     if (path->slots[0] == 0) {
2371         WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2372         ret = -EUCLEAN;
2373         goto release;
2374     }
2375     path->slots[0]--;
2376 
2377     btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2378     if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2379          key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2380         ret = -ENOENT;
2381         goto release;
2382     }
2383     memcpy(&iter->cur_key, &key, sizeof(key));
2384     iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2385                             path->slots[0]);
2386     iter->end_ptr = (u32)(iter->item_ptr +
2387             btrfs_item_size(path->nodes[0], path->slots[0]));
2388     ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2389                 struct btrfs_extent_item);
2390 
2391     /*
2392      * Only iteration of tree backrefs is supported for now.
2393      *
2394      * This is an extra precaution for non skinny-metadata, where
2395      * EXTENT_ITEM is also used for tree blocks, so we can only use
2396      * extent flags to determine if it's a tree block.
2397      */
2398     if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2399         ret = -ENOTSUPP;
2400         goto release;
2401     }
2402     iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2403 
2404     /* If there is no inline backref, go search for keyed backref */
2405     if (iter->cur_ptr >= iter->end_ptr) {
2406         ret = btrfs_next_item(extent_root, path);
2407 
2408         /* No inline nor keyed ref */
2409         if (ret > 0) {
2410             ret = -ENOENT;
2411             goto release;
2412         }
2413         if (ret < 0)
2414             goto release;
2415 
2416         btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2417                 path->slots[0]);
2418         if (iter->cur_key.objectid != bytenr ||
2419             (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2420              iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2421             ret = -ENOENT;
2422             goto release;
2423         }
2424         iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2425                                path->slots[0]);
2426         iter->item_ptr = iter->cur_ptr;
2427         iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
2428                       path->nodes[0], path->slots[0]));
2429     }
2430 
2431     return 0;
2432 release:
2433     btrfs_backref_iter_release(iter);
2434     return ret;
2435 }
2436 
2437 /*
2438  * Go to the next backref item of the current bytenr; it can be either
2439  * inlined or keyed.
2440  *
2441  * Caller needs to check whether it's an inline ref or not via iter->cur_key.
2442  *
2443  * Return 0 if we get the next backref without problem.
2444  * Return >0 if there is no extra backref for this bytenr.
2445  * Return <0 if something went wrong.
2446  */
2447 int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2448 {
2449     struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2450     struct btrfs_root *extent_root;
2451     struct btrfs_path *path = iter->path;
2452     struct btrfs_extent_inline_ref *iref;
2453     int ret;
2454     u32 size;
2455 
2456     if (btrfs_backref_iter_is_inline_ref(iter)) {
2457         /* We're still inside the inline refs */
2458         ASSERT(iter->cur_ptr < iter->end_ptr);
2459 
2460         if (btrfs_backref_has_tree_block_info(iter)) {
2461             /* First tree block info */
2462             size = sizeof(struct btrfs_tree_block_info);
2463         } else {
2464             /* Use inline ref type to determine the size */
2465             int type;
2466 
2467             iref = (struct btrfs_extent_inline_ref *)
2468                 ((unsigned long)iter->cur_ptr);
2469             type = btrfs_extent_inline_ref_type(eb, iref);
2470 
2471             size = btrfs_extent_inline_ref_size(type);
2472         }
2473         iter->cur_ptr += size;
2474         if (iter->cur_ptr < iter->end_ptr)
2475             return 0;
2476 
2477         /* All inline items iterated, fall through */
2478     }
2479 
2480     /* We're at keyed items, there is no inline item, go to the next one */
2481     extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
2482     ret = btrfs_next_item(extent_root, iter->path);
2483     if (ret)
2484         return ret;
2485 
2486     btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2487     if (iter->cur_key.objectid != iter->bytenr ||
2488         (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2489          iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2490         return 1;
2491     iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2492                     path->slots[0]);
2493     iter->cur_ptr = iter->item_ptr;
2494     iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
2495                         path->slots[0]);
2496     return 0;
2497 }
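
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): the intended loop around the backref iterator for a tree block
 * (data extents are rejected by btrfs_backref_iter_start() with -ENOTSUPP).
 * iter->cur_key tells the caller whether the current ref is inline or keyed.
 * example_walk_block_backrefs() is an assumed name.
 */
static int example_walk_block_backrefs(struct btrfs_fs_info *fs_info,
                       u64 block_bytenr)
{
    struct btrfs_backref_iter *iter;
    int ret;

    iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
    if (!iter)
        return -ENOMEM;

    ret = btrfs_backref_iter_start(iter, block_bytenr);
    if (ret < 0) {
        btrfs_backref_iter_free(iter);
        return ret;
    }

    do {
        if (btrfs_backref_iter_is_inline_ref(iter))
            btrfs_debug(fs_info, "inline backref, type %u",
                    iter->cur_key.type);
        else
            btrfs_debug(fs_info, "keyed backref (%llu %u %llu)",
                    iter->cur_key.objectid, iter->cur_key.type,
                    iter->cur_key.offset);
        ret = btrfs_backref_iter_next(iter);
    } while (ret == 0);

    if (ret > 0)    /* walked past the last backref, not an error */
        ret = 0;

    btrfs_backref_iter_release(iter);
    btrfs_backref_iter_free(iter);
    return ret;
}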
2498 
2499 void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2500                   struct btrfs_backref_cache *cache, int is_reloc)
2501 {
2502     int i;
2503 
2504     cache->rb_root = RB_ROOT;
2505     for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2506         INIT_LIST_HEAD(&cache->pending[i]);
2507     INIT_LIST_HEAD(&cache->changed);
2508     INIT_LIST_HEAD(&cache->detached);
2509     INIT_LIST_HEAD(&cache->leaves);
2510     INIT_LIST_HEAD(&cache->pending_edge);
2511     INIT_LIST_HEAD(&cache->useless_node);
2512     cache->fs_info = fs_info;
2513     cache->is_reloc = is_reloc;
2514 }
2515 
2516 struct btrfs_backref_node *btrfs_backref_alloc_node(
2517         struct btrfs_backref_cache *cache, u64 bytenr, int level)
2518 {
2519     struct btrfs_backref_node *node;
2520 
2521     ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2522     node = kzalloc(sizeof(*node), GFP_NOFS);
2523     if (!node)
2524         return node;
2525 
2526     INIT_LIST_HEAD(&node->list);
2527     INIT_LIST_HEAD(&node->upper);
2528     INIT_LIST_HEAD(&node->lower);
2529     RB_CLEAR_NODE(&node->rb_node);
2530     cache->nr_nodes++;
2531     node->level = level;
2532     node->bytenr = bytenr;
2533 
2534     return node;
2535 }
2536 
2537 struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2538         struct btrfs_backref_cache *cache)
2539 {
2540     struct btrfs_backref_edge *edge;
2541 
2542     edge = kzalloc(sizeof(*edge), GFP_NOFS);
2543     if (edge)
2544         cache->nr_edges++;
2545     return edge;
2546 }
2547 
2548 /*
2549  * Drop the backref node from cache, also cleaning up all its
2550  * upper edges and any uncached nodes in the path.
2551  *
2552  * This cleanup happens bottom up, thus the node should either
2553  * be the lowest node in the cache or a detached node.
2554  */
2555 void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
2556                 struct btrfs_backref_node *node)
2557 {
2558     struct btrfs_backref_node *upper;
2559     struct btrfs_backref_edge *edge;
2560 
2561     if (!node)
2562         return;
2563 
2564     BUG_ON(!node->lowest && !node->detached);
2565     while (!list_empty(&node->upper)) {
2566         edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2567                   list[LOWER]);
2568         upper = edge->node[UPPER];
2569         list_del(&edge->list[LOWER]);
2570         list_del(&edge->list[UPPER]);
2571         btrfs_backref_free_edge(cache, edge);
2572 
2573         /*
2574          * Add the node to the leaf node list if no other child block
2575          * is cached.
2576          */
2577         if (list_empty(&upper->lower)) {
2578             list_add_tail(&upper->lower, &cache->leaves);
2579             upper->lowest = 1;
2580         }
2581     }
2582 
2583     btrfs_backref_drop_node(cache, node);
2584 }
2585 
2586 /*
2587  * Release all nodes/edges from current cache
2588  */
2589 void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
2590 {
2591     struct btrfs_backref_node *node;
2592     int i;
2593 
2594     while (!list_empty(&cache->detached)) {
2595         node = list_entry(cache->detached.next,
2596                   struct btrfs_backref_node, list);
2597         btrfs_backref_cleanup_node(cache, node);
2598     }
2599 
2600     while (!list_empty(&cache->leaves)) {
2601         node = list_entry(cache->leaves.next,
2602                   struct btrfs_backref_node, lower);
2603         btrfs_backref_cleanup_node(cache, node);
2604     }
2605 
2606     cache->last_trans = 0;
2607 
2608     for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2609         ASSERT(list_empty(&cache->pending[i]));
2610     ASSERT(list_empty(&cache->pending_edge));
2611     ASSERT(list_empty(&cache->useless_node));
2612     ASSERT(list_empty(&cache->changed));
2613     ASSERT(list_empty(&cache->detached));
2614     ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2615     ASSERT(!cache->nr_nodes);
2616     ASSERT(!cache->nr_edges);
2617 }
2618 
2619 /*
2620  * Handle direct tree backref
2621  *
2622  * Direct tree backref means the backref item shows its parent bytenr
2623  * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
2624  *
2625  * @ref_key:    The converted backref key.
2626  *      For keyed backref, it's the item key.
2627  *      For inlined backref, objectid is the bytenr,
2628  *      type is btrfs_inline_ref_type, offset is
2629  *      btrfs_inline_ref_offset.
2630  */
2631 static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
2632                       struct btrfs_key *ref_key,
2633                       struct btrfs_backref_node *cur)
2634 {
2635     struct btrfs_backref_edge *edge;
2636     struct btrfs_backref_node *upper;
2637     struct rb_node *rb_node;
2638 
2639     ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2640 
2641     /* Only reloc root uses backref pointing to itself */
2642     if (ref_key->objectid == ref_key->offset) {
2643         struct btrfs_root *root;
2644 
2645         cur->is_reloc_root = 1;
2646         /* Only reloc backref cache cares about a specific root */
2647         if (cache->is_reloc) {
2648             root = find_reloc_root(cache->fs_info, cur->bytenr);
2649             if (!root)
2650                 return -ENOENT;
2651             cur->root = root;
2652         } else {
2653             /*
2654              * For generic purpose backref cache, reloc root node
2655              * is useless.
2656              */
2657             list_add(&cur->list, &cache->useless_node);
2658         }
2659         return 0;
2660     }
2661 
2662     edge = btrfs_backref_alloc_edge(cache);
2663     if (!edge)
2664         return -ENOMEM;
2665 
2666     rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2667     if (!rb_node) {
2668         /* Parent node not yet cached */
2669         upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2670                        cur->level + 1);
2671         if (!upper) {
2672             btrfs_backref_free_edge(cache, edge);
2673             return -ENOMEM;
2674         }
2675 
2676         /*
2677          *  Backrefs for the upper level block aren't cached, add the
2678          *  block to the pending list
2679          */
2680         list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2681     } else {
2682         /* Parent node already cached */
2683         upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
2684         ASSERT(upper->checked);
2685         INIT_LIST_HEAD(&edge->list[UPPER]);
2686     }
2687     btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
2688     return 0;
2689 }
2690 
2691 /*
2692  * Handle indirect tree backref
2693  *
2694  * Indirect tree backref means we only know which tree the node belongs to.
2695  * We still need to do a tree search to find out the parents. This is for
2696  * TREE_BLOCK_REF backref (keyed or inlined).
2697  *
2698  * @ref_key:    The same as @ref_key in  handle_direct_tree_backref()
2699  * @tree_key:   The first key of this tree block.
2700  * @path:   A clean (released) path, to avoid allocating a path every time
2701  *      the function gets called.
2702  */
2703 static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
2704                     struct btrfs_path *path,
2705                     struct btrfs_key *ref_key,
2706                     struct btrfs_key *tree_key,
2707                     struct btrfs_backref_node *cur)
2708 {
2709     struct btrfs_fs_info *fs_info = cache->fs_info;
2710     struct btrfs_backref_node *upper;
2711     struct btrfs_backref_node *lower;
2712     struct btrfs_backref_edge *edge;
2713     struct extent_buffer *eb;
2714     struct btrfs_root *root;
2715     struct rb_node *rb_node;
2716     int level;
2717     bool need_check = true;
2718     int ret;
2719 
2720     root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2721     if (IS_ERR(root))
2722         return PTR_ERR(root);
2723     if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2724         cur->cowonly = 1;
2725 
2726     if (btrfs_root_level(&root->root_item) == cur->level) {
2727         /* Tree root */
2728         ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2729         /*
2730          * For reloc backref cache, we may ignore reloc root.  But for
2731          * general purpose backref cache, we can't rely on
2732          * btrfs_should_ignore_reloc_root() as it may conflict with
2733          * current running relocation and lead to missing root.
2734          *
2735          * For general purpose backref cache, reloc root detection is
2736          * completely relying on direct backref (key->offset is parent
2737          * bytenr), thus only do such check for reloc cache.
2738          */
2739         if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2740             btrfs_put_root(root);
2741             list_add(&cur->list, &cache->useless_node);
2742         } else {
2743             cur->root = root;
2744         }
2745         return 0;
2746     }
2747 
2748     level = cur->level + 1;
2749 
2750     /* Search the tree to find parent blocks referring to the block */
2751     path->search_commit_root = 1;
2752     path->skip_locking = 1;
2753     path->lowest_level = level;
2754     ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
2755     path->lowest_level = 0;
2756     if (ret < 0) {
2757         btrfs_put_root(root);
2758         return ret;
2759     }
2760     if (ret > 0 && path->slots[level] > 0)
2761         path->slots[level]--;
2762 
2763     eb = path->nodes[level];
2764     if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2765         btrfs_err(fs_info,
2766 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
2767               cur->bytenr, level - 1, root->root_key.objectid,
2768               tree_key->objectid, tree_key->type, tree_key->offset);
2769         btrfs_put_root(root);
2770         ret = -ENOENT;
2771         goto out;
2772     }
2773     lower = cur;
2774 
2775     /* Add all nodes and edges in the path */
2776     for (; level < BTRFS_MAX_LEVEL; level++) {
2777         if (!path->nodes[level]) {
2778             ASSERT(btrfs_root_bytenr(&root->root_item) ==
2779                    lower->bytenr);
2780             /* Same as previous should_ignore_reloc_root() call */
2781             if (btrfs_should_ignore_reloc_root(root) &&
2782                 cache->is_reloc) {
2783                 btrfs_put_root(root);
2784                 list_add(&lower->list, &cache->useless_node);
2785             } else {
2786                 lower->root = root;
2787             }
2788             break;
2789         }
2790 
2791         edge = btrfs_backref_alloc_edge(cache);
2792         if (!edge) {
2793             btrfs_put_root(root);
2794             ret = -ENOMEM;
2795             goto out;
2796         }
2797 
2798         eb = path->nodes[level];
2799         rb_node = rb_simple_search(&cache->rb_root, eb->start);
2800         if (!rb_node) {
2801             upper = btrfs_backref_alloc_node(cache, eb->start,
2802                              lower->level + 1);
2803             if (!upper) {
2804                 btrfs_put_root(root);
2805                 btrfs_backref_free_edge(cache, edge);
2806                 ret = -ENOMEM;
2807                 goto out;
2808             }
2809             upper->owner = btrfs_header_owner(eb);
2810             if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2811                 upper->cowonly = 1;
2812 
2813             /*
2814              * If we know the block isn't shared we can avoid
2815              * checking its backrefs.
2816              */
2817             if (btrfs_block_can_be_shared(root, eb))
2818                 upper->checked = 0;
2819             else
2820                 upper->checked = 1;
2821 
2822             /*
2823              * Add the block to pending list if we need to check its
2824              * backrefs, we only do this once while walking up a
2825              * tree as we will catch anything else later on.
2826              */
2827             if (!upper->checked && need_check) {
2828                 need_check = false;
2829                 list_add_tail(&edge->list[UPPER],
2830                           &cache->pending_edge);
2831             } else {
2832                 if (upper->checked)
2833                     need_check = true;
2834                 INIT_LIST_HEAD(&edge->list[UPPER]);
2835             }
2836         } else {
2837             upper = rb_entry(rb_node, struct btrfs_backref_node,
2838                      rb_node);
2839             ASSERT(upper->checked);
2840             INIT_LIST_HEAD(&edge->list[UPPER]);
2841             if (!upper->owner)
2842                 upper->owner = btrfs_header_owner(eb);
2843         }
2844         btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2845 
2846         if (rb_node) {
2847             btrfs_put_root(root);
2848             break;
2849         }
2850         lower = upper;
2851         upper = NULL;
2852     }
2853 out:
2854     btrfs_release_path(path);
2855     return ret;
2856 }
2857 
2858 /*
2859  * Add backref node @cur into @cache.
2860  *
2861  * NOTE: Even if the function returns 0, @cur is not yet cached as its upper
2862  *   links aren't yet bi-directional; use btrfs_backref_finish_upper_links()
2863  *   to finish the linkage.
2864  *
2865  * @path:   Released path for indirect tree backref lookup
2866  * @iter:   Released backref iter for extent tree search
2867  * @node_key:   The first key of the tree block
2868  */
2869 int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
2870                 struct btrfs_path *path,
2871                 struct btrfs_backref_iter *iter,
2872                 struct btrfs_key *node_key,
2873                 struct btrfs_backref_node *cur)
2874 {
2875     struct btrfs_fs_info *fs_info = cache->fs_info;
2876     struct btrfs_backref_edge *edge;
2877     struct btrfs_backref_node *exist;
2878     int ret;
2879 
2880     ret = btrfs_backref_iter_start(iter, cur->bytenr);
2881     if (ret < 0)
2882         return ret;
2883     /*
2884      * We skip the first btrfs_tree_block_info, as we don't use the key
2885      * stored in it, but fetch it from the tree block
2886      */
2887     if (btrfs_backref_has_tree_block_info(iter)) {
2888         ret = btrfs_backref_iter_next(iter);
2889         if (ret < 0)
2890             goto out;
2891         /* No extra backref? This means the tree block is corrupted */
2892         if (ret > 0) {
2893             ret = -EUCLEAN;
2894             goto out;
2895         }
2896     }
2897     WARN_ON(cur->checked);
2898     if (!list_empty(&cur->upper)) {
2899         /*
2900          * The backref was added previously when processing backref of
2901          * type BTRFS_TREE_BLOCK_REF_KEY
2902          */
2903         ASSERT(list_is_singular(&cur->upper));
2904         edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2905                   list[LOWER]);
2906         ASSERT(list_empty(&edge->list[UPPER]));
2907         exist = edge->node[UPPER];
2908         /*
2909          * Add the upper level block to the pending list if we need to
2910          * check its backrefs
2911          */
2912         if (!exist->checked)
2913             list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2914     } else {
2915         exist = NULL;
2916     }
2917 
2918     for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
2919         struct extent_buffer *eb;
2920         struct btrfs_key key;
2921         int type;
2922 
2923         cond_resched();
2924         eb = btrfs_backref_get_eb(iter);
2925 
2926         key.objectid = iter->bytenr;
2927         if (btrfs_backref_iter_is_inline_ref(iter)) {
2928             struct btrfs_extent_inline_ref *iref;
2929 
2930             /* Update key for inline backref */
2931             iref = (struct btrfs_extent_inline_ref *)
2932                 ((unsigned long)iter->cur_ptr);
2933             type = btrfs_get_extent_inline_ref_type(eb, iref,
2934                             BTRFS_REF_TYPE_BLOCK);
2935             if (type == BTRFS_REF_TYPE_INVALID) {
2936                 ret = -EUCLEAN;
2937                 goto out;
2938             }
2939             key.type = type;
2940             key.offset = btrfs_extent_inline_ref_offset(eb, iref);
2941         } else {
2942             key.type = iter->cur_key.type;
2943             key.offset = iter->cur_key.offset;
2944         }
2945 
2946         /*
2947          * Parent node found and matches current inline ref, no need to
2948          * rebuild this node for this inline ref
2949          */
2950         if (exist &&
2951             ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
2952               exist->owner == key.offset) ||
2953              (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
2954               exist->bytenr == key.offset))) {
2955             exist = NULL;
2956             continue;
2957         }
2958 
2959         /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
2960         if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
2961             ret = handle_direct_tree_backref(cache, &key, cur);
2962             if (ret < 0)
2963                 goto out;
2964             continue;
2965         } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
2966             ret = -EINVAL;
2967             btrfs_print_v0_err(fs_info);
2968             btrfs_handle_fs_error(fs_info, ret, NULL);
2969             goto out;
2970         } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
2971             continue;
2972         }
2973 
2974         /*
2975          * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
2976          * means the root objectid. We need to search the tree to get
2977          * its parent bytenr.
2978          */
2979         ret = handle_indirect_tree_backref(cache, path, &key, node_key,
2980                            cur);
2981         if (ret < 0)
2982             goto out;
2983     }
2984     ret = 0;
2985     cur->checked = 1;
2986     WARN_ON(exist);
2987 out:
2988     btrfs_backref_iter_release(iter);
2989     return ret;
2990 }
2991 
2992 /*
2993  * Finish the upwards linkage created by btrfs_backref_add_tree_node()
2994  */
2995 int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
2996                      struct btrfs_backref_node *start)
2997 {
2998     struct list_head *useless_node = &cache->useless_node;
2999     struct btrfs_backref_edge *edge;
3000     struct rb_node *rb_node;
3001     LIST_HEAD(pending_edge);
3002 
3003     ASSERT(start->checked);
3004 
3005     /* Insert this node to cache if it's not COW-only */
3006     if (!start->cowonly) {
3007         rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3008                        &start->rb_node);
3009         if (rb_node)
3010             btrfs_backref_panic(cache->fs_info, start->bytenr,
3011                         -EEXIST);
3012         list_add_tail(&start->lower, &cache->leaves);
3013     }
3014 
3015     /*
3016      * Use breadth first search to iterate all related edges.
3017      *
3018      * The starting points are all the edges of this node
3019      */
3020     list_for_each_entry(edge, &start->upper, list[LOWER])
3021         list_add_tail(&edge->list[UPPER], &pending_edge);
3022 
3023     while (!list_empty(&pending_edge)) {
3024         struct btrfs_backref_node *upper;
3025         struct btrfs_backref_node *lower;
3026 
3027         edge = list_first_entry(&pending_edge,
3028                 struct btrfs_backref_edge, list[UPPER]);
3029         list_del_init(&edge->list[UPPER]);
3030         upper = edge->node[UPPER];
3031         lower = edge->node[LOWER];
3032 
3033         /* Parent is detached, no need to keep any edges */
3034         if (upper->detached) {
3035             list_del(&edge->list[LOWER]);
3036             btrfs_backref_free_edge(cache, edge);
3037 
3038             /* Lower node is orphan, queue for cleanup */
3039             if (list_empty(&lower->upper))
3040                 list_add(&lower->list, useless_node);
3041             continue;
3042         }
3043 
3044         /*
3045          * All new nodes added in current build_backref_tree() haven't
3046          * been linked to the cache rb tree.
3047          * So if we have upper->rb_node populated, this means a cache
3048          * hit. We only need to link the edge, as @upper and all its
3049          * parents have already been linked.
3050          */
3051         if (!RB_EMPTY_NODE(&upper->rb_node)) {
3052             if (upper->lowest) {
3053                 list_del_init(&upper->lower);
3054                 upper->lowest = 0;
3055             }
3056 
3057             list_add_tail(&edge->list[UPPER], &upper->lower);
3058             continue;
3059         }
3060 
3061         /* Sanity check, we shouldn't have any unchecked nodes */
3062         if (!upper->checked) {
3063             ASSERT(0);
3064             return -EUCLEAN;
3065         }
3066 
3067         /* Sanity check, COW-only node has non-COW-only parent */
3068         if (start->cowonly != upper->cowonly) {
3069             ASSERT(0);
3070             return -EUCLEAN;
3071         }
3072 
3073         /* Only cache non-COW-only (subvolume trees) tree blocks */
3074         if (!upper->cowonly) {
3075             rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3076                            &upper->rb_node);
3077             if (rb_node) {
3078                 btrfs_backref_panic(cache->fs_info,
3079                         upper->bytenr, -EEXIST);
3080                 return -EUCLEAN;
3081             }
3082         }
3083 
3084         list_add_tail(&edge->list[UPPER], &upper->lower);
3085 
3086         /*
3087          * Also queue all the parent edges of this uncached node
3088          * to finish the upper linkage
3089          */
3090         list_for_each_entry(edge, &upper->upper, list[LOWER])
3091             list_add_tail(&edge->list[UPPER], &pending_edge);
3092     }
3093     return 0;
3094 }
3095 
3096 void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3097                  struct btrfs_backref_node *node)
3098 {
3099     struct btrfs_backref_node *lower;
3100     struct btrfs_backref_node *upper;
3101     struct btrfs_backref_edge *edge;
3102 
3103     while (!list_empty(&cache->useless_node)) {
3104         lower = list_first_entry(&cache->useless_node,
3105                    struct btrfs_backref_node, list);
3106         list_del_init(&lower->list);
3107     }
3108     while (!list_empty(&cache->pending_edge)) {
3109         edge = list_first_entry(&cache->pending_edge,
3110                 struct btrfs_backref_edge, list[UPPER]);
3111         list_del(&edge->list[UPPER]);
3112         list_del(&edge->list[LOWER]);
3113         lower = edge->node[LOWER];
3114         upper = edge->node[UPPER];
3115         btrfs_backref_free_edge(cache, edge);
3116 
3117         /*
3118          * Lower is no longer linked to any upper backref nodes and
3119          * isn't in the cache, we can free it ourselves.
3120          */
3121         if (list_empty(&lower->upper) &&
3122             RB_EMPTY_NODE(&lower->rb_node))
3123             list_add(&lower->list, &cache->useless_node);
3124 
3125         if (!RB_EMPTY_NODE(&upper->rb_node))
3126             continue;
3127 
3128         /* Add this guy's upper edges to the list to process */
3129         list_for_each_entry(edge, &upper->upper, list[LOWER])
3130             list_add_tail(&edge->list[UPPER],
3131                       &cache->pending_edge);
3132         if (list_empty(&upper->upper))
3133             list_add(&upper->list, &cache->useless_node);
3134     }
3135 
3136     while (!list_empty(&cache->useless_node)) {
3137         lower = list_first_entry(&cache->useless_node,
3138                    struct btrfs_backref_node, list);
3139         list_del_init(&lower->list);
3140         if (lower == node)
3141             node = NULL;
3142         btrfs_backref_drop_node(cache, lower);
3143     }
3144 
3145     btrfs_backref_cleanup_node(cache, node);
3146     ASSERT(list_empty(&cache->useless_node) &&
3147            list_empty(&cache->pending_edge));
3148 }
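
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the two-step pattern for populating a backref cache for one tree block,
 * modelled on the way relocation builds its backref tree. A full walk would
 * also process cache->pending_edge to recurse into uncached parent blocks;
 * that part is omitted here. example_cache_one_block() and its arguments are
 * assumptions.
 */
static int example_cache_one_block(struct btrfs_backref_cache *cache,
                   struct btrfs_path *path,
                   struct btrfs_backref_iter *iter,
                   struct btrfs_key *node_key,
                   u64 bytenr, int level)
{
    struct btrfs_backref_node *node;
    int ret;

    node = btrfs_backref_alloc_node(cache, bytenr, level);
    if (!node)
        return -ENOMEM;
    node->lowest = 1;

    /* Step 1: record all backrefs of @bytenr as edges of @node */
    ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, node);
    if (ret < 0)
        goto error;

    /* Step 2: make the collected upper links bi-directional */
    ret = btrfs_backref_finish_upper_links(cache, node);
    if (ret < 0)
        goto error;

    return 0;
error:
    /* drops whatever was queued on useless_node/pending_edge plus @node */
    btrfs_backref_error_cleanup(cache, node);
    return ret;
}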