Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (C) 2012 Alexander Block.  All rights reserved.
0004  */
0005 
0006 #include <linux/bsearch.h>
0007 #include <linux/fs.h>
0008 #include <linux/file.h>
0009 #include <linux/sort.h>
0010 #include <linux/mount.h>
0011 #include <linux/xattr.h>
0012 #include <linux/posix_acl_xattr.h>
0013 #include <linux/radix-tree.h>
0014 #include <linux/vmalloc.h>
0015 #include <linux/string.h>
0016 #include <linux/compat.h>
0017 #include <linux/crc32c.h>
0018 
0019 #include "send.h"
0020 #include "ctree.h"
0021 #include "backref.h"
0022 #include "locking.h"
0023 #include "disk-io.h"
0024 #include "btrfs_inode.h"
0025 #include "transaction.h"
0026 #include "compression.h"
0027 #include "xattr.h"
0028 #include "print-tree.h"
0029 
0030 /*
0031  * Maximum number of references an extent can have in order for us to attempt to
0032  * issue clone operations instead of write operations. This currently exists to
0033  * avoid hitting limitations of the backreference walking code (taking a lot of
0034  * time and using too much memory for extents with large number of references).
0035  */
0036 #define SEND_MAX_EXTENT_REFS    64
0037 
0038 /*
0039  * A fs_path is a helper to dynamically build path names with unknown size.
0040  * It reallocates the internal buffer on demand.
0041  * It allows fast adding of path elements on the right side (normal path) and
0042  * fast adding to the left side (reversed path). A reversed path can also be
0043  * unreversed if needed.
0044  */
0045 struct fs_path {
0046     union {
0047         struct {
0048             char *start;
0049             char *end;
0050 
0051             char *buf;
0052             unsigned short buf_len:15;
0053             unsigned short reversed:1;
0054             char inline_buf[];
0055         };
0056         /*
0057          * Average path length does not exceed 200 bytes, we'll have
0058          * better packing in the slab and higher chance to satisfy
0059          * a allocation later during send.
0060          */
0061         char pad[256];
0062     };
0063 };
0064 #define FS_PATH_INLINE_SIZE \
0065     (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
0066 
0067 
0068 /* reused for each extent */
0069 struct clone_root {
0070     struct btrfs_root *root;
0071     u64 ino;
0072     u64 offset;
0073 
0074     u64 found_refs;
0075 };
0076 
0077 #define SEND_CTX_MAX_NAME_CACHE_SIZE 128
0078 #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
0079 
0080 struct send_ctx {
0081     struct file *send_filp;
0082     loff_t send_off;
0083     char *send_buf;
0084     u32 send_size;
0085     u32 send_max_size;
0086     /*
0087      * Whether BTRFS_SEND_A_DATA attribute was already added to current
0088      * command (since protocol v2, data must be the last attribute).
0089      */
0090     bool put_data;
0091     struct page **send_buf_pages;
0092     u64 flags;  /* 'flags' member of btrfs_ioctl_send_args is u64 */
0093     /* Protocol version compatibility requested */
0094     u32 proto;
0095 
0096     struct btrfs_root *send_root;
0097     struct btrfs_root *parent_root;
0098     struct clone_root *clone_roots;
0099     int clone_roots_cnt;
0100 
0101     /* current state of the compare_tree call */
0102     struct btrfs_path *left_path;
0103     struct btrfs_path *right_path;
0104     struct btrfs_key *cmp_key;
0105 
0106     /*
0107      * Keep track of the generation of the last transaction that was used
0108      * for relocating a block group. This is periodically checked in order
0109      * to detect if a relocation happened since the last check, so that we
0110      * don't operate on stale extent buffers for nodes (level >= 1) or on
0111      * stale disk_bytenr values of file extent items.
0112      */
0113     u64 last_reloc_trans;
0114 
0115     /*
0116      * infos of the currently processed inode. In case of deleted inodes,
0117      * these are the values from the deleted inode.
0118      */
0119     u64 cur_ino;
0120     u64 cur_inode_gen;
0121     u64 cur_inode_size;
0122     u64 cur_inode_mode;
0123     u64 cur_inode_rdev;
0124     u64 cur_inode_last_extent;
0125     u64 cur_inode_next_write_offset;
0126     bool cur_inode_new;
0127     bool cur_inode_new_gen;
0128     bool cur_inode_deleted;
0129     bool ignore_cur_inode;
0130 
0131     u64 send_progress;
0132 
0133     struct list_head new_refs;
0134     struct list_head deleted_refs;
0135 
0136     struct radix_tree_root name_cache;
0137     struct list_head name_cache_list;
0138     int name_cache_size;
0139 
0140     /*
0141      * The inode we are currently processing. It's not NULL only when we
0142      * need to issue write commands for data extents from this inode.
0143      */
0144     struct inode *cur_inode;
0145     struct file_ra_state ra;
0146     u64 page_cache_clear_start;
0147     bool clean_page_cache;
0148 
0149     /*
0150      * We process inodes by their increasing order, so if before an
0151      * incremental send we reverse the parent/child relationship of
0152      * directories such that a directory with a lower inode number was
0153      * the parent of a directory with a higher inode number, and the one
0154      * becoming the new parent got renamed too, we can't rename/move the
0155      * directory with lower inode number when we finish processing it - we
0156      * must process the directory with higher inode number first, then
0157      * rename/move it and then rename/move the directory with lower inode
0158      * number. Example follows.
0159      *
0160      * Tree state when the first send was performed:
0161      *
0162      * .
0163      * |-- a                   (ino 257)
0164      *     |-- b               (ino 258)
0165      *         |
0166      *         |
0167      *         |-- c           (ino 259)
0168      *         |   |-- d       (ino 260)
0169      *         |
0170      *         |-- c2          (ino 261)
0171      *
0172      * Tree state when the second (incremental) send is performed:
0173      *
0174      * .
0175      * |-- a                   (ino 257)
0176      *     |-- b               (ino 258)
0177      *         |-- c2          (ino 261)
0178      *             |-- d2      (ino 260)
0179      *                 |-- cc  (ino 259)
0180      *
0181      * The sequence of steps that lead to the second state was:
0182      *
0183      * mv /a/b/c/d /a/b/c2/d2
0184      * mv /a/b/c /a/b/c2/d2/cc
0185      *
0186      * "c" has lower inode number, but we can't move it (2nd mv operation)
0187      * before we move "d", which has higher inode number.
0188      *
0189      * So we just memorize which move/rename operations must be performed
0190      * later when their respective parent is processed and moved/renamed.
0191      */
0192 
0193     /* Indexed by parent directory inode number. */
0194     struct rb_root pending_dir_moves;
0195 
0196     /*
0197      * Reverse index, indexed by the inode number of a directory that
0198      * is waiting for the move/rename of its immediate parent before its
0199      * own move/rename can be performed.
0200      */
0201     struct rb_root waiting_dir_moves;
0202 
0203     /*
0204      * A directory that is going to be rm'ed might have a child directory
0205      * which is in the pending directory moves index above. In this case,
0206      * the directory can only be removed after the move/rename of its child
0207      * is performed. Example:
0208      *
0209      * Parent snapshot:
0210      *
0211      * .                        (ino 256)
0212      * |-- a/                   (ino 257)
0213      *     |-- b/               (ino 258)
0214      *         |-- c/           (ino 259)
0215      *         |   |-- x/       (ino 260)
0216      *         |
0217      *         |-- y/           (ino 261)
0218      *
0219      * Send snapshot:
0220      *
0221      * .                        (ino 256)
0222      * |-- a/                   (ino 257)
0223      *     |-- b/               (ino 258)
0224      *         |-- YY/          (ino 261)
0225      *              |-- x/      (ino 260)
0226      *
0227      * Sequence of steps that lead to the send snapshot:
0228      * rm -f /a/b/c/foo.txt
0229      * mv /a/b/y /a/b/YY
0230      * mv /a/b/c/x /a/b/YY
0231      * rmdir /a/b/c
0232      *
0233      * When the child is processed, its move/rename is delayed until its
0234      * parent is processed (as explained above), but all other operations
0235      * like update utimes, chown, chgrp, etc, are performed and the paths
0236      * that it uses for those operations must use the orphanized name of
0237      * its parent (the directory we're going to rm later), so we need to
0238      * memorize that name.
0239      *
0240      * Indexed by the inode number of the directory to be deleted.
0241      */
0242     struct rb_root orphan_dirs;
0243 
0244     struct rb_root rbtree_new_refs;
0245     struct rb_root rbtree_deleted_refs;
0246 };
0247 
0248 struct pending_dir_move {
0249     struct rb_node node;
0250     struct list_head list;
0251     u64 parent_ino;
0252     u64 ino;
0253     u64 gen;
0254     struct list_head update_refs;
0255 };
0256 
0257 struct waiting_dir_move {
0258     struct rb_node node;
0259     u64 ino;
0260     /*
0261      * There might be some directory that could not be removed because it
0262      * was waiting for this directory inode to be moved first. Therefore
0263      * after this directory is moved, we can try to rmdir the ino rmdir_ino.
0264      */
0265     u64 rmdir_ino;
0266     u64 rmdir_gen;
0267     bool orphanized;
0268 };
0269 
0270 struct orphan_dir_info {
0271     struct rb_node node;
0272     u64 ino;
0273     u64 gen;
0274     u64 last_dir_index_offset;
0275 };
0276 
0277 struct name_cache_entry {
0278     struct list_head list;
0279     /*
0280      * radix_tree has only 32bit entries but we need to handle 64bit inums.
0281      * We use the lower 32bit of the 64bit inum to store it in the tree. If
0282      * more then one inum would fall into the same entry, we use radix_list
0283      * to store the additional entries. radix_list is also used to store
0284      * entries where two entries have the same inum but different
0285      * generations.
0286      */
0287     struct list_head radix_list;
0288     u64 ino;
0289     u64 gen;
0290     u64 parent_ino;
0291     u64 parent_gen;
0292     int ret;
0293     int need_later_update;
0294     int name_len;
0295     char name[];
0296 };
0297 
0298 #define ADVANCE                         1
0299 #define ADVANCE_ONLY_NEXT                   -1
0300 
0301 enum btrfs_compare_tree_result {
0302     BTRFS_COMPARE_TREE_NEW,
0303     BTRFS_COMPARE_TREE_DELETED,
0304     BTRFS_COMPARE_TREE_CHANGED,
0305     BTRFS_COMPARE_TREE_SAME,
0306 };
0307 
0308 __cold
0309 static void inconsistent_snapshot_error(struct send_ctx *sctx,
0310                     enum btrfs_compare_tree_result result,
0311                     const char *what)
0312 {
0313     const char *result_string;
0314 
0315     switch (result) {
0316     case BTRFS_COMPARE_TREE_NEW:
0317         result_string = "new";
0318         break;
0319     case BTRFS_COMPARE_TREE_DELETED:
0320         result_string = "deleted";
0321         break;
0322     case BTRFS_COMPARE_TREE_CHANGED:
0323         result_string = "updated";
0324         break;
0325     case BTRFS_COMPARE_TREE_SAME:
0326         ASSERT(0);
0327         result_string = "unchanged";
0328         break;
0329     default:
0330         ASSERT(0);
0331         result_string = "unexpected";
0332     }
0333 
0334     btrfs_err(sctx->send_root->fs_info,
0335           "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
0336           result_string, what, sctx->cmp_key->objectid,
0337           sctx->send_root->root_key.objectid,
0338           (sctx->parent_root ?
0339            sctx->parent_root->root_key.objectid : 0));
0340 }
0341 
0342 __maybe_unused
0343 static bool proto_cmd_ok(const struct send_ctx *sctx, int cmd)
0344 {
0345     switch (sctx->proto) {
0346     case 1:  return cmd <= BTRFS_SEND_C_MAX_V1;
0347     case 2:  return cmd <= BTRFS_SEND_C_MAX_V2;
0348     default: return false;
0349     }
0350 }
0351 
0352 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
0353 
0354 static struct waiting_dir_move *
0355 get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
0356 
0357 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
0358 
0359 static int need_send_hole(struct send_ctx *sctx)
0360 {
0361     return (sctx->parent_root && !sctx->cur_inode_new &&
0362         !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
0363         S_ISREG(sctx->cur_inode_mode));
0364 }
0365 
0366 static void fs_path_reset(struct fs_path *p)
0367 {
0368     if (p->reversed) {
0369         p->start = p->buf + p->buf_len - 1;
0370         p->end = p->start;
0371         *p->start = 0;
0372     } else {
0373         p->start = p->buf;
0374         p->end = p->start;
0375         *p->start = 0;
0376     }
0377 }
0378 
0379 static struct fs_path *fs_path_alloc(void)
0380 {
0381     struct fs_path *p;
0382 
0383     p = kmalloc(sizeof(*p), GFP_KERNEL);
0384     if (!p)
0385         return NULL;
0386     p->reversed = 0;
0387     p->buf = p->inline_buf;
0388     p->buf_len = FS_PATH_INLINE_SIZE;
0389     fs_path_reset(p);
0390     return p;
0391 }
0392 
0393 static struct fs_path *fs_path_alloc_reversed(void)
0394 {
0395     struct fs_path *p;
0396 
0397     p = fs_path_alloc();
0398     if (!p)
0399         return NULL;
0400     p->reversed = 1;
0401     fs_path_reset(p);
0402     return p;
0403 }
0404 
0405 static void fs_path_free(struct fs_path *p)
0406 {
0407     if (!p)
0408         return;
0409     if (p->buf != p->inline_buf)
0410         kfree(p->buf);
0411     kfree(p);
0412 }
0413 
0414 static int fs_path_len(struct fs_path *p)
0415 {
0416     return p->end - p->start;
0417 }
0418 
0419 static int fs_path_ensure_buf(struct fs_path *p, int len)
0420 {
0421     char *tmp_buf;
0422     int path_len;
0423     int old_buf_len;
0424 
0425     len++;
0426 
0427     if (p->buf_len >= len)
0428         return 0;
0429 
0430     if (len > PATH_MAX) {
0431         WARN_ON(1);
0432         return -ENOMEM;
0433     }
0434 
0435     path_len = p->end - p->start;
0436     old_buf_len = p->buf_len;
0437 
0438     /*
0439      * First time the inline_buf does not suffice
0440      */
0441     if (p->buf == p->inline_buf) {
0442         tmp_buf = kmalloc(len, GFP_KERNEL);
0443         if (tmp_buf)
0444             memcpy(tmp_buf, p->buf, old_buf_len);
0445     } else {
0446         tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
0447     }
0448     if (!tmp_buf)
0449         return -ENOMEM;
0450     p->buf = tmp_buf;
0451     /*
0452      * The real size of the buffer is bigger, this will let the fast path
0453      * happen most of the time
0454      */
0455     p->buf_len = ksize(p->buf);
0456 
0457     if (p->reversed) {
0458         tmp_buf = p->buf + old_buf_len - path_len - 1;
0459         p->end = p->buf + p->buf_len - 1;
0460         p->start = p->end - path_len;
0461         memmove(p->start, tmp_buf, path_len + 1);
0462     } else {
0463         p->start = p->buf;
0464         p->end = p->start + path_len;
0465     }
0466     return 0;
0467 }
0468 
0469 static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
0470                    char **prepared)
0471 {
0472     int ret;
0473     int new_len;
0474 
0475     new_len = p->end - p->start + name_len;
0476     if (p->start != p->end)
0477         new_len++;
0478     ret = fs_path_ensure_buf(p, new_len);
0479     if (ret < 0)
0480         goto out;
0481 
0482     if (p->reversed) {
0483         if (p->start != p->end)
0484             *--p->start = '/';
0485         p->start -= name_len;
0486         *prepared = p->start;
0487     } else {
0488         if (p->start != p->end)
0489             *p->end++ = '/';
0490         *prepared = p->end;
0491         p->end += name_len;
0492         *p->end = 0;
0493     }
0494 
0495 out:
0496     return ret;
0497 }
0498 
0499 static int fs_path_add(struct fs_path *p, const char *name, int name_len)
0500 {
0501     int ret;
0502     char *prepared;
0503 
0504     ret = fs_path_prepare_for_add(p, name_len, &prepared);
0505     if (ret < 0)
0506         goto out;
0507     memcpy(prepared, name, name_len);
0508 
0509 out:
0510     return ret;
0511 }
0512 
0513 static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
0514 {
0515     int ret;
0516     char *prepared;
0517 
0518     ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
0519     if (ret < 0)
0520         goto out;
0521     memcpy(prepared, p2->start, p2->end - p2->start);
0522 
0523 out:
0524     return ret;
0525 }
0526 
0527 static int fs_path_add_from_extent_buffer(struct fs_path *p,
0528                       struct extent_buffer *eb,
0529                       unsigned long off, int len)
0530 {
0531     int ret;
0532     char *prepared;
0533 
0534     ret = fs_path_prepare_for_add(p, len, &prepared);
0535     if (ret < 0)
0536         goto out;
0537 
0538     read_extent_buffer(eb, prepared, off, len);
0539 
0540 out:
0541     return ret;
0542 }
0543 
0544 static int fs_path_copy(struct fs_path *p, struct fs_path *from)
0545 {
0546     p->reversed = from->reversed;
0547     fs_path_reset(p);
0548 
0549     return fs_path_add_path(p, from);
0550 }
0551 
0552 static void fs_path_unreverse(struct fs_path *p)
0553 {
0554     char *tmp;
0555     int len;
0556 
0557     if (!p->reversed)
0558         return;
0559 
0560     tmp = p->start;
0561     len = p->end - p->start;
0562     p->start = p->buf;
0563     p->end = p->start + len;
0564     memmove(p->start, tmp, len + 1);
0565     p->reversed = 0;
0566 }
0567 
0568 static struct btrfs_path *alloc_path_for_send(void)
0569 {
0570     struct btrfs_path *path;
0571 
0572     path = btrfs_alloc_path();
0573     if (!path)
0574         return NULL;
0575     path->search_commit_root = 1;
0576     path->skip_locking = 1;
0577     path->need_commit_sem = 1;
0578     return path;
0579 }
0580 
0581 static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
0582 {
0583     int ret;
0584     u32 pos = 0;
0585 
0586     while (pos < len) {
0587         ret = kernel_write(filp, buf + pos, len - pos, off);
0588         if (ret < 0)
0589             return ret;
0590         if (ret == 0)
0591             return -EIO;
0592         pos += ret;
0593     }
0594 
0595     return 0;
0596 }
0597 
0598 static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
0599 {
0600     struct btrfs_tlv_header *hdr;
0601     int total_len = sizeof(*hdr) + len;
0602     int left = sctx->send_max_size - sctx->send_size;
0603 
0604     if (WARN_ON_ONCE(sctx->put_data))
0605         return -EINVAL;
0606 
0607     if (unlikely(left < total_len))
0608         return -EOVERFLOW;
0609 
0610     hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
0611     put_unaligned_le16(attr, &hdr->tlv_type);
0612     put_unaligned_le16(len, &hdr->tlv_len);
0613     memcpy(hdr + 1, data, len);
0614     sctx->send_size += total_len;
0615 
0616     return 0;
0617 }
0618 
0619 #define TLV_PUT_DEFINE_INT(bits) \
0620     static int tlv_put_u##bits(struct send_ctx *sctx,       \
0621             u##bits attr, u##bits value)            \
0622     {                               \
0623         __le##bits __tmp = cpu_to_le##bits(value);      \
0624         return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));  \
0625     }
0626 
0627 TLV_PUT_DEFINE_INT(32)
0628 TLV_PUT_DEFINE_INT(64)
0629 
0630 static int tlv_put_string(struct send_ctx *sctx, u16 attr,
0631               const char *str, int len)
0632 {
0633     if (len == -1)
0634         len = strlen(str);
0635     return tlv_put(sctx, attr, str, len);
0636 }
0637 
0638 static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
0639             const u8 *uuid)
0640 {
0641     return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
0642 }
0643 
0644 static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
0645                   struct extent_buffer *eb,
0646                   struct btrfs_timespec *ts)
0647 {
0648     struct btrfs_timespec bts;
0649     read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
0650     return tlv_put(sctx, attr, &bts, sizeof(bts));
0651 }
0652 
0653 
0654 #define TLV_PUT(sctx, attrtype, data, attrlen) \
0655     do { \
0656         ret = tlv_put(sctx, attrtype, data, attrlen); \
0657         if (ret < 0) \
0658             goto tlv_put_failure; \
0659     } while (0)
0660 
0661 #define TLV_PUT_INT(sctx, attrtype, bits, value) \
0662     do { \
0663         ret = tlv_put_u##bits(sctx, attrtype, value); \
0664         if (ret < 0) \
0665             goto tlv_put_failure; \
0666     } while (0)
0667 
0668 #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
0669 #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
0670 #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
0671 #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
0672 #define TLV_PUT_STRING(sctx, attrtype, str, len) \
0673     do { \
0674         ret = tlv_put_string(sctx, attrtype, str, len); \
0675         if (ret < 0) \
0676             goto tlv_put_failure; \
0677     } while (0)
0678 #define TLV_PUT_PATH(sctx, attrtype, p) \
0679     do { \
0680         ret = tlv_put_string(sctx, attrtype, p->start, \
0681             p->end - p->start); \
0682         if (ret < 0) \
0683             goto tlv_put_failure; \
0684     } while(0)
0685 #define TLV_PUT_UUID(sctx, attrtype, uuid) \
0686     do { \
0687         ret = tlv_put_uuid(sctx, attrtype, uuid); \
0688         if (ret < 0) \
0689             goto tlv_put_failure; \
0690     } while (0)
0691 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
0692     do { \
0693         ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
0694         if (ret < 0) \
0695             goto tlv_put_failure; \
0696     } while (0)
0697 
0698 static int send_header(struct send_ctx *sctx)
0699 {
0700     struct btrfs_stream_header hdr;
0701 
0702     strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
0703     hdr.version = cpu_to_le32(sctx->proto);
0704     return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
0705                     &sctx->send_off);
0706 }
0707 
0708 /*
0709  * For each command/item we want to send to userspace, we call this function.
0710  */
0711 static int begin_cmd(struct send_ctx *sctx, int cmd)
0712 {
0713     struct btrfs_cmd_header *hdr;
0714 
0715     if (WARN_ON(!sctx->send_buf))
0716         return -EINVAL;
0717 
0718     BUG_ON(sctx->send_size);
0719 
0720     sctx->send_size += sizeof(*hdr);
0721     hdr = (struct btrfs_cmd_header *)sctx->send_buf;
0722     put_unaligned_le16(cmd, &hdr->cmd);
0723 
0724     return 0;
0725 }
0726 
0727 static int send_cmd(struct send_ctx *sctx)
0728 {
0729     int ret;
0730     struct btrfs_cmd_header *hdr;
0731     u32 crc;
0732 
0733     hdr = (struct btrfs_cmd_header *)sctx->send_buf;
0734     put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len);
0735     put_unaligned_le32(0, &hdr->crc);
0736 
0737     crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
0738     put_unaligned_le32(crc, &hdr->crc);
0739 
0740     ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
0741                     &sctx->send_off);
0742 
0743     sctx->send_size = 0;
0744     sctx->put_data = false;
0745 
0746     return ret;
0747 }
0748 
0749 /*
0750  * Sends a move instruction to user space
0751  */
0752 static int send_rename(struct send_ctx *sctx,
0753              struct fs_path *from, struct fs_path *to)
0754 {
0755     struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
0756     int ret;
0757 
0758     btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
0759 
0760     ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
0761     if (ret < 0)
0762         goto out;
0763 
0764     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
0765     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
0766 
0767     ret = send_cmd(sctx);
0768 
0769 tlv_put_failure:
0770 out:
0771     return ret;
0772 }
0773 
0774 /*
0775  * Sends a link instruction to user space
0776  */
0777 static int send_link(struct send_ctx *sctx,
0778              struct fs_path *path, struct fs_path *lnk)
0779 {
0780     struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
0781     int ret;
0782 
0783     btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
0784 
0785     ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
0786     if (ret < 0)
0787         goto out;
0788 
0789     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
0790     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
0791 
0792     ret = send_cmd(sctx);
0793 
0794 tlv_put_failure:
0795 out:
0796     return ret;
0797 }
0798 
0799 /*
0800  * Sends an unlink instruction to user space
0801  */
0802 static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
0803 {
0804     struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
0805     int ret;
0806 
0807     btrfs_debug(fs_info, "send_unlink %s", path->start);
0808 
0809     ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
0810     if (ret < 0)
0811         goto out;
0812 
0813     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
0814 
0815     ret = send_cmd(sctx);
0816 
0817 tlv_put_failure:
0818 out:
0819     return ret;
0820 }
0821 
0822 /*
0823  * Sends a rmdir instruction to user space
0824  */
0825 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
0826 {
0827     struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
0828     int ret;
0829 
0830     btrfs_debug(fs_info, "send_rmdir %s", path->start);
0831 
0832     ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
0833     if (ret < 0)
0834         goto out;
0835 
0836     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
0837 
0838     ret = send_cmd(sctx);
0839 
0840 tlv_put_failure:
0841 out:
0842     return ret;
0843 }
0844 
0845 /*
0846  * Helper function to retrieve some fields from an inode item.
0847  */
0848 static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
0849               u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
0850               u64 *gid, u64 *rdev, u64 *fileattr)
0851 {
0852     int ret;
0853     struct btrfs_inode_item *ii;
0854     struct btrfs_key key;
0855 
0856     key.objectid = ino;
0857     key.type = BTRFS_INODE_ITEM_KEY;
0858     key.offset = 0;
0859     ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
0860     if (ret) {
0861         if (ret > 0)
0862             ret = -ENOENT;
0863         return ret;
0864     }
0865 
0866     ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
0867             struct btrfs_inode_item);
0868     if (size)
0869         *size = btrfs_inode_size(path->nodes[0], ii);
0870     if (gen)
0871         *gen = btrfs_inode_generation(path->nodes[0], ii);
0872     if (mode)
0873         *mode = btrfs_inode_mode(path->nodes[0], ii);
0874     if (uid)
0875         *uid = btrfs_inode_uid(path->nodes[0], ii);
0876     if (gid)
0877         *gid = btrfs_inode_gid(path->nodes[0], ii);
0878     if (rdev)
0879         *rdev = btrfs_inode_rdev(path->nodes[0], ii);
0880     /*
0881      * Transfer the unchanged u64 value of btrfs_inode_item::flags, that's
0882      * otherwise logically split to 32/32 parts.
0883      */
0884     if (fileattr)
0885         *fileattr = btrfs_inode_flags(path->nodes[0], ii);
0886 
0887     return ret;
0888 }
0889 
0890 static int get_inode_info(struct btrfs_root *root,
0891               u64 ino, u64 *size, u64 *gen,
0892               u64 *mode, u64 *uid, u64 *gid,
0893               u64 *rdev, u64 *fileattr)
0894 {
0895     struct btrfs_path *path;
0896     int ret;
0897 
0898     path = alloc_path_for_send();
0899     if (!path)
0900         return -ENOMEM;
0901     ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
0902                    rdev, fileattr);
0903     btrfs_free_path(path);
0904     return ret;
0905 }
0906 
0907 typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
0908                    struct fs_path *p,
0909                    void *ctx);
0910 
0911 /*
0912  * Helper function to iterate the entries in ONE btrfs_inode_ref or
0913  * btrfs_inode_extref.
0914  * The iterate callback may return a non zero value to stop iteration. This can
0915  * be a negative value for error codes or 1 to simply stop it.
0916  *
0917  * path must point to the INODE_REF or INODE_EXTREF when called.
0918  */
0919 static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
0920                  struct btrfs_key *found_key, int resolve,
0921                  iterate_inode_ref_t iterate, void *ctx)
0922 {
0923     struct extent_buffer *eb = path->nodes[0];
0924     struct btrfs_inode_ref *iref;
0925     struct btrfs_inode_extref *extref;
0926     struct btrfs_path *tmp_path;
0927     struct fs_path *p;
0928     u32 cur = 0;
0929     u32 total;
0930     int slot = path->slots[0];
0931     u32 name_len;
0932     char *start;
0933     int ret = 0;
0934     int num = 0;
0935     int index;
0936     u64 dir;
0937     unsigned long name_off;
0938     unsigned long elem_size;
0939     unsigned long ptr;
0940 
0941     p = fs_path_alloc_reversed();
0942     if (!p)
0943         return -ENOMEM;
0944 
0945     tmp_path = alloc_path_for_send();
0946     if (!tmp_path) {
0947         fs_path_free(p);
0948         return -ENOMEM;
0949     }
0950 
0951 
0952     if (found_key->type == BTRFS_INODE_REF_KEY) {
0953         ptr = (unsigned long)btrfs_item_ptr(eb, slot,
0954                             struct btrfs_inode_ref);
0955         total = btrfs_item_size(eb, slot);
0956         elem_size = sizeof(*iref);
0957     } else {
0958         ptr = btrfs_item_ptr_offset(eb, slot);
0959         total = btrfs_item_size(eb, slot);
0960         elem_size = sizeof(*extref);
0961     }
0962 
0963     while (cur < total) {
0964         fs_path_reset(p);
0965 
0966         if (found_key->type == BTRFS_INODE_REF_KEY) {
0967             iref = (struct btrfs_inode_ref *)(ptr + cur);
0968             name_len = btrfs_inode_ref_name_len(eb, iref);
0969             name_off = (unsigned long)(iref + 1);
0970             index = btrfs_inode_ref_index(eb, iref);
0971             dir = found_key->offset;
0972         } else {
0973             extref = (struct btrfs_inode_extref *)(ptr + cur);
0974             name_len = btrfs_inode_extref_name_len(eb, extref);
0975             name_off = (unsigned long)&extref->name;
0976             index = btrfs_inode_extref_index(eb, extref);
0977             dir = btrfs_inode_extref_parent(eb, extref);
0978         }
0979 
0980         if (resolve) {
0981             start = btrfs_ref_to_path(root, tmp_path, name_len,
0982                           name_off, eb, dir,
0983                           p->buf, p->buf_len);
0984             if (IS_ERR(start)) {
0985                 ret = PTR_ERR(start);
0986                 goto out;
0987             }
0988             if (start < p->buf) {
0989                 /* overflow , try again with larger buffer */
0990                 ret = fs_path_ensure_buf(p,
0991                         p->buf_len + p->buf - start);
0992                 if (ret < 0)
0993                     goto out;
0994                 start = btrfs_ref_to_path(root, tmp_path,
0995                               name_len, name_off,
0996                               eb, dir,
0997                               p->buf, p->buf_len);
0998                 if (IS_ERR(start)) {
0999                     ret = PTR_ERR(start);
1000                     goto out;
1001                 }
1002                 BUG_ON(start < p->buf);
1003             }
1004             p->start = start;
1005         } else {
1006             ret = fs_path_add_from_extent_buffer(p, eb, name_off,
1007                                  name_len);
1008             if (ret < 0)
1009                 goto out;
1010         }
1011 
1012         cur += elem_size + name_len;
1013         ret = iterate(num, dir, index, p, ctx);
1014         if (ret)
1015             goto out;
1016         num++;
1017     }
1018 
1019 out:
1020     btrfs_free_path(tmp_path);
1021     fs_path_free(p);
1022     return ret;
1023 }
1024 
1025 typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
1026                   const char *name, int name_len,
1027                   const char *data, int data_len,
1028                   void *ctx);
1029 
1030 /*
1031  * Helper function to iterate the entries in ONE btrfs_dir_item.
1032  * The iterate callback may return a non zero value to stop iteration. This can
1033  * be a negative value for error codes or 1 to simply stop it.
1034  *
1035  * path must point to the dir item when called.
1036  */
1037 static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
1038                 iterate_dir_item_t iterate, void *ctx)
1039 {
1040     int ret = 0;
1041     struct extent_buffer *eb;
1042     struct btrfs_dir_item *di;
1043     struct btrfs_key di_key;
1044     char *buf = NULL;
1045     int buf_len;
1046     u32 name_len;
1047     u32 data_len;
1048     u32 cur;
1049     u32 len;
1050     u32 total;
1051     int slot;
1052     int num;
1053 
1054     /*
1055      * Start with a small buffer (1 page). If later we end up needing more
1056      * space, which can happen for xattrs on a fs with a leaf size greater
1057      * then the page size, attempt to increase the buffer. Typically xattr
1058      * values are small.
1059      */
1060     buf_len = PATH_MAX;
1061     buf = kmalloc(buf_len, GFP_KERNEL);
1062     if (!buf) {
1063         ret = -ENOMEM;
1064         goto out;
1065     }
1066 
1067     eb = path->nodes[0];
1068     slot = path->slots[0];
1069     di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1070     cur = 0;
1071     len = 0;
1072     total = btrfs_item_size(eb, slot);
1073 
1074     num = 0;
1075     while (cur < total) {
1076         name_len = btrfs_dir_name_len(eb, di);
1077         data_len = btrfs_dir_data_len(eb, di);
1078         btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1079 
1080         if (btrfs_dir_type(eb, di) == BTRFS_FT_XATTR) {
1081             if (name_len > XATTR_NAME_MAX) {
1082                 ret = -ENAMETOOLONG;
1083                 goto out;
1084             }
1085             if (name_len + data_len >
1086                     BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
1087                 ret = -E2BIG;
1088                 goto out;
1089             }
1090         } else {
1091             /*
1092              * Path too long
1093              */
1094             if (name_len + data_len > PATH_MAX) {
1095                 ret = -ENAMETOOLONG;
1096                 goto out;
1097             }
1098         }
1099 
1100         if (name_len + data_len > buf_len) {
1101             buf_len = name_len + data_len;
1102             if (is_vmalloc_addr(buf)) {
1103                 vfree(buf);
1104                 buf = NULL;
1105             } else {
1106                 char *tmp = krealloc(buf, buf_len,
1107                         GFP_KERNEL | __GFP_NOWARN);
1108 
1109                 if (!tmp)
1110                     kfree(buf);
1111                 buf = tmp;
1112             }
1113             if (!buf) {
1114                 buf = kvmalloc(buf_len, GFP_KERNEL);
1115                 if (!buf) {
1116                     ret = -ENOMEM;
1117                     goto out;
1118                 }
1119             }
1120         }
1121 
1122         read_extent_buffer(eb, buf, (unsigned long)(di + 1),
1123                 name_len + data_len);
1124 
1125         len = sizeof(*di) + name_len + data_len;
1126         di = (struct btrfs_dir_item *)((char *)di + len);
1127         cur += len;
1128 
1129         ret = iterate(num, &di_key, buf, name_len, buf + name_len,
1130                   data_len, ctx);
1131         if (ret < 0)
1132             goto out;
1133         if (ret) {
1134             ret = 0;
1135             goto out;
1136         }
1137 
1138         num++;
1139     }
1140 
1141 out:
1142     kvfree(buf);
1143     return ret;
1144 }
1145 
1146 static int __copy_first_ref(int num, u64 dir, int index,
1147                 struct fs_path *p, void *ctx)
1148 {
1149     int ret;
1150     struct fs_path *pt = ctx;
1151 
1152     ret = fs_path_copy(pt, p);
1153     if (ret < 0)
1154         return ret;
1155 
1156     /* we want the first only */
1157     return 1;
1158 }
1159 
1160 /*
1161  * Retrieve the first path of an inode. If an inode has more then one
1162  * ref/hardlink, this is ignored.
1163  */
1164 static int get_inode_path(struct btrfs_root *root,
1165               u64 ino, struct fs_path *path)
1166 {
1167     int ret;
1168     struct btrfs_key key, found_key;
1169     struct btrfs_path *p;
1170 
1171     p = alloc_path_for_send();
1172     if (!p)
1173         return -ENOMEM;
1174 
1175     fs_path_reset(path);
1176 
1177     key.objectid = ino;
1178     key.type = BTRFS_INODE_REF_KEY;
1179     key.offset = 0;
1180 
1181     ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
1182     if (ret < 0)
1183         goto out;
1184     if (ret) {
1185         ret = 1;
1186         goto out;
1187     }
1188     btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1189     if (found_key.objectid != ino ||
1190         (found_key.type != BTRFS_INODE_REF_KEY &&
1191          found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1192         ret = -ENOENT;
1193         goto out;
1194     }
1195 
1196     ret = iterate_inode_ref(root, p, &found_key, 1,
1197                 __copy_first_ref, path);
1198     if (ret < 0)
1199         goto out;
1200     ret = 0;
1201 
1202 out:
1203     btrfs_free_path(p);
1204     return ret;
1205 }
1206 
1207 struct backref_ctx {
1208     struct send_ctx *sctx;
1209 
1210     /* number of total found references */
1211     u64 found;
1212 
1213     /*
1214      * used for clones found in send_root. clones found behind cur_objectid
1215      * and cur_offset are not considered as allowed clones.
1216      */
1217     u64 cur_objectid;
1218     u64 cur_offset;
1219 
1220     /* may be truncated in case it's the last extent in a file */
1221     u64 extent_len;
1222 
1223     /* Just to check for bugs in backref resolving */
1224     int found_itself;
1225 };
1226 
1227 static int __clone_root_cmp_bsearch(const void *key, const void *elt)
1228 {
1229     u64 root = (u64)(uintptr_t)key;
1230     const struct clone_root *cr = elt;
1231 
1232     if (root < cr->root->root_key.objectid)
1233         return -1;
1234     if (root > cr->root->root_key.objectid)
1235         return 1;
1236     return 0;
1237 }
1238 
1239 static int __clone_root_cmp_sort(const void *e1, const void *e2)
1240 {
1241     const struct clone_root *cr1 = e1;
1242     const struct clone_root *cr2 = e2;
1243 
1244     if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
1245         return -1;
1246     if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
1247         return 1;
1248     return 0;
1249 }
1250 
1251 /*
1252  * Called for every backref that is found for the current extent.
1253  * Results are collected in sctx->clone_roots->ino/offset/found_refs
1254  */
1255 static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1256 {
1257     struct backref_ctx *bctx = ctx_;
1258     struct clone_root *found;
1259 
1260     /* First check if the root is in the list of accepted clone sources */
1261     found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
1262             bctx->sctx->clone_roots_cnt,
1263             sizeof(struct clone_root),
1264             __clone_root_cmp_bsearch);
1265     if (!found)
1266         return 0;
1267 
1268     if (found->root == bctx->sctx->send_root &&
1269         ino == bctx->cur_objectid &&
1270         offset == bctx->cur_offset) {
1271         bctx->found_itself = 1;
1272     }
1273 
1274     /*
1275      * Make sure we don't consider clones from send_root that are
1276      * behind the current inode/offset.
1277      */
1278     if (found->root == bctx->sctx->send_root) {
1279         /*
1280          * If the source inode was not yet processed we can't issue a
1281          * clone operation, as the source extent does not exist yet at
1282          * the destination of the stream.
1283          */
1284         if (ino > bctx->cur_objectid)
1285             return 0;
1286         /*
1287          * We clone from the inode currently being sent as long as the
1288          * source extent is already processed, otherwise we could try
1289          * to clone from an extent that does not exist yet at the
1290          * destination of the stream.
1291          */
1292         if (ino == bctx->cur_objectid &&
1293             offset + bctx->extent_len >
1294             bctx->sctx->cur_inode_next_write_offset)
1295             return 0;
1296     }
1297 
1298     bctx->found++;
1299     found->found_refs++;
1300     if (ino < found->ino) {
1301         found->ino = ino;
1302         found->offset = offset;
1303     } else if (found->ino == ino) {
1304         /*
1305          * same extent found more then once in the same file.
1306          */
1307         if (found->offset > offset + bctx->extent_len)
1308             found->offset = offset;
1309     }
1310 
1311     return 0;
1312 }
1313 
1314 /*
1315  * Given an inode, offset and extent item, it finds a good clone for a clone
1316  * instruction. Returns -ENOENT when none could be found. The function makes
1317  * sure that the returned clone is usable at the point where sending is at the
1318  * moment. This means, that no clones are accepted which lie behind the current
1319  * inode+offset.
1320  *
1321  * path must point to the extent item when called.
1322  */
1323 static int find_extent_clone(struct send_ctx *sctx,
1324                  struct btrfs_path *path,
1325                  u64 ino, u64 data_offset,
1326                  u64 ino_size,
1327                  struct clone_root **found)
1328 {
1329     struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
1330     int ret;
1331     int extent_type;
1332     u64 logical;
1333     u64 disk_byte;
1334     u64 num_bytes;
1335     u64 extent_item_pos;
1336     u64 flags = 0;
1337     struct btrfs_file_extent_item *fi;
1338     struct extent_buffer *eb = path->nodes[0];
1339     struct backref_ctx backref_ctx = {0};
1340     struct clone_root *cur_clone_root;
1341     struct btrfs_key found_key;
1342     struct btrfs_path *tmp_path;
1343     struct btrfs_extent_item *ei;
1344     int compressed;
1345     u32 i;
1346 
1347     tmp_path = alloc_path_for_send();
1348     if (!tmp_path)
1349         return -ENOMEM;
1350 
1351     /* We only use this path under the commit sem */
1352     tmp_path->need_commit_sem = 0;
1353 
1354     if (data_offset >= ino_size) {
1355         /*
1356          * There may be extents that lie behind the file's size.
1357          * I at least had this in combination with snapshotting while
1358          * writing large files.
1359          */
1360         ret = 0;
1361         goto out;
1362     }
1363 
1364     fi = btrfs_item_ptr(eb, path->slots[0],
1365             struct btrfs_file_extent_item);
1366     extent_type = btrfs_file_extent_type(eb, fi);
1367     if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1368         ret = -ENOENT;
1369         goto out;
1370     }
1371     compressed = btrfs_file_extent_compression(eb, fi);
1372 
1373     num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1374     disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1375     if (disk_byte == 0) {
1376         ret = -ENOENT;
1377         goto out;
1378     }
1379     logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1380 
1381     down_read(&fs_info->commit_root_sem);
1382     ret = extent_from_logical(fs_info, disk_byte, tmp_path,
1383                   &found_key, &flags);
1384     up_read(&fs_info->commit_root_sem);
1385 
1386     if (ret < 0)
1387         goto out;
1388     if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1389         ret = -EIO;
1390         goto out;
1391     }
1392 
1393     ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
1394                 struct btrfs_extent_item);
1395     /*
1396      * Backreference walking (iterate_extent_inodes() below) is currently
1397      * too expensive when an extent has a large number of references, both
1398      * in time spent and used memory. So for now just fallback to write
1399      * operations instead of clone operations when an extent has more than
1400      * a certain amount of references.
1401      */
1402     if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) {
1403         ret = -ENOENT;
1404         goto out;
1405     }
1406     btrfs_release_path(tmp_path);
1407 
1408     /*
1409      * Setup the clone roots.
1410      */
1411     for (i = 0; i < sctx->clone_roots_cnt; i++) {
1412         cur_clone_root = sctx->clone_roots + i;
1413         cur_clone_root->ino = (u64)-1;
1414         cur_clone_root->offset = 0;
1415         cur_clone_root->found_refs = 0;
1416     }
1417 
1418     backref_ctx.sctx = sctx;
1419     backref_ctx.found = 0;
1420     backref_ctx.cur_objectid = ino;
1421     backref_ctx.cur_offset = data_offset;
1422     backref_ctx.found_itself = 0;
1423     backref_ctx.extent_len = num_bytes;
1424 
1425     /*
1426      * The last extent of a file may be too large due to page alignment.
1427      * We need to adjust extent_len in this case so that the checks in
1428      * __iterate_backrefs work.
1429      */
1430     if (data_offset + num_bytes >= ino_size)
1431         backref_ctx.extent_len = ino_size - data_offset;
1432 
1433     /*
1434      * Now collect all backrefs.
1435      */
1436     if (compressed == BTRFS_COMPRESS_NONE)
1437         extent_item_pos = logical - found_key.objectid;
1438     else
1439         extent_item_pos = 0;
1440     ret = iterate_extent_inodes(fs_info, found_key.objectid,
1441                     extent_item_pos, 1, __iterate_backrefs,
1442                     &backref_ctx, false);
1443 
1444     if (ret < 0)
1445         goto out;
1446 
1447     down_read(&fs_info->commit_root_sem);
1448     if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
1449         /*
1450          * A transaction commit for a transaction in which block group
1451          * relocation was done just happened.
1452          * The disk_bytenr of the file extent item we processed is
1453          * possibly stale, referring to the extent's location before
1454          * relocation. So act as if we haven't found any clone sources
1455          * and fallback to write commands, which will read the correct
1456          * data from the new extent location. Otherwise we will fail
1457          * below because we haven't found our own back reference or we
1458          * could be getting incorrect sources in case the old extent
1459          * was already reallocated after the relocation.
1460          */
1461         up_read(&fs_info->commit_root_sem);
1462         ret = -ENOENT;
1463         goto out;
1464     }
1465     up_read(&fs_info->commit_root_sem);
1466 
1467     if (!backref_ctx.found_itself) {
1468         /* found a bug in backref code? */
1469         ret = -EIO;
1470         btrfs_err(fs_info,
1471               "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
1472               ino, data_offset, disk_byte, found_key.objectid);
1473         goto out;
1474     }
1475 
1476     btrfs_debug(fs_info,
1477             "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
1478             data_offset, ino, num_bytes, logical);
1479 
1480     if (!backref_ctx.found)
1481         btrfs_debug(fs_info, "no clones found");
1482 
1483     cur_clone_root = NULL;
1484     for (i = 0; i < sctx->clone_roots_cnt; i++) {
1485         if (sctx->clone_roots[i].found_refs) {
1486             if (!cur_clone_root)
1487                 cur_clone_root = sctx->clone_roots + i;
1488             else if (sctx->clone_roots[i].root == sctx->send_root)
1489                 /* prefer clones from send_root over others */
1490                 cur_clone_root = sctx->clone_roots + i;
1491         }
1492 
1493     }
1494 
1495     if (cur_clone_root) {
1496         *found = cur_clone_root;
1497         ret = 0;
1498     } else {
1499         ret = -ENOENT;
1500     }
1501 
1502 out:
1503     btrfs_free_path(tmp_path);
1504     return ret;
1505 }
1506 
1507 static int read_symlink(struct btrfs_root *root,
1508             u64 ino,
1509             struct fs_path *dest)
1510 {
1511     int ret;
1512     struct btrfs_path *path;
1513     struct btrfs_key key;
1514     struct btrfs_file_extent_item *ei;
1515     u8 type;
1516     u8 compression;
1517     unsigned long off;
1518     int len;
1519 
1520     path = alloc_path_for_send();
1521     if (!path)
1522         return -ENOMEM;
1523 
1524     key.objectid = ino;
1525     key.type = BTRFS_EXTENT_DATA_KEY;
1526     key.offset = 0;
1527     ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1528     if (ret < 0)
1529         goto out;
1530     if (ret) {
1531         /*
1532          * An empty symlink inode. Can happen in rare error paths when
1533          * creating a symlink (transaction committed before the inode
1534          * eviction handler removed the symlink inode items and a crash
1535          * happened in between or the subvol was snapshoted in between).
1536          * Print an informative message to dmesg/syslog so that the user
1537          * can delete the symlink.
1538          */
1539         btrfs_err(root->fs_info,
1540               "Found empty symlink inode %llu at root %llu",
1541               ino, root->root_key.objectid);
1542         ret = -EIO;
1543         goto out;
1544     }
1545 
1546     ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1547             struct btrfs_file_extent_item);
1548     type = btrfs_file_extent_type(path->nodes[0], ei);
1549     compression = btrfs_file_extent_compression(path->nodes[0], ei);
1550     BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
1551     BUG_ON(compression);
1552 
1553     off = btrfs_file_extent_inline_start(ei);
1554     len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
1555 
1556     ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1557 
1558 out:
1559     btrfs_free_path(path);
1560     return ret;
1561 }
1562 
1563 /*
1564  * Helper function to generate a file name that is unique in the root of
1565  * send_root and parent_root. This is used to generate names for orphan inodes.
1566  */
1567 static int gen_unique_name(struct send_ctx *sctx,
1568                u64 ino, u64 gen,
1569                struct fs_path *dest)
1570 {
1571     int ret = 0;
1572     struct btrfs_path *path;
1573     struct btrfs_dir_item *di;
1574     char tmp[64];
1575     int len;
1576     u64 idx = 0;
1577 
1578     path = alloc_path_for_send();
1579     if (!path)
1580         return -ENOMEM;
1581 
1582     while (1) {
1583         len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
1584                 ino, gen, idx);
1585         ASSERT(len < sizeof(tmp));
1586 
1587         di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1588                 path, BTRFS_FIRST_FREE_OBJECTID,
1589                 tmp, strlen(tmp), 0);
1590         btrfs_release_path(path);
1591         if (IS_ERR(di)) {
1592             ret = PTR_ERR(di);
1593             goto out;
1594         }
1595         if (di) {
1596             /* not unique, try again */
1597             idx++;
1598             continue;
1599         }
1600 
1601         if (!sctx->parent_root) {
1602             /* unique */
1603             ret = 0;
1604             break;
1605         }
1606 
1607         di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1608                 path, BTRFS_FIRST_FREE_OBJECTID,
1609                 tmp, strlen(tmp), 0);
1610         btrfs_release_path(path);
1611         if (IS_ERR(di)) {
1612             ret = PTR_ERR(di);
1613             goto out;
1614         }
1615         if (di) {
1616             /* not unique, try again */
1617             idx++;
1618             continue;
1619         }
1620         /* unique */
1621         break;
1622     }
1623 
1624     ret = fs_path_add(dest, tmp, strlen(tmp));
1625 
1626 out:
1627     btrfs_free_path(path);
1628     return ret;
1629 }
1630 
1631 enum inode_state {
1632     inode_state_no_change,
1633     inode_state_will_create,
1634     inode_state_did_create,
1635     inode_state_will_delete,
1636     inode_state_did_delete,
1637 };
1638 
1639 static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
1640 {
1641     int ret;
1642     int left_ret;
1643     int right_ret;
1644     u64 left_gen;
1645     u64 right_gen;
1646 
1647     ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
1648             NULL, NULL, NULL);
1649     if (ret < 0 && ret != -ENOENT)
1650         goto out;
1651     left_ret = ret;
1652 
1653     if (!sctx->parent_root) {
1654         right_ret = -ENOENT;
1655     } else {
1656         ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
1657                 NULL, NULL, NULL, NULL, NULL);
1658         if (ret < 0 && ret != -ENOENT)
1659             goto out;
1660         right_ret = ret;
1661     }
1662 
1663     if (!left_ret && !right_ret) {
1664         if (left_gen == gen && right_gen == gen) {
1665             ret = inode_state_no_change;
1666         } else if (left_gen == gen) {
1667             if (ino < sctx->send_progress)
1668                 ret = inode_state_did_create;
1669             else
1670                 ret = inode_state_will_create;
1671         } else if (right_gen == gen) {
1672             if (ino < sctx->send_progress)
1673                 ret = inode_state_did_delete;
1674             else
1675                 ret = inode_state_will_delete;
1676         } else  {
1677             ret = -ENOENT;
1678         }
1679     } else if (!left_ret) {
1680         if (left_gen == gen) {
1681             if (ino < sctx->send_progress)
1682                 ret = inode_state_did_create;
1683             else
1684                 ret = inode_state_will_create;
1685         } else {
1686             ret = -ENOENT;
1687         }
1688     } else if (!right_ret) {
1689         if (right_gen == gen) {
1690             if (ino < sctx->send_progress)
1691                 ret = inode_state_did_delete;
1692             else
1693                 ret = inode_state_will_delete;
1694         } else {
1695             ret = -ENOENT;
1696         }
1697     } else {
1698         ret = -ENOENT;
1699     }
1700 
1701 out:
1702     return ret;
1703 }
1704 
1705 static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1706 {
1707     int ret;
1708 
1709     if (ino == BTRFS_FIRST_FREE_OBJECTID)
1710         return 1;
1711 
1712     ret = get_cur_inode_state(sctx, ino, gen);
1713     if (ret < 0)
1714         goto out;
1715 
1716     if (ret == inode_state_no_change ||
1717         ret == inode_state_did_create ||
1718         ret == inode_state_will_delete)
1719         ret = 1;
1720     else
1721         ret = 0;
1722 
1723 out:
1724     return ret;
1725 }
1726 
1727 /*
1728  * Helper function to lookup a dir item in a dir.
1729  */
1730 static int lookup_dir_item_inode(struct btrfs_root *root,
1731                  u64 dir, const char *name, int name_len,
1732                  u64 *found_inode)
1733 {
1734     int ret = 0;
1735     struct btrfs_dir_item *di;
1736     struct btrfs_key key;
1737     struct btrfs_path *path;
1738 
1739     path = alloc_path_for_send();
1740     if (!path)
1741         return -ENOMEM;
1742 
1743     di = btrfs_lookup_dir_item(NULL, root, path,
1744             dir, name, name_len, 0);
1745     if (IS_ERR_OR_NULL(di)) {
1746         ret = di ? PTR_ERR(di) : -ENOENT;
1747         goto out;
1748     }
1749     btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1750     if (key.type == BTRFS_ROOT_ITEM_KEY) {
1751         ret = -ENOENT;
1752         goto out;
1753     }
1754     *found_inode = key.objectid;
1755 
1756 out:
1757     btrfs_free_path(path);
1758     return ret;
1759 }
1760 
1761 /*
1762  * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1763  * generation of the parent dir and the name of the dir entry.
1764  */
1765 static int get_first_ref(struct btrfs_root *root, u64 ino,
1766              u64 *dir, u64 *dir_gen, struct fs_path *name)
1767 {
1768     int ret;
1769     struct btrfs_key key;
1770     struct btrfs_key found_key;
1771     struct btrfs_path *path;
1772     int len;
1773     u64 parent_dir;
1774 
1775     path = alloc_path_for_send();
1776     if (!path)
1777         return -ENOMEM;
1778 
1779     key.objectid = ino;
1780     key.type = BTRFS_INODE_REF_KEY;
1781     key.offset = 0;
1782 
1783     ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
1784     if (ret < 0)
1785         goto out;
1786     if (!ret)
1787         btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1788                 path->slots[0]);
1789     if (ret || found_key.objectid != ino ||
1790         (found_key.type != BTRFS_INODE_REF_KEY &&
1791          found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1792         ret = -ENOENT;
1793         goto out;
1794     }
1795 
1796     if (found_key.type == BTRFS_INODE_REF_KEY) {
1797         struct btrfs_inode_ref *iref;
1798         iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1799                       struct btrfs_inode_ref);
1800         len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1801         ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1802                              (unsigned long)(iref + 1),
1803                              len);
1804         parent_dir = found_key.offset;
1805     } else {
1806         struct btrfs_inode_extref *extref;
1807         extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1808                     struct btrfs_inode_extref);
1809         len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1810         ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1811                     (unsigned long)&extref->name, len);
1812         parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1813     }
1814     if (ret < 0)
1815         goto out;
1816     btrfs_release_path(path);
1817 
1818     if (dir_gen) {
1819         ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
1820                      NULL, NULL, NULL, NULL);
1821         if (ret < 0)
1822             goto out;
1823     }
1824 
1825     *dir = parent_dir;
1826 
1827 out:
1828     btrfs_free_path(path);
1829     return ret;
1830 }
1831 
1832 static int is_first_ref(struct btrfs_root *root,
1833             u64 ino, u64 dir,
1834             const char *name, int name_len)
1835 {
1836     int ret;
1837     struct fs_path *tmp_name;
1838     u64 tmp_dir;
1839 
1840     tmp_name = fs_path_alloc();
1841     if (!tmp_name)
1842         return -ENOMEM;
1843 
1844     ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
1845     if (ret < 0)
1846         goto out;
1847 
1848     if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
1849         ret = 0;
1850         goto out;
1851     }
1852 
1853     ret = !memcmp(tmp_name->start, name, name_len);
1854 
1855 out:
1856     fs_path_free(tmp_name);
1857     return ret;
1858 }
1859 
1860 /*
1861  * Used by process_recorded_refs to determine if a new ref would overwrite an
1862  * already existing ref. In case it detects an overwrite, it returns the
1863  * inode/gen in who_ino/who_gen.
1864  * When an overwrite is detected, process_recorded_refs does proper orphanizing
1865  * to make sure later references to the overwritten inode are possible.
1866  * Orphanizing is however only required for the first ref of an inode.
1867  * process_recorded_refs does an additional is_first_ref check to see if
1868  * orphanizing is really required.
1869  */
1870 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1871                   const char *name, int name_len,
1872                   u64 *who_ino, u64 *who_gen, u64 *who_mode)
1873 {
1874     int ret = 0;
1875     u64 gen;
1876     u64 other_inode = 0;
1877 
1878     if (!sctx->parent_root)
1879         goto out;
1880 
1881     ret = is_inode_existent(sctx, dir, dir_gen);
1882     if (ret <= 0)
1883         goto out;
1884 
1885     /*
1886      * If we have a parent root we need to verify that the parent dir was
1887      * not deleted and then re-created, if it was then we have no overwrite
1888      * and we can just unlink this entry.
1889      */
1890     if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
1891         ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
1892                      NULL, NULL, NULL, NULL);
1893         if (ret < 0 && ret != -ENOENT)
1894             goto out;
1895         if (ret) {
1896             ret = 0;
1897             goto out;
1898         }
1899         if (gen != dir_gen)
1900             goto out;
1901     }
1902 
1903     ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
1904                     &other_inode);
1905     if (ret < 0 && ret != -ENOENT)
1906         goto out;
1907     if (ret) {
1908         ret = 0;
1909         goto out;
1910     }
1911 
1912     /*
1913      * Check if the overwritten ref was already processed. If yes, the ref
1914      * was already unlinked/moved, so we can safely assume that we will not
1915      * overwrite anything at this point in time.
1916      */
1917     if (other_inode > sctx->send_progress ||
1918         is_waiting_for_move(sctx, other_inode)) {
1919         ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1920                 who_gen, who_mode, NULL, NULL, NULL, NULL);
1921         if (ret < 0)
1922             goto out;
1923 
1924         ret = 1;
1925         *who_ino = other_inode;
1926     } else {
1927         ret = 0;
1928     }
1929 
1930 out:
1931     return ret;
1932 }
1933 
1934 /*
1935  * Checks if the ref was overwritten by an already processed inode. This is
1936  * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1937  * thus the orphan name needs be used.
1938  * process_recorded_refs also uses it to avoid unlinking of refs that were
1939  * overwritten.
1940  */
1941 static int did_overwrite_ref(struct send_ctx *sctx,
1942                 u64 dir, u64 dir_gen,
1943                 u64 ino, u64 ino_gen,
1944                 const char *name, int name_len)
1945 {
1946     int ret = 0;
1947     u64 gen;
1948     u64 ow_inode;
1949 
1950     if (!sctx->parent_root)
1951         goto out;
1952 
1953     ret = is_inode_existent(sctx, dir, dir_gen);
1954     if (ret <= 0)
1955         goto out;
1956 
1957     if (dir != BTRFS_FIRST_FREE_OBJECTID) {
1958         ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
1959                      NULL, NULL, NULL, NULL);
1960         if (ret < 0 && ret != -ENOENT)
1961             goto out;
1962         if (ret) {
1963             ret = 0;
1964             goto out;
1965         }
1966         if (gen != dir_gen)
1967             goto out;
1968     }
1969 
1970     /* check if the ref was overwritten by another ref */
1971     ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
1972                     &ow_inode);
1973     if (ret < 0 && ret != -ENOENT)
1974         goto out;
1975     if (ret) {
1976         /* was never and will never be overwritten */
1977         ret = 0;
1978         goto out;
1979     }
1980 
1981     ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
1982             NULL, NULL, NULL);
1983     if (ret < 0)
1984         goto out;
1985 
1986     if (ow_inode == ino && gen == ino_gen) {
1987         ret = 0;
1988         goto out;
1989     }
1990 
1991     /*
1992      * We know that it is or will be overwritten. Check this now.
1993      * The current inode being processed might have been the one that caused
1994      * inode 'ino' to be orphanized, therefore check if ow_inode matches
1995      * the current inode being processed.
1996      */
1997     if ((ow_inode < sctx->send_progress) ||
1998         (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
1999          gen == sctx->cur_inode_gen))
2000         ret = 1;
2001     else
2002         ret = 0;
2003 
2004 out:
2005     return ret;
2006 }
2007 
2008 /*
2009  * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
2010  * that got overwritten. This is used by process_recorded_refs to determine
2011  * if it has to use the path as returned by get_cur_path or the orphan name.
2012  */
2013 static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
2014 {
2015     int ret = 0;
2016     struct fs_path *name = NULL;
2017     u64 dir;
2018     u64 dir_gen;
2019 
2020     if (!sctx->parent_root)
2021         goto out;
2022 
2023     name = fs_path_alloc();
2024     if (!name)
2025         return -ENOMEM;
2026 
2027     ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
2028     if (ret < 0)
2029         goto out;
2030 
2031     ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
2032             name->start, fs_path_len(name));
2033 
2034 out:
2035     fs_path_free(name);
2036     return ret;
2037 }
2038 
2039 /*
2040  * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
2041  * so we need to do some special handling in case we have clashes. This function
2042  * takes care of this with the help of name_cache_entry::radix_list.
2043  * In case of error, nce is kfreed.
2044  */
2045 static int name_cache_insert(struct send_ctx *sctx,
2046                  struct name_cache_entry *nce)
2047 {
2048     int ret = 0;
2049     struct list_head *nce_head;
2050 
2051     nce_head = radix_tree_lookup(&sctx->name_cache,
2052             (unsigned long)nce->ino);
2053     if (!nce_head) {
2054         nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
2055         if (!nce_head) {
2056             kfree(nce);
2057             return -ENOMEM;
2058         }
2059         INIT_LIST_HEAD(nce_head);
2060 
2061         ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
2062         if (ret < 0) {
2063             kfree(nce_head);
2064             kfree(nce);
2065             return ret;
2066         }
2067     }
2068     list_add_tail(&nce->radix_list, nce_head);
2069     list_add_tail(&nce->list, &sctx->name_cache_list);
2070     sctx->name_cache_size++;
2071 
2072     return ret;
2073 }
2074 
2075 static void name_cache_delete(struct send_ctx *sctx,
2076                   struct name_cache_entry *nce)
2077 {
2078     struct list_head *nce_head;
2079 
2080     nce_head = radix_tree_lookup(&sctx->name_cache,
2081             (unsigned long)nce->ino);
2082     if (!nce_head) {
2083         btrfs_err(sctx->send_root->fs_info,
2084           "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
2085             nce->ino, sctx->name_cache_size);
2086     }
2087 
2088     list_del(&nce->radix_list);
2089     list_del(&nce->list);
2090     sctx->name_cache_size--;
2091 
2092     /*
2093      * We may not get to the final release of nce_head if the lookup fails
2094      */
2095     if (nce_head && list_empty(nce_head)) {
2096         radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
2097         kfree(nce_head);
2098     }
2099 }
2100 
2101 static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
2102                             u64 ino, u64 gen)
2103 {
2104     struct list_head *nce_head;
2105     struct name_cache_entry *cur;
2106 
2107     nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
2108     if (!nce_head)
2109         return NULL;
2110 
2111     list_for_each_entry(cur, nce_head, radix_list) {
2112         if (cur->ino == ino && cur->gen == gen)
2113             return cur;
2114     }
2115     return NULL;
2116 }
2117 
2118 /*
2119  * Remove some entries from the beginning of name_cache_list.
2120  */
2121 static void name_cache_clean_unused(struct send_ctx *sctx)
2122 {
2123     struct name_cache_entry *nce;
2124 
2125     if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
2126         return;
2127 
2128     while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
2129         nce = list_entry(sctx->name_cache_list.next,
2130                 struct name_cache_entry, list);
2131         name_cache_delete(sctx, nce);
2132         kfree(nce);
2133     }
2134 }
2135 
2136 static void name_cache_free(struct send_ctx *sctx)
2137 {
2138     struct name_cache_entry *nce;
2139 
2140     while (!list_empty(&sctx->name_cache_list)) {
2141         nce = list_entry(sctx->name_cache_list.next,
2142                 struct name_cache_entry, list);
2143         name_cache_delete(sctx, nce);
2144         kfree(nce);
2145     }
2146 }
2147 
2148 /*
2149  * Used by get_cur_path for each ref up to the root.
2150  * Returns 0 if it succeeded.
2151  * Returns 1 if the inode is not existent or got overwritten. In that case, the
2152  * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
2153  * is returned, parent_ino/parent_gen are not guaranteed to be valid.
2154  * Returns <0 in case of error.
2155  */
2156 static int __get_cur_name_and_parent(struct send_ctx *sctx,
2157                      u64 ino, u64 gen,
2158                      u64 *parent_ino,
2159                      u64 *parent_gen,
2160                      struct fs_path *dest)
2161 {
2162     int ret;
2163     int nce_ret;
2164     struct name_cache_entry *nce = NULL;
2165 
2166     /*
2167      * First check if we already did a call to this function with the same
2168      * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
2169      * return the cached result.
2170      */
2171     nce = name_cache_search(sctx, ino, gen);
2172     if (nce) {
2173         if (ino < sctx->send_progress && nce->need_later_update) {
2174             name_cache_delete(sctx, nce);
2175             kfree(nce);
2176             nce = NULL;
2177         } else {
2178             /*
2179              * Removes the entry from the list and adds it back to
2180              * the end.  This marks the entry as recently used so
2181              * that name_cache_clean_unused does not remove it.
2182              */
2183             list_move_tail(&nce->list, &sctx->name_cache_list);
2184 
2185             *parent_ino = nce->parent_ino;
2186             *parent_gen = nce->parent_gen;
2187             ret = fs_path_add(dest, nce->name, nce->name_len);
2188             if (ret < 0)
2189                 goto out;
2190             ret = nce->ret;
2191             goto out;
2192         }
2193     }
2194 
2195     /*
2196      * If the inode is not existent yet, add the orphan name and return 1.
2197      * This should only happen for the parent dir that we determine in
2198      * record_new_ref_if_needed().
2199      */
2200     ret = is_inode_existent(sctx, ino, gen);
2201     if (ret < 0)
2202         goto out;
2203 
2204     if (!ret) {
2205         ret = gen_unique_name(sctx, ino, gen, dest);
2206         if (ret < 0)
2207             goto out;
2208         ret = 1;
2209         goto out_cache;
2210     }
2211 
2212     /*
2213      * Depending on whether the inode was already processed or not, use
2214      * send_root or parent_root for ref lookup.
2215      */
2216     if (ino < sctx->send_progress)
2217         ret = get_first_ref(sctx->send_root, ino,
2218                     parent_ino, parent_gen, dest);
2219     else
2220         ret = get_first_ref(sctx->parent_root, ino,
2221                     parent_ino, parent_gen, dest);
2222     if (ret < 0)
2223         goto out;
2224 
2225     /*
2226      * Check if the ref was overwritten by an inode's ref that was processed
2227      * earlier. If yes, treat as orphan and return 1.
2228      */
2229     ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2230             dest->start, dest->end - dest->start);
2231     if (ret < 0)
2232         goto out;
2233     if (ret) {
2234         fs_path_reset(dest);
2235         ret = gen_unique_name(sctx, ino, gen, dest);
2236         if (ret < 0)
2237             goto out;
2238         ret = 1;
2239     }
2240 
2241 out_cache:
2242     /*
2243      * Store the result of the lookup in the name cache.
2244      */
2245     nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
2246     if (!nce) {
2247         ret = -ENOMEM;
2248         goto out;
2249     }
2250 
2251     nce->ino = ino;
2252     nce->gen = gen;
2253     nce->parent_ino = *parent_ino;
2254     nce->parent_gen = *parent_gen;
2255     nce->name_len = fs_path_len(dest);
2256     nce->ret = ret;
2257     strcpy(nce->name, dest->start);
2258 
2259     if (ino < sctx->send_progress)
2260         nce->need_later_update = 0;
2261     else
2262         nce->need_later_update = 1;
2263 
2264     nce_ret = name_cache_insert(sctx, nce);
2265     if (nce_ret < 0)
2266         ret = nce_ret;
2267     name_cache_clean_unused(sctx);
2268 
2269 out:
2270     return ret;
2271 }
2272 
2273 /*
2274  * Magic happens here. This function returns the first ref to an inode as it
2275  * would look like while receiving the stream at this point in time.
2276  * We walk the path up to the root. For every inode in between, we check if it
2277  * was already processed/sent. If yes, we continue with the parent as found
2278  * in send_root. If not, we continue with the parent as found in parent_root.
2279  * If we encounter an inode that was deleted at this point in time, we use the
2280  * inodes "orphan" name instead of the real name and stop. Same with new inodes
2281  * that were not created yet and overwritten inodes/refs.
2282  *
2283  * When do we have orphan inodes:
2284  * 1. When an inode is freshly created and thus no valid refs are available yet
2285  * 2. When a directory lost all it's refs (deleted) but still has dir items
2286  *    inside which were not processed yet (pending for move/delete). If anyone
2287  *    tried to get the path to the dir items, it would get a path inside that
2288  *    orphan directory.
2289  * 3. When an inode is moved around or gets new links, it may overwrite the ref
2290  *    of an unprocessed inode. If in that case the first ref would be
2291  *    overwritten, the overwritten inode gets "orphanized". Later when we
2292  *    process this overwritten inode, it is restored at a new place by moving
2293  *    the orphan inode.
2294  *
2295  * sctx->send_progress tells this function at which point in time receiving
2296  * would be.
2297  */
2298 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2299             struct fs_path *dest)
2300 {
2301     int ret = 0;
2302     struct fs_path *name = NULL;
2303     u64 parent_inode = 0;
2304     u64 parent_gen = 0;
2305     int stop = 0;
2306 
2307     name = fs_path_alloc();
2308     if (!name) {
2309         ret = -ENOMEM;
2310         goto out;
2311     }
2312 
2313     dest->reversed = 1;
2314     fs_path_reset(dest);
2315 
2316     while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2317         struct waiting_dir_move *wdm;
2318 
2319         fs_path_reset(name);
2320 
2321         if (is_waiting_for_rm(sctx, ino, gen)) {
2322             ret = gen_unique_name(sctx, ino, gen, name);
2323             if (ret < 0)
2324                 goto out;
2325             ret = fs_path_add_path(dest, name);
2326             break;
2327         }
2328 
2329         wdm = get_waiting_dir_move(sctx, ino);
2330         if (wdm && wdm->orphanized) {
2331             ret = gen_unique_name(sctx, ino, gen, name);
2332             stop = 1;
2333         } else if (wdm) {
2334             ret = get_first_ref(sctx->parent_root, ino,
2335                         &parent_inode, &parent_gen, name);
2336         } else {
2337             ret = __get_cur_name_and_parent(sctx, ino, gen,
2338                             &parent_inode,
2339                             &parent_gen, name);
2340             if (ret)
2341                 stop = 1;
2342         }
2343 
2344         if (ret < 0)
2345             goto out;
2346 
2347         ret = fs_path_add_path(dest, name);
2348         if (ret < 0)
2349             goto out;
2350 
2351         ino = parent_inode;
2352         gen = parent_gen;
2353     }
2354 
2355 out:
2356     fs_path_free(name);
2357     if (!ret)
2358         fs_path_unreverse(dest);
2359     return ret;
2360 }
2361 
2362 /*
2363  * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2364  */
2365 static int send_subvol_begin(struct send_ctx *sctx)
2366 {
2367     int ret;
2368     struct btrfs_root *send_root = sctx->send_root;
2369     struct btrfs_root *parent_root = sctx->parent_root;
2370     struct btrfs_path *path;
2371     struct btrfs_key key;
2372     struct btrfs_root_ref *ref;
2373     struct extent_buffer *leaf;
2374     char *name = NULL;
2375     int namelen;
2376 
2377     path = btrfs_alloc_path();
2378     if (!path)
2379         return -ENOMEM;
2380 
2381     name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2382     if (!name) {
2383         btrfs_free_path(path);
2384         return -ENOMEM;
2385     }
2386 
2387     key.objectid = send_root->root_key.objectid;
2388     key.type = BTRFS_ROOT_BACKREF_KEY;
2389     key.offset = 0;
2390 
2391     ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2392                 &key, path, 1, 0);
2393     if (ret < 0)
2394         goto out;
2395     if (ret) {
2396         ret = -ENOENT;
2397         goto out;
2398     }
2399 
2400     leaf = path->nodes[0];
2401     btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2402     if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2403         key.objectid != send_root->root_key.objectid) {
2404         ret = -ENOENT;
2405         goto out;
2406     }
2407     ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2408     namelen = btrfs_root_ref_name_len(leaf, ref);
2409     read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2410     btrfs_release_path(path);
2411 
2412     if (parent_root) {
2413         ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2414         if (ret < 0)
2415             goto out;
2416     } else {
2417         ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2418         if (ret < 0)
2419             goto out;
2420     }
2421 
2422     TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2423 
2424     if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
2425         TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2426                 sctx->send_root->root_item.received_uuid);
2427     else
2428         TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2429                 sctx->send_root->root_item.uuid);
2430 
2431     TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2432             btrfs_root_ctransid(&sctx->send_root->root_item));
2433     if (parent_root) {
2434         if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
2435             TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2436                      parent_root->root_item.received_uuid);
2437         else
2438             TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2439                      parent_root->root_item.uuid);
2440         TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2441                 btrfs_root_ctransid(&sctx->parent_root->root_item));
2442     }
2443 
2444     ret = send_cmd(sctx);
2445 
2446 tlv_put_failure:
2447 out:
2448     btrfs_free_path(path);
2449     kfree(name);
2450     return ret;
2451 }
2452 
2453 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2454 {
2455     struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2456     int ret = 0;
2457     struct fs_path *p;
2458 
2459     btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2460 
2461     p = fs_path_alloc();
2462     if (!p)
2463         return -ENOMEM;
2464 
2465     ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2466     if (ret < 0)
2467         goto out;
2468 
2469     ret = get_cur_path(sctx, ino, gen, p);
2470     if (ret < 0)
2471         goto out;
2472     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2473     TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2474 
2475     ret = send_cmd(sctx);
2476 
2477 tlv_put_failure:
2478 out:
2479     fs_path_free(p);
2480     return ret;
2481 }
2482 
2483 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2484 {
2485     struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2486     int ret = 0;
2487     struct fs_path *p;
2488 
2489     btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2490 
2491     p = fs_path_alloc();
2492     if (!p)
2493         return -ENOMEM;
2494 
2495     ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2496     if (ret < 0)
2497         goto out;
2498 
2499     ret = get_cur_path(sctx, ino, gen, p);
2500     if (ret < 0)
2501         goto out;
2502     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2503     TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2504 
2505     ret = send_cmd(sctx);
2506 
2507 tlv_put_failure:
2508 out:
2509     fs_path_free(p);
2510     return ret;
2511 }
2512 
2513 static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr)
2514 {
2515     struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2516     int ret = 0;
2517     struct fs_path *p;
2518 
2519     if (sctx->proto < 2)
2520         return 0;
2521 
2522     btrfs_debug(fs_info, "send_fileattr %llu fileattr=%llu", ino, fileattr);
2523 
2524     p = fs_path_alloc();
2525     if (!p)
2526         return -ENOMEM;
2527 
2528     ret = begin_cmd(sctx, BTRFS_SEND_C_FILEATTR);
2529     if (ret < 0)
2530         goto out;
2531 
2532     ret = get_cur_path(sctx, ino, gen, p);
2533     if (ret < 0)
2534         goto out;
2535     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2536     TLV_PUT_U64(sctx, BTRFS_SEND_A_FILEATTR, fileattr);
2537 
2538     ret = send_cmd(sctx);
2539 
2540 tlv_put_failure:
2541 out:
2542     fs_path_free(p);
2543     return ret;
2544 }
2545 
2546 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2547 {
2548     struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2549     int ret = 0;
2550     struct fs_path *p;
2551 
2552     btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2553             ino, uid, gid);
2554 
2555     p = fs_path_alloc();
2556     if (!p)
2557         return -ENOMEM;
2558 
2559     ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2560     if (ret < 0)
2561         goto out;
2562 
2563     ret = get_cur_path(sctx, ino, gen, p);
2564     if (ret < 0)
2565         goto out;
2566     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2567     TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2568     TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2569 
2570     ret = send_cmd(sctx);
2571 
2572 tlv_put_failure:
2573 out:
2574     fs_path_free(p);
2575     return ret;
2576 }
2577 
2578 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2579 {
2580     struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2581     int ret = 0;
2582     struct fs_path *p = NULL;
2583     struct btrfs_inode_item *ii;
2584     struct btrfs_path *path = NULL;
2585     struct extent_buffer *eb;
2586     struct btrfs_key key;
2587     int slot;
2588 
2589     btrfs_debug(fs_info, "send_utimes %llu", ino);
2590 
2591     p = fs_path_alloc();
2592     if (!p)
2593         return -ENOMEM;
2594 
2595     path = alloc_path_for_send();
2596     if (!path) {
2597         ret = -ENOMEM;
2598         goto out;
2599     }
2600 
2601     key.objectid = ino;
2602     key.type = BTRFS_INODE_ITEM_KEY;
2603     key.offset = 0;
2604     ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2605     if (ret > 0)
2606         ret = -ENOENT;
2607     if (ret < 0)
2608         goto out;
2609 
2610     eb = path->nodes[0];
2611     slot = path->slots[0];
2612     ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2613 
2614     ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2615     if (ret < 0)
2616         goto out;
2617 
2618     ret = get_cur_path(sctx, ino, gen, p);
2619     if (ret < 0)
2620         goto out;
2621     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2622     TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2623     TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2624     TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2625     if (sctx->proto >= 2)
2626         TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_OTIME, eb, &ii->otime);
2627 
2628     ret = send_cmd(sctx);
2629 
2630 tlv_put_failure:
2631 out:
2632     fs_path_free(p);
2633     btrfs_free_path(path);
2634     return ret;
2635 }
2636 
2637 /*
2638  * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2639  * a valid path yet because we did not process the refs yet. So, the inode
2640  * is created as orphan.
2641  */
2642 static int send_create_inode(struct send_ctx *sctx, u64 ino)
2643 {
2644     struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2645     int ret = 0;
2646     struct fs_path *p;
2647     int cmd;
2648     u64 gen;
2649     u64 mode;
2650     u64 rdev;
2651 
2652     btrfs_debug(fs_info, "send_create_inode %llu", ino);
2653 
2654     p = fs_path_alloc();
2655     if (!p)
2656         return -ENOMEM;
2657 
2658     if (ino != sctx->cur_ino) {
2659         ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
2660                      NULL, NULL, &rdev, NULL);
2661         if (ret < 0)
2662             goto out;
2663     } else {
2664         gen = sctx->cur_inode_gen;
2665         mode = sctx->cur_inode_mode;
2666         rdev = sctx->cur_inode_rdev;
2667     }
2668 
2669     if (S_ISREG(mode)) {
2670         cmd = BTRFS_SEND_C_MKFILE;
2671     } else if (S_ISDIR(mode)) {
2672         cmd = BTRFS_SEND_C_MKDIR;
2673     } else if (S_ISLNK(mode)) {
2674         cmd = BTRFS_SEND_C_SYMLINK;
2675     } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2676         cmd = BTRFS_SEND_C_MKNOD;
2677     } else if (S_ISFIFO(mode)) {
2678         cmd = BTRFS_SEND_C_MKFIFO;
2679     } else if (S_ISSOCK(mode)) {
2680         cmd = BTRFS_SEND_C_MKSOCK;
2681     } else {
2682         btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2683                 (int)(mode & S_IFMT));
2684         ret = -EOPNOTSUPP;
2685         goto out;
2686     }
2687 
2688     ret = begin_cmd(sctx, cmd);
2689     if (ret < 0)
2690         goto out;
2691 
2692     ret = gen_unique_name(sctx, ino, gen, p);
2693     if (ret < 0)
2694         goto out;
2695 
2696     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2697     TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2698 
2699     if (S_ISLNK(mode)) {
2700         fs_path_reset(p);
2701         ret = read_symlink(sctx->send_root, ino, p);
2702         if (ret < 0)
2703             goto out;
2704         TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2705     } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2706            S_ISFIFO(mode) || S_ISSOCK(mode)) {
2707         TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2708         TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2709     }
2710 
2711     ret = send_cmd(sctx);
2712     if (ret < 0)
2713         goto out;
2714 
2715 
2716 tlv_put_failure:
2717 out:
2718     fs_path_free(p);
2719     return ret;
2720 }
2721 
2722 /*
2723  * We need some special handling for inodes that get processed before the parent
2724  * directory got created. See process_recorded_refs for details.
2725  * This function does the check if we already created the dir out of order.
2726  */
2727 static int did_create_dir(struct send_ctx *sctx, u64 dir)
2728 {
2729     int ret = 0;
2730     int iter_ret = 0;
2731     struct btrfs_path *path = NULL;
2732     struct btrfs_key key;
2733     struct btrfs_key found_key;
2734     struct btrfs_key di_key;
2735     struct btrfs_dir_item *di;
2736 
2737     path = alloc_path_for_send();
2738     if (!path)
2739         return -ENOMEM;
2740 
2741     key.objectid = dir;
2742     key.type = BTRFS_DIR_INDEX_KEY;
2743     key.offset = 0;
2744 
2745     btrfs_for_each_slot(sctx->send_root, &key, &found_key, path, iter_ret) {
2746         struct extent_buffer *eb = path->nodes[0];
2747 
2748         if (found_key.objectid != key.objectid ||
2749             found_key.type != key.type) {
2750             ret = 0;
2751             break;
2752         }
2753 
2754         di = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dir_item);
2755         btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2756 
2757         if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2758             di_key.objectid < sctx->send_progress) {
2759             ret = 1;
2760             break;
2761         }
2762     }
2763     /* Catch error found during iteration */
2764     if (iter_ret < 0)
2765         ret = iter_ret;
2766 
2767     btrfs_free_path(path);
2768     return ret;
2769 }
2770 
2771 /*
2772  * Only creates the inode if it is:
2773  * 1. Not a directory
2774  * 2. Or a directory which was not created already due to out of order
2775  *    directories. See did_create_dir and process_recorded_refs for details.
2776  */
2777 static int send_create_inode_if_needed(struct send_ctx *sctx)
2778 {
2779     int ret;
2780 
2781     if (S_ISDIR(sctx->cur_inode_mode)) {
2782         ret = did_create_dir(sctx, sctx->cur_ino);
2783         if (ret < 0)
2784             return ret;
2785         else if (ret > 0)
2786             return 0;
2787     }
2788 
2789     return send_create_inode(sctx, sctx->cur_ino);
2790 }
2791 
2792 struct recorded_ref {
2793     struct list_head list;
2794     char *name;
2795     struct fs_path *full_path;
2796     u64 dir;
2797     u64 dir_gen;
2798     int name_len;
2799     struct rb_node node;
2800     struct rb_root *root;
2801 };
2802 
2803 static struct recorded_ref *recorded_ref_alloc(void)
2804 {
2805     struct recorded_ref *ref;
2806 
2807     ref = kzalloc(sizeof(*ref), GFP_KERNEL);
2808     if (!ref)
2809         return NULL;
2810     RB_CLEAR_NODE(&ref->node);
2811     INIT_LIST_HEAD(&ref->list);
2812     return ref;
2813 }
2814 
2815 static void recorded_ref_free(struct recorded_ref *ref)
2816 {
2817     if (!ref)
2818         return;
2819     if (!RB_EMPTY_NODE(&ref->node))
2820         rb_erase(&ref->node, ref->root);
2821     list_del(&ref->list);
2822     fs_path_free(ref->full_path);
2823     kfree(ref);
2824 }
2825 
2826 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
2827 {
2828     ref->full_path = path;
2829     ref->name = (char *)kbasename(ref->full_path->start);
2830     ref->name_len = ref->full_path->end - ref->name;
2831 }
2832 
2833 static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2834 {
2835     struct recorded_ref *new;
2836 
2837     new = recorded_ref_alloc();
2838     if (!new)
2839         return -ENOMEM;
2840 
2841     new->dir = ref->dir;
2842     new->dir_gen = ref->dir_gen;
2843     list_add_tail(&new->list, list);
2844     return 0;
2845 }
2846 
2847 static void __free_recorded_refs(struct list_head *head)
2848 {
2849     struct recorded_ref *cur;
2850 
2851     while (!list_empty(head)) {
2852         cur = list_entry(head->next, struct recorded_ref, list);
2853         recorded_ref_free(cur);
2854     }
2855 }
2856 
2857 static void free_recorded_refs(struct send_ctx *sctx)
2858 {
2859     __free_recorded_refs(&sctx->new_refs);
2860     __free_recorded_refs(&sctx->deleted_refs);
2861 }
2862 
2863 /*
2864  * Renames/moves a file/dir to its orphan name. Used when the first
2865  * ref of an unprocessed inode gets overwritten and for all non empty
2866  * directories.
2867  */
2868 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2869               struct fs_path *path)
2870 {
2871     int ret;
2872     struct fs_path *orphan;
2873 
2874     orphan = fs_path_alloc();
2875     if (!orphan)
2876         return -ENOMEM;
2877 
2878     ret = gen_unique_name(sctx, ino, gen, orphan);
2879     if (ret < 0)
2880         goto out;
2881 
2882     ret = send_rename(sctx, path, orphan);
2883 
2884 out:
2885     fs_path_free(orphan);
2886     return ret;
2887 }
2888 
2889 static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
2890                            u64 dir_ino, u64 dir_gen)
2891 {
2892     struct rb_node **p = &sctx->orphan_dirs.rb_node;
2893     struct rb_node *parent = NULL;
2894     struct orphan_dir_info *entry, *odi;
2895 
2896     while (*p) {
2897         parent = *p;
2898         entry = rb_entry(parent, struct orphan_dir_info, node);
2899         if (dir_ino < entry->ino)
2900             p = &(*p)->rb_left;
2901         else if (dir_ino > entry->ino)
2902             p = &(*p)->rb_right;
2903         else if (dir_gen < entry->gen)
2904             p = &(*p)->rb_left;
2905         else if (dir_gen > entry->gen)
2906             p = &(*p)->rb_right;
2907         else
2908             return entry;
2909     }
2910 
2911     odi = kmalloc(sizeof(*odi), GFP_KERNEL);
2912     if (!odi)
2913         return ERR_PTR(-ENOMEM);
2914     odi->ino = dir_ino;
2915     odi->gen = dir_gen;
2916     odi->last_dir_index_offset = 0;
2917 
2918     rb_link_node(&odi->node, parent, p);
2919     rb_insert_color(&odi->node, &sctx->orphan_dirs);
2920     return odi;
2921 }
2922 
2923 static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
2924                            u64 dir_ino, u64 gen)
2925 {
2926     struct rb_node *n = sctx->orphan_dirs.rb_node;
2927     struct orphan_dir_info *entry;
2928 
2929     while (n) {
2930         entry = rb_entry(n, struct orphan_dir_info, node);
2931         if (dir_ino < entry->ino)
2932             n = n->rb_left;
2933         else if (dir_ino > entry->ino)
2934             n = n->rb_right;
2935         else if (gen < entry->gen)
2936             n = n->rb_left;
2937         else if (gen > entry->gen)
2938             n = n->rb_right;
2939         else
2940             return entry;
2941     }
2942     return NULL;
2943 }
2944 
2945 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
2946 {
2947     struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
2948 
2949     return odi != NULL;
2950 }
2951 
2952 static void free_orphan_dir_info(struct send_ctx *sctx,
2953                  struct orphan_dir_info *odi)
2954 {
2955     if (!odi)
2956         return;
2957     rb_erase(&odi->node, &sctx->orphan_dirs);
2958     kfree(odi);
2959 }
2960 
2961 /*
2962  * Returns 1 if a directory can be removed at this point in time.
2963  * We check this by iterating all dir items and checking if the inode behind
2964  * the dir item was already processed.
2965  */
2966 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2967              u64 send_progress)
2968 {
2969     int ret = 0;
2970     int iter_ret = 0;
2971     struct btrfs_root *root = sctx->parent_root;
2972     struct btrfs_path *path;
2973     struct btrfs_key key;
2974     struct btrfs_key found_key;
2975     struct btrfs_key loc;
2976     struct btrfs_dir_item *di;
2977     struct orphan_dir_info *odi = NULL;
2978 
2979     /*
2980      * Don't try to rmdir the top/root subvolume dir.
2981      */
2982     if (dir == BTRFS_FIRST_FREE_OBJECTID)
2983         return 0;
2984 
2985     path = alloc_path_for_send();
2986     if (!path)
2987         return -ENOMEM;
2988 
2989     key.objectid = dir;
2990     key.type = BTRFS_DIR_INDEX_KEY;
2991     key.offset = 0;
2992 
2993     odi = get_orphan_dir_info(sctx, dir, dir_gen);
2994     if (odi)
2995         key.offset = odi->last_dir_index_offset;
2996 
2997     btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
2998         struct waiting_dir_move *dm;
2999 
3000         if (found_key.objectid != key.objectid ||
3001             found_key.type != key.type)
3002             break;
3003 
3004         di = btrfs_item_ptr(path->nodes[0], path->slots[0],
3005                 struct btrfs_dir_item);
3006         btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
3007 
3008         dm = get_waiting_dir_move(sctx, loc.objectid);
3009         if (dm) {
3010             odi = add_orphan_dir_info(sctx, dir, dir_gen);
3011             if (IS_ERR(odi)) {
3012                 ret = PTR_ERR(odi);
3013                 goto out;
3014             }
3015             odi->gen = dir_gen;
3016             odi->last_dir_index_offset = found_key.offset;
3017             dm->rmdir_ino = dir;
3018             dm->rmdir_gen = dir_gen;
3019             ret = 0;
3020             goto out;
3021         }
3022 
3023         if (loc.objectid > send_progress) {
3024             odi = add_orphan_dir_info(sctx, dir, dir_gen);
3025             if (IS_ERR(odi)) {
3026                 ret = PTR_ERR(odi);
3027                 goto out;
3028             }
3029             odi->gen = dir_gen;
3030             odi->last_dir_index_offset = found_key.offset;
3031             ret = 0;
3032             goto out;
3033         }
3034     }
3035     if (iter_ret < 0) {
3036         ret = iter_ret;
3037         goto out;
3038     }
3039     free_orphan_dir_info(sctx, odi);
3040 
3041     ret = 1;
3042 
3043 out:
3044     btrfs_free_path(path);
3045     return ret;
3046 }
3047 
3048 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
3049 {
3050     struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3051 
3052     return entry != NULL;
3053 }
3054 
3055 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3056 {
3057     struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3058     struct rb_node *parent = NULL;
3059     struct waiting_dir_move *entry, *dm;
3060 
3061     dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3062     if (!dm)
3063         return -ENOMEM;
3064     dm->ino = ino;
3065     dm->rmdir_ino = 0;
3066     dm->rmdir_gen = 0;
3067     dm->orphanized = orphanized;
3068 
3069     while (*p) {
3070         parent = *p;
3071         entry = rb_entry(parent, struct waiting_dir_move, node);
3072         if (ino < entry->ino) {
3073             p = &(*p)->rb_left;
3074         } else if (ino > entry->ino) {
3075             p = &(*p)->rb_right;
3076         } else {
3077             kfree(dm);
3078             return -EEXIST;
3079         }
3080     }
3081 
3082     rb_link_node(&dm->node, parent, p);
3083     rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3084     return 0;
3085 }
3086 
3087 static struct waiting_dir_move *
3088 get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3089 {
3090     struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3091     struct waiting_dir_move *entry;
3092 
3093     while (n) {
3094         entry = rb_entry(n, struct waiting_dir_move, node);
3095         if (ino < entry->ino)
3096             n = n->rb_left;
3097         else if (ino > entry->ino)
3098             n = n->rb_right;
3099         else
3100             return entry;
3101     }
3102     return NULL;
3103 }
3104 
3105 static void free_waiting_dir_move(struct send_ctx *sctx,
3106                   struct waiting_dir_move *dm)
3107 {
3108     if (!dm)
3109         return;
3110     rb_erase(&dm->node, &sctx->waiting_dir_moves);
3111     kfree(dm);
3112 }
3113 
3114 static int add_pending_dir_move(struct send_ctx *sctx,
3115                 u64 ino,
3116                 u64 ino_gen,
3117                 u64 parent_ino,
3118                 struct list_head *new_refs,
3119                 struct list_head *deleted_refs,
3120                 const bool is_orphan)
3121 {
3122     struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3123     struct rb_node *parent = NULL;
3124     struct pending_dir_move *entry = NULL, *pm;
3125     struct recorded_ref *cur;
3126     int exists = 0;
3127     int ret;
3128 
3129     pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3130     if (!pm)
3131         return -ENOMEM;
3132     pm->parent_ino = parent_ino;
3133     pm->ino = ino;
3134     pm->gen = ino_gen;
3135     INIT_LIST_HEAD(&pm->list);
3136     INIT_LIST_HEAD(&pm->update_refs);
3137     RB_CLEAR_NODE(&pm->node);
3138 
3139     while (*p) {
3140         parent = *p;
3141         entry = rb_entry(parent, struct pending_dir_move, node);
3142         if (parent_ino < entry->parent_ino) {
3143             p = &(*p)->rb_left;
3144         } else if (parent_ino > entry->parent_ino) {
3145             p = &(*p)->rb_right;
3146         } else {
3147             exists = 1;
3148             break;
3149         }
3150     }
3151 
3152     list_for_each_entry(cur, deleted_refs, list) {
3153         ret = dup_ref(cur, &pm->update_refs);
3154         if (ret < 0)
3155             goto out;
3156     }
3157     list_for_each_entry(cur, new_refs, list) {
3158         ret = dup_ref(cur, &pm->update_refs);
3159         if (ret < 0)
3160             goto out;
3161     }
3162 
3163     ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3164     if (ret)
3165         goto out;
3166 
3167     if (exists) {
3168         list_add_tail(&pm->list, &entry->list);
3169     } else {
3170         rb_link_node(&pm->node, parent, p);
3171         rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3172     }
3173     ret = 0;
3174 out:
3175     if (ret) {
3176         __free_recorded_refs(&pm->update_refs);
3177         kfree(pm);
3178     }
3179     return ret;
3180 }
3181 
3182 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3183                               u64 parent_ino)
3184 {
3185     struct rb_node *n = sctx->pending_dir_moves.rb_node;
3186     struct pending_dir_move *entry;
3187 
3188     while (n) {
3189         entry = rb_entry(n, struct pending_dir_move, node);
3190         if (parent_ino < entry->parent_ino)
3191             n = n->rb_left;
3192         else if (parent_ino > entry->parent_ino)
3193             n = n->rb_right;
3194         else
3195             return entry;
3196     }
3197     return NULL;
3198 }
3199 
3200 static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3201              u64 ino, u64 gen, u64 *ancestor_ino)
3202 {
3203     int ret = 0;
3204     u64 parent_inode = 0;
3205     u64 parent_gen = 0;
3206     u64 start_ino = ino;
3207 
3208     *ancestor_ino = 0;
3209     while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3210         fs_path_reset(name);
3211 
3212         if (is_waiting_for_rm(sctx, ino, gen))
3213             break;
3214         if (is_waiting_for_move(sctx, ino)) {
3215             if (*ancestor_ino == 0)
3216                 *ancestor_ino = ino;
3217             ret = get_first_ref(sctx->parent_root, ino,
3218                         &parent_inode, &parent_gen, name);
3219         } else {
3220             ret = __get_cur_name_and_parent(sctx, ino, gen,
3221                             &parent_inode,
3222                             &parent_gen, name);
3223             if (ret > 0) {
3224                 ret = 0;
3225                 break;
3226             }
3227         }
3228         if (ret < 0)
3229             break;
3230         if (parent_inode == start_ino) {
3231             ret = 1;
3232             if (*ancestor_ino == 0)
3233                 *ancestor_ino = ino;
3234             break;
3235         }
3236         ino = parent_inode;
3237         gen = parent_gen;
3238     }
3239     return ret;
3240 }
3241 
3242 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3243 {
3244     struct fs_path *from_path = NULL;
3245     struct fs_path *to_path = NULL;
3246     struct fs_path *name = NULL;
3247     u64 orig_progress = sctx->send_progress;
3248     struct recorded_ref *cur;
3249     u64 parent_ino, parent_gen;
3250     struct waiting_dir_move *dm = NULL;
3251     u64 rmdir_ino = 0;
3252     u64 rmdir_gen;
3253     u64 ancestor;
3254     bool is_orphan;
3255     int ret;
3256 
3257     name = fs_path_alloc();
3258     from_path = fs_path_alloc();
3259     if (!name || !from_path) {
3260         ret = -ENOMEM;
3261         goto out;
3262     }
3263 
3264     dm = get_waiting_dir_move(sctx, pm->ino);
3265     ASSERT(dm);
3266     rmdir_ino = dm->rmdir_ino;
3267     rmdir_gen = dm->rmdir_gen;
3268     is_orphan = dm->orphanized;
3269     free_waiting_dir_move(sctx, dm);
3270 
3271     if (is_orphan) {
3272         ret = gen_unique_name(sctx, pm->ino,
3273                       pm->gen, from_path);
3274     } else {
3275         ret = get_first_ref(sctx->parent_root, pm->ino,
3276                     &parent_ino, &parent_gen, name);
3277         if (ret < 0)
3278             goto out;
3279         ret = get_cur_path(sctx, parent_ino, parent_gen,
3280                    from_path);
3281         if (ret < 0)
3282             goto out;
3283         ret = fs_path_add_path(from_path, name);
3284     }
3285     if (ret < 0)
3286         goto out;
3287 
3288     sctx->send_progress = sctx->cur_ino + 1;
3289     ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3290     if (ret < 0)
3291         goto out;
3292     if (ret) {
3293         LIST_HEAD(deleted_refs);
3294         ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3295         ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3296                        &pm->update_refs, &deleted_refs,
3297                        is_orphan);
3298         if (ret < 0)
3299             goto out;
3300         if (rmdir_ino) {
3301             dm = get_waiting_dir_move(sctx, pm->ino);
3302             ASSERT(dm);
3303             dm->rmdir_ino = rmdir_ino;
3304             dm->rmdir_gen = rmdir_gen;
3305         }
3306         goto out;
3307     }
3308     fs_path_reset(name);
3309     to_path = name;
3310     name = NULL;
3311     ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3312     if (ret < 0)
3313         goto out;
3314 
3315     ret = send_rename(sctx, from_path, to_path);
3316     if (ret < 0)
3317         goto out;
3318 
3319     if (rmdir_ino) {
3320         struct orphan_dir_info *odi;
3321         u64 gen;
3322 
3323         odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
3324         if (!odi) {
3325             /* already deleted */
3326             goto finish;
3327         }
3328         gen = odi->gen;
3329 
3330         ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
3331         if (ret < 0)
3332             goto out;
3333         if (!ret)
3334             goto finish;
3335 
3336         name = fs_path_alloc();
3337         if (!name) {
3338             ret = -ENOMEM;
3339             goto out;
3340         }
3341         ret = get_cur_path(sctx, rmdir_ino, gen, name);
3342         if (ret < 0)
3343             goto out;
3344         ret = send_rmdir(sctx, name);
3345         if (ret < 0)
3346             goto out;
3347     }
3348 
3349 finish:
3350     ret = send_utimes(sctx, pm->ino, pm->gen);
3351     if (ret < 0)
3352         goto out;
3353 
3354     /*
3355      * After rename/move, need to update the utimes of both new parent(s)
3356      * and old parent(s).
3357      */
3358     list_for_each_entry(cur, &pm->update_refs, list) {
3359         /*
3360          * The parent inode might have been deleted in the send snapshot
3361          */
3362         ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3363                      NULL, NULL, NULL, NULL, NULL, NULL);
3364         if (ret == -ENOENT) {
3365             ret = 0;
3366             continue;
3367         }
3368         if (ret < 0)
3369             goto out;
3370 
3371         ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3372         if (ret < 0)
3373             goto out;
3374     }
3375 
3376 out:
3377     fs_path_free(name);
3378     fs_path_free(from_path);
3379     fs_path_free(to_path);
3380     sctx->send_progress = orig_progress;
3381 
3382     return ret;
3383 }
3384 
3385 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3386 {
3387     if (!list_empty(&m->list))
3388         list_del(&m->list);
3389     if (!RB_EMPTY_NODE(&m->node))
3390         rb_erase(&m->node, &sctx->pending_dir_moves);
3391     __free_recorded_refs(&m->update_refs);
3392     kfree(m);
3393 }
3394 
3395 static void tail_append_pending_moves(struct send_ctx *sctx,
3396                       struct pending_dir_move *moves,
3397                       struct list_head *stack)
3398 {
3399     if (list_empty(&moves->list)) {
3400         list_add_tail(&moves->list, stack);
3401     } else {
3402         LIST_HEAD(list);
3403         list_splice_init(&moves->list, &list);
3404         list_add_tail(&moves->list, stack);
3405         list_splice_tail(&list, stack);
3406     }
3407     if (!RB_EMPTY_NODE(&moves->node)) {
3408         rb_erase(&moves->node, &sctx->pending_dir_moves);
3409         RB_CLEAR_NODE(&moves->node);
3410     }
3411 }
3412 
3413 static int apply_children_dir_moves(struct send_ctx *sctx)
3414 {
3415     struct pending_dir_move *pm;
3416     struct list_head stack;
3417     u64 parent_ino = sctx->cur_ino;
3418     int ret = 0;
3419 
3420     pm = get_pending_dir_moves(sctx, parent_ino);
3421     if (!pm)
3422         return 0;
3423 
3424     INIT_LIST_HEAD(&stack);
3425     tail_append_pending_moves(sctx, pm, &stack);
3426 
3427     while (!list_empty(&stack)) {
3428         pm = list_first_entry(&stack, struct pending_dir_move, list);
3429         parent_ino = pm->ino;
3430         ret = apply_dir_move(sctx, pm);
3431         free_pending_move(sctx, pm);
3432         if (ret)
3433             goto out;
3434         pm = get_pending_dir_moves(sctx, parent_ino);
3435         if (pm)
3436             tail_append_pending_moves(sctx, pm, &stack);
3437     }
3438     return 0;
3439 
3440 out:
3441     while (!list_empty(&stack)) {
3442         pm = list_first_entry(&stack, struct pending_dir_move, list);
3443         free_pending_move(sctx, pm);
3444     }
3445     return ret;
3446 }
3447 
3448 /*
3449  * We might need to delay a directory rename even when no ancestor directory
3450  * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3451  * renamed. This happens when we rename a directory to the old name (the name
3452  * in the parent root) of some other unrelated directory that got its rename
3453  * delayed due to some ancestor with higher number that got renamed.
3454  *
3455  * Example:
3456  *
3457  * Parent snapshot:
3458  * .                                       (ino 256)
3459  * |---- a/                                (ino 257)
3460  * |     |---- file                        (ino 260)
3461  * |
3462  * |---- b/                                (ino 258)
3463  * |---- c/                                (ino 259)
3464  *
3465  * Send snapshot:
3466  * .                                       (ino 256)
3467  * |---- a/                                (ino 258)
3468  * |---- x/                                (ino 259)
3469  *       |---- y/                          (ino 257)
3470  *             |----- file                 (ino 260)
3471  *
3472  * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
3473  * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3474  * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
3475  * must issue is:
3476  *
3477  * 1 - rename 259 from 'c' to 'x'
3478  * 2 - rename 257 from 'a' to 'x/y'
3479  * 3 - rename 258 from 'b' to 'a'
3480  *
3481  * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3482  * be done right away and < 0 on error.
3483  */
3484 static int wait_for_dest_dir_move(struct send_ctx *sctx,
3485                   struct recorded_ref *parent_ref,
3486                   const bool is_orphan)
3487 {
3488     struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3489     struct btrfs_path *path;
3490     struct btrfs_key key;
3491     struct btrfs_key di_key;
3492     struct btrfs_dir_item *di;
3493     u64 left_gen;
3494     u64 right_gen;
3495     int ret = 0;
3496     struct waiting_dir_move *wdm;
3497 
3498     if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3499         return 0;
3500 
3501     path = alloc_path_for_send();
3502     if (!path)
3503         return -ENOMEM;
3504 
3505     key.objectid = parent_ref->dir;
3506     key.type = BTRFS_DIR_ITEM_KEY;
3507     key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3508 
3509     ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3510     if (ret < 0) {
3511         goto out;
3512     } else if (ret > 0) {
3513         ret = 0;
3514         goto out;
3515     }
3516 
3517     di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3518                        parent_ref->name_len);
3519     if (!di) {
3520         ret = 0;
3521         goto out;
3522     }
3523     /*
3524      * di_key.objectid has the number of the inode that has a dentry in the
3525      * parent directory with the same name that sctx->cur_ino is being
3526      * renamed to. We need to check if that inode is in the send root as
3527      * well and if it is currently marked as an inode with a pending rename,
3528      * if it is, we need to delay the rename of sctx->cur_ino as well, so
3529      * that it happens after that other inode is renamed.
3530      */
3531     btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3532     if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3533         ret = 0;
3534         goto out;
3535     }
3536 
3537     ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3538                  &left_gen, NULL, NULL, NULL, NULL, NULL);
3539     if (ret < 0)
3540         goto out;
3541     ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3542                  &right_gen, NULL, NULL, NULL, NULL, NULL);
3543     if (ret < 0) {
3544         if (ret == -ENOENT)
3545             ret = 0;
3546         goto out;
3547     }
3548 
3549     /* Different inode, no need to delay the rename of sctx->cur_ino */
3550     if (right_gen != left_gen) {
3551         ret = 0;
3552         goto out;
3553     }
3554 
3555     wdm = get_waiting_dir_move(sctx, di_key.objectid);
3556     if (wdm && !wdm->orphanized) {
3557         ret = add_pending_dir_move(sctx,
3558                        sctx->cur_ino,
3559                        sctx->cur_inode_gen,
3560                        di_key.objectid,
3561                        &sctx->new_refs,
3562                        &sctx->deleted_refs,
3563                        is_orphan);
3564         if (!ret)
3565             ret = 1;
3566     }
3567 out:
3568     btrfs_free_path(path);
3569     return ret;
3570 }
3571 
3572 /*
3573  * Check if inode ino2, or any of its ancestors, is inode ino1.
3574  * Return 1 if true, 0 if false and < 0 on error.
3575  */
3576 static int check_ino_in_path(struct btrfs_root *root,
3577                  const u64 ino1,
3578                  const u64 ino1_gen,
3579                  const u64 ino2,
3580                  const u64 ino2_gen,
3581                  struct fs_path *fs_path)
3582 {
3583     u64 ino = ino2;
3584 
3585     if (ino1 == ino2)
3586         return ino1_gen == ino2_gen;
3587 
3588     while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3589         u64 parent;
3590         u64 parent_gen;
3591         int ret;
3592 
3593         fs_path_reset(fs_path);
3594         ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3595         if (ret < 0)
3596             return ret;
3597         if (parent == ino1)
3598             return parent_gen == ino1_gen;
3599         ino = parent;
3600     }
3601     return 0;
3602 }
3603 
3604 /*
3605  * Check if inode ino1 is an ancestor of inode ino2 in the given root for any
3606  * possible path (in case ino2 is not a directory and has multiple hard links).
3607  * Return 1 if true, 0 if false and < 0 on error.
3608  */
3609 static int is_ancestor(struct btrfs_root *root,
3610                const u64 ino1,
3611                const u64 ino1_gen,
3612                const u64 ino2,
3613                struct fs_path *fs_path)
3614 {
3615     bool free_fs_path = false;
3616     int ret = 0;
3617     int iter_ret = 0;
3618     struct btrfs_path *path = NULL;
3619     struct btrfs_key key;
3620 
3621     if (!fs_path) {
3622         fs_path = fs_path_alloc();
3623         if (!fs_path)
3624             return -ENOMEM;
3625         free_fs_path = true;
3626     }
3627 
3628     path = alloc_path_for_send();
3629     if (!path) {
3630         ret = -ENOMEM;
3631         goto out;
3632     }
3633 
3634     key.objectid = ino2;
3635     key.type = BTRFS_INODE_REF_KEY;
3636     key.offset = 0;
3637 
3638     btrfs_for_each_slot(root, &key, &key, path, iter_ret) {
3639         struct extent_buffer *leaf = path->nodes[0];
3640         int slot = path->slots[0];
3641         u32 cur_offset = 0;
3642         u32 item_size;
3643 
3644         if (key.objectid != ino2)
3645             break;
3646         if (key.type != BTRFS_INODE_REF_KEY &&
3647             key.type != BTRFS_INODE_EXTREF_KEY)
3648             break;
3649 
3650         item_size = btrfs_item_size(leaf, slot);
3651         while (cur_offset < item_size) {
3652             u64 parent;
3653             u64 parent_gen;
3654 
3655             if (key.type == BTRFS_INODE_EXTREF_KEY) {
3656                 unsigned long ptr;
3657                 struct btrfs_inode_extref *extref;
3658 
3659                 ptr = btrfs_item_ptr_offset(leaf, slot);
3660                 extref = (struct btrfs_inode_extref *)
3661                     (ptr + cur_offset);
3662                 parent = btrfs_inode_extref_parent(leaf,
3663                                    extref);
3664                 cur_offset += sizeof(*extref);
3665                 cur_offset += btrfs_inode_extref_name_len(leaf,
3666                                   extref);
3667             } else {
3668                 parent = key.offset;
3669                 cur_offset = item_size;
3670             }
3671 
3672             ret = get_inode_info(root, parent, NULL, &parent_gen,
3673                          NULL, NULL, NULL, NULL, NULL);
3674             if (ret < 0)
3675                 goto out;
3676             ret = check_ino_in_path(root, ino1, ino1_gen,
3677                         parent, parent_gen, fs_path);
3678             if (ret)
3679                 goto out;
3680         }
3681     }
3682     ret = 0;
3683     if (iter_ret < 0)
3684         ret = iter_ret;
3685 
3686 out:
3687     btrfs_free_path(path);
3688     if (free_fs_path)
3689         fs_path_free(fs_path);
3690     return ret;
3691 }
3692 
3693 static int wait_for_parent_move(struct send_ctx *sctx,
3694                 struct recorded_ref *parent_ref,
3695                 const bool is_orphan)
3696 {
3697     int ret = 0;
3698     u64 ino = parent_ref->dir;
3699     u64 ino_gen = parent_ref->dir_gen;
3700     u64 parent_ino_before, parent_ino_after;
3701     struct fs_path *path_before = NULL;
3702     struct fs_path *path_after = NULL;
3703     int len1, len2;
3704 
3705     path_after = fs_path_alloc();
3706     path_before = fs_path_alloc();
3707     if (!path_after || !path_before) {
3708         ret = -ENOMEM;
3709         goto out;
3710     }
3711 
3712     /*
3713      * Our current directory inode may not yet be renamed/moved because some
3714      * ancestor (immediate or not) has to be renamed/moved first. So find if
3715      * such ancestor exists and make sure our own rename/move happens after
3716      * that ancestor is processed to avoid path build infinite loops (done
3717      * at get_cur_path()).
3718      */
3719     while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3720         u64 parent_ino_after_gen;
3721 
3722         if (is_waiting_for_move(sctx, ino)) {
3723             /*
3724              * If the current inode is an ancestor of ino in the
3725              * parent root, we need to delay the rename of the
3726              * current inode, otherwise don't delayed the rename
3727              * because we can end up with a circular dependency
3728              * of renames, resulting in some directories never
3729              * getting the respective rename operations issued in
3730              * the send stream or getting into infinite path build
3731              * loops.
3732              */
3733             ret = is_ancestor(sctx->parent_root,
3734                       sctx->cur_ino, sctx->cur_inode_gen,
3735                       ino, path_before);
3736             if (ret)
3737                 break;
3738         }
3739 
3740         fs_path_reset(path_before);
3741         fs_path_reset(path_after);
3742 
3743         ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3744                     &parent_ino_after_gen, path_after);
3745         if (ret < 0)
3746             goto out;
3747         ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3748                     NULL, path_before);
3749         if (ret < 0 && ret != -ENOENT) {
3750             goto out;
3751         } else if (ret == -ENOENT) {
3752             ret = 0;
3753             break;
3754         }
3755 
3756         len1 = fs_path_len(path_before);
3757         len2 = fs_path_len(path_after);
3758         if (ino > sctx->cur_ino &&
3759             (parent_ino_before != parent_ino_after || len1 != len2 ||
3760              memcmp(path_before->start, path_after->start, len1))) {
3761             u64 parent_ino_gen;
3762 
3763             ret = get_inode_info(sctx->parent_root, ino, NULL,
3764                          &parent_ino_gen, NULL, NULL, NULL,
3765                          NULL, NULL);
3766             if (ret < 0)
3767                 goto out;
3768             if (ino_gen == parent_ino_gen) {
3769                 ret = 1;
3770                 break;
3771             }
3772         }
3773         ino = parent_ino_after;
3774         ino_gen = parent_ino_after_gen;
3775     }
3776 
3777 out:
3778     fs_path_free(path_before);
3779     fs_path_free(path_after);
3780 
3781     if (ret == 1) {
3782         ret = add_pending_dir_move(sctx,
3783                        sctx->cur_ino,
3784                        sctx->cur_inode_gen,
3785                        ino,
3786                        &sctx->new_refs,
3787                        &sctx->deleted_refs,
3788                        is_orphan);
3789         if (!ret)
3790             ret = 1;
3791     }
3792 
3793     return ret;
3794 }
3795 
3796 static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3797 {
3798     int ret;
3799     struct fs_path *new_path;
3800 
3801     /*
3802      * Our reference's name member points to its full_path member string, so
3803      * we use here a new path.
3804      */
3805     new_path = fs_path_alloc();
3806     if (!new_path)
3807         return -ENOMEM;
3808 
3809     ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
3810     if (ret < 0) {
3811         fs_path_free(new_path);
3812         return ret;
3813     }
3814     ret = fs_path_add(new_path, ref->name, ref->name_len);
3815     if (ret < 0) {
3816         fs_path_free(new_path);
3817         return ret;
3818     }
3819 
3820     fs_path_free(ref->full_path);
3821     set_ref_path(ref, new_path);
3822 
3823     return 0;
3824 }
3825 
3826 /*
3827  * When processing the new references for an inode we may orphanize an existing
3828  * directory inode because its old name conflicts with one of the new references
3829  * of the current inode. Later, when processing another new reference of our
3830  * inode, we might need to orphanize another inode, but the path we have in the
3831  * reference reflects the pre-orphanization name of the directory we previously
3832  * orphanized. For example:
3833  *
3834  * parent snapshot looks like:
3835  *
3836  * .                                     (ino 256)
3837  * |----- f1                             (ino 257)
3838  * |----- f2                             (ino 258)
3839  * |----- d1/                            (ino 259)
3840  *        |----- d2/                     (ino 260)
3841  *
3842  * send snapshot looks like:
3843  *
3844  * .                                     (ino 256)
3845  * |----- d1                             (ino 258)
3846  * |----- f2/                            (ino 259)
3847  *        |----- f2_link/                (ino 260)
3848  *        |       |----- f1              (ino 257)
3849  *        |
3850  *        |----- d2                      (ino 258)
3851  *
3852  * When processing inode 257 we compute the name for inode 259 as "d1", and we
3853  * cache it in the name cache. Later when we start processing inode 258, when
3854  * collecting all its new references we set a full path of "d1/d2" for its new
3855  * reference with name "d2". When we start processing the new references we
3856  * start by processing the new reference with name "d1", and this results in
3857  * orphanizing inode 259, since its old reference causes a conflict. Then we
3858  * move on the next new reference, with name "d2", and we find out we must
3859  * orphanize inode 260, as its old reference conflicts with ours - but for the
3860  * orphanization we use a source path corresponding to the path we stored in the
3861  * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the
3862  * receiver fail since the path component "d1/" no longer exists, it was renamed
3863  * to "o259-6-0/" when processing the previous new reference. So in this case we
3864  * must recompute the path in the new reference and use it for the new
3865  * orphanization operation.
3866  */
3867 static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3868 {
3869     char *name;
3870     int ret;
3871 
3872     name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
3873     if (!name)
3874         return -ENOMEM;
3875 
3876     fs_path_reset(ref->full_path);
3877     ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
3878     if (ret < 0)
3879         goto out;
3880 
3881     ret = fs_path_add(ref->full_path, name, ref->name_len);
3882     if (ret < 0)
3883         goto out;
3884 
3885     /* Update the reference's base name pointer. */
3886     set_ref_path(ref, ref->full_path);
3887 out:
3888     kfree(name);
3889     return ret;
3890 }
3891 
3892 /*
3893  * This does all the move/link/unlink/rmdir magic.
3894  */
3895 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3896 {
3897     struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3898     int ret = 0;
3899     struct recorded_ref *cur;
3900     struct recorded_ref *cur2;
3901     struct list_head check_dirs;
3902     struct fs_path *valid_path = NULL;
3903     u64 ow_inode = 0;
3904     u64 ow_gen;
3905     u64 ow_mode;
3906     int did_overwrite = 0;
3907     int is_orphan = 0;
3908     u64 last_dir_ino_rm = 0;
3909     bool can_rename = true;
3910     bool orphanized_dir = false;
3911     bool orphanized_ancestor = false;
3912 
3913     btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3914 
3915     /*
3916      * This should never happen as the root dir always has the same ref
3917      * which is always '..'
3918      */
3919     BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3920     INIT_LIST_HEAD(&check_dirs);
3921 
3922     valid_path = fs_path_alloc();
3923     if (!valid_path) {
3924         ret = -ENOMEM;
3925         goto out;
3926     }
3927 
3928     /*
3929      * First, check if the first ref of the current inode was overwritten
3930      * before. If yes, we know that the current inode was already orphanized
3931      * and thus use the orphan name. If not, we can use get_cur_path to
3932      * get the path of the first ref as it would like while receiving at
3933      * this point in time.
3934      * New inodes are always orphan at the beginning, so force to use the
3935      * orphan name in this case.
3936      * The first ref is stored in valid_path and will be updated if it
3937      * gets moved around.
3938      */
3939     if (!sctx->cur_inode_new) {
3940         ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3941                 sctx->cur_inode_gen);
3942         if (ret < 0)
3943             goto out;
3944         if (ret)
3945             did_overwrite = 1;
3946     }
3947     if (sctx->cur_inode_new || did_overwrite) {
3948         ret = gen_unique_name(sctx, sctx->cur_ino,
3949                 sctx->cur_inode_gen, valid_path);
3950         if (ret < 0)
3951             goto out;
3952         is_orphan = 1;
3953     } else {
3954         ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3955                 valid_path);
3956         if (ret < 0)
3957             goto out;
3958     }
3959 
3960     /*
3961      * Before doing any rename and link operations, do a first pass on the
3962      * new references to orphanize any unprocessed inodes that may have a
3963      * reference that conflicts with one of the new references of the current
3964      * inode. This needs to happen first because a new reference may conflict
3965      * with the old reference of a parent directory, so we must make sure
3966      * that the path used for link and rename commands don't use an
3967      * orphanized name when an ancestor was not yet orphanized.
3968      *
3969      * Example:
3970      *
3971      * Parent snapshot:
3972      *
3973      * .                                                      (ino 256)
3974      * |----- testdir/                                        (ino 259)
3975      * |          |----- a                                    (ino 257)
3976      * |
3977      * |----- b                                               (ino 258)
3978      *
3979      * Send snapshot:
3980      *
3981      * .                                                      (ino 256)
3982      * |----- testdir_2/                                      (ino 259)
3983      * |          |----- a                                    (ino 260)
3984      * |
3985      * |----- testdir                                         (ino 257)
3986      * |----- b                                               (ino 257)
3987      * |----- b2                                              (ino 258)
3988      *
3989      * Processing the new reference for inode 257 with name "b" may happen
3990      * before processing the new reference with name "testdir". If so, we
3991      * must make sure that by the time we send a link command to create the
3992      * hard link "b", inode 259 was already orphanized, since the generated
3993      * path in "valid_path" already contains the orphanized name for 259.
3994      * We are processing inode 257, so only later when processing 259 we do
3995      * the rename operation to change its temporary (orphanized) name to
3996      * "testdir_2".
3997      */
3998     list_for_each_entry(cur, &sctx->new_refs, list) {
3999         ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4000         if (ret < 0)
4001             goto out;
4002         if (ret == inode_state_will_create)
4003             continue;
4004 
4005         /*
4006          * Check if this new ref would overwrite the first ref of another
4007          * unprocessed inode. If yes, orphanize the overwritten inode.
4008          * If we find an overwritten ref that is not the first ref,
4009          * simply unlink it.
4010          */
4011         ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4012                 cur->name, cur->name_len,
4013                 &ow_inode, &ow_gen, &ow_mode);
4014         if (ret < 0)
4015             goto out;
4016         if (ret) {
4017             ret = is_first_ref(sctx->parent_root,
4018                        ow_inode, cur->dir, cur->name,
4019                        cur->name_len);
4020             if (ret < 0)
4021                 goto out;
4022             if (ret) {
4023                 struct name_cache_entry *nce;
4024                 struct waiting_dir_move *wdm;
4025 
4026                 if (orphanized_dir) {
4027                     ret = refresh_ref_path(sctx, cur);
4028                     if (ret < 0)
4029                         goto out;
4030                 }
4031 
4032                 ret = orphanize_inode(sctx, ow_inode, ow_gen,
4033                         cur->full_path);
4034                 if (ret < 0)
4035                     goto out;
4036                 if (S_ISDIR(ow_mode))
4037                     orphanized_dir = true;
4038 
4039                 /*
4040                  * If ow_inode has its rename operation delayed
4041                  * make sure that its orphanized name is used in
4042                  * the source path when performing its rename
4043                  * operation.
4044                  */
4045                 if (is_waiting_for_move(sctx, ow_inode)) {
4046                     wdm = get_waiting_dir_move(sctx,
4047                                    ow_inode);
4048                     ASSERT(wdm);
4049                     wdm->orphanized = true;
4050                 }
4051 
4052                 /*
4053                  * Make sure we clear our orphanized inode's
4054                  * name from the name cache. This is because the
4055                  * inode ow_inode might be an ancestor of some
4056                  * other inode that will be orphanized as well
4057                  * later and has an inode number greater than
4058                  * sctx->send_progress. We need to prevent
4059                  * future name lookups from using the old name
4060                  * and get instead the orphan name.
4061                  */
4062                 nce = name_cache_search(sctx, ow_inode, ow_gen);
4063                 if (nce) {
4064                     name_cache_delete(sctx, nce);
4065                     kfree(nce);
4066                 }
4067 
4068                 /*
4069                  * ow_inode might currently be an ancestor of
4070                  * cur_ino, therefore compute valid_path (the
4071                  * current path of cur_ino) again because it
4072                  * might contain the pre-orphanization name of
4073                  * ow_inode, which is no longer valid.
4074                  */
4075                 ret = is_ancestor(sctx->parent_root,
4076                           ow_inode, ow_gen,
4077                           sctx->cur_ino, NULL);
4078                 if (ret > 0) {
4079                     orphanized_ancestor = true;
4080                     fs_path_reset(valid_path);
4081                     ret = get_cur_path(sctx, sctx->cur_ino,
4082                                sctx->cur_inode_gen,
4083                                valid_path);
4084                 }
4085                 if (ret < 0)
4086                     goto out;
4087             } else {
4088                 /*
4089                  * If we previously orphanized a directory that
4090                  * collided with a new reference that we already
4091                  * processed, recompute the current path because
4092                  * that directory may be part of the path.
4093                  */
4094                 if (orphanized_dir) {
4095                     ret = refresh_ref_path(sctx, cur);
4096                     if (ret < 0)
4097                         goto out;
4098                 }
4099                 ret = send_unlink(sctx, cur->full_path);
4100                 if (ret < 0)
4101                     goto out;
4102             }
4103         }
4104 
4105     }
4106 
4107     list_for_each_entry(cur, &sctx->new_refs, list) {
4108         /*
4109          * We may have refs where the parent directory does not exist
4110          * yet. This happens if the parent directories inum is higher
4111          * than the current inum. To handle this case, we create the
4112          * parent directory out of order. But we need to check if this
4113          * did already happen before due to other refs in the same dir.
4114          */
4115         ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4116         if (ret < 0)
4117             goto out;
4118         if (ret == inode_state_will_create) {
4119             ret = 0;
4120             /*
4121              * First check if any of the current inodes refs did
4122              * already create the dir.
4123              */
4124             list_for_each_entry(cur2, &sctx->new_refs, list) {
4125                 if (cur == cur2)
4126                     break;
4127                 if (cur2->dir == cur->dir) {
4128                     ret = 1;
4129                     break;
4130                 }
4131             }
4132 
4133             /*
4134              * If that did not happen, check if a previous inode
4135              * did already create the dir.
4136              */
4137             if (!ret)
4138                 ret = did_create_dir(sctx, cur->dir);
4139             if (ret < 0)
4140                 goto out;
4141             if (!ret) {
4142                 ret = send_create_inode(sctx, cur->dir);
4143                 if (ret < 0)
4144                     goto out;
4145             }
4146         }
4147 
4148         if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
4149             ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
4150             if (ret < 0)
4151                 goto out;
4152             if (ret == 1) {
4153                 can_rename = false;
4154                 *pending_move = 1;
4155             }
4156         }
4157 
4158         if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
4159             can_rename) {
4160             ret = wait_for_parent_move(sctx, cur, is_orphan);
4161             if (ret < 0)
4162                 goto out;
4163             if (ret == 1) {
4164                 can_rename = false;
4165                 *pending_move = 1;
4166             }
4167         }
4168 
4169         /*
4170          * link/move the ref to the new place. If we have an orphan
4171          * inode, move it and update valid_path. If not, link or move
4172          * it depending on the inode mode.
4173          */
4174         if (is_orphan && can_rename) {
4175             ret = send_rename(sctx, valid_path, cur->full_path);
4176             if (ret < 0)
4177                 goto out;
4178             is_orphan = 0;
4179             ret = fs_path_copy(valid_path, cur->full_path);
4180             if (ret < 0)
4181                 goto out;
4182         } else if (can_rename) {
4183             if (S_ISDIR(sctx->cur_inode_mode)) {
4184                 /*
4185                  * Dirs can't be linked, so move it. For moved
4186                  * dirs, we always have one new and one deleted
4187                  * ref. The deleted ref is ignored later.
4188                  */
4189                 ret = send_rename(sctx, valid_path,
4190                           cur->full_path);
4191                 if (!ret)
4192                     ret = fs_path_copy(valid_path,
4193                                cur->full_path);
4194                 if (ret < 0)
4195                     goto out;
4196             } else {
4197                 /*
4198                  * We might have previously orphanized an inode
4199                  * which is an ancestor of our current inode,
4200                  * so our reference's full path, which was
4201                  * computed before any such orphanizations, must
4202                  * be updated.
4203                  */
4204                 if (orphanized_dir) {
4205                     ret = update_ref_path(sctx, cur);
4206                     if (ret < 0)
4207                         goto out;
4208                 }
4209                 ret = send_link(sctx, cur->full_path,
4210                         valid_path);
4211                 if (ret < 0)
4212                     goto out;
4213             }
4214         }
4215         ret = dup_ref(cur, &check_dirs);
4216         if (ret < 0)
4217             goto out;
4218     }
4219 
4220     if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
4221         /*
4222          * Check if we can already rmdir the directory. If not,
4223          * orphanize it. For every dir item inside that gets deleted
4224          * later, we do this check again and rmdir it then if possible.
4225          * See the use of check_dirs for more details.
4226          */
4227         ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4228                 sctx->cur_ino);
4229         if (ret < 0)
4230             goto out;
4231         if (ret) {
4232             ret = send_rmdir(sctx, valid_path);
4233             if (ret < 0)
4234                 goto out;
4235         } else if (!is_orphan) {
4236             ret = orphanize_inode(sctx, sctx->cur_ino,
4237                     sctx->cur_inode_gen, valid_path);
4238             if (ret < 0)
4239                 goto out;
4240             is_orphan = 1;
4241         }
4242 
4243         list_for_each_entry(cur, &sctx->deleted_refs, list) {
4244             ret = dup_ref(cur, &check_dirs);
4245             if (ret < 0)
4246                 goto out;
4247         }
4248     } else if (S_ISDIR(sctx->cur_inode_mode) &&
4249            !list_empty(&sctx->deleted_refs)) {
4250         /*
4251          * We have a moved dir. Add the old parent to check_dirs
4252          */
4253         cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
4254                 list);
4255         ret = dup_ref(cur, &check_dirs);
4256         if (ret < 0)
4257             goto out;
4258     } else if (!S_ISDIR(sctx->cur_inode_mode)) {
4259         /*
4260          * We have a non dir inode. Go through all deleted refs and
4261          * unlink them if they were not already overwritten by other
4262          * inodes.
4263          */
4264         list_for_each_entry(cur, &sctx->deleted_refs, list) {
4265             ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4266                     sctx->cur_ino, sctx->cur_inode_gen,
4267                     cur->name, cur->name_len);
4268             if (ret < 0)
4269                 goto out;
4270             if (!ret) {
4271                 /*
4272                  * If we orphanized any ancestor before, we need
4273                  * to recompute the full path for deleted names,
4274                  * since any such path was computed before we
4275                  * processed any references and orphanized any
4276                  * ancestor inode.
4277                  */
4278                 if (orphanized_ancestor) {
4279                     ret = update_ref_path(sctx, cur);
4280                     if (ret < 0)
4281                         goto out;
4282                 }
4283                 ret = send_unlink(sctx, cur->full_path);
4284                 if (ret < 0)
4285                     goto out;
4286             }
4287             ret = dup_ref(cur, &check_dirs);
4288             if (ret < 0)
4289                 goto out;
4290         }
4291         /*
4292          * If the inode is still orphan, unlink the orphan. This may
4293          * happen when a previous inode did overwrite the first ref
4294          * of this inode and no new refs were added for the current
4295          * inode. Unlinking does not mean that the inode is deleted in
4296          * all cases. There may still be links to this inode in other
4297          * places.
4298          */
4299         if (is_orphan) {
4300             ret = send_unlink(sctx, valid_path);
4301             if (ret < 0)
4302                 goto out;
4303         }
4304     }
4305 
4306     /*
4307      * We did collect all parent dirs where cur_inode was once located. We
4308      * now go through all these dirs and check if they are pending for
4309      * deletion and if it's finally possible to perform the rmdir now.
4310      * We also update the inode stats of the parent dirs here.
4311      */
4312     list_for_each_entry(cur, &check_dirs, list) {
4313         /*
4314          * In case we had refs into dirs that were not processed yet,
4315          * we don't need to do the utime and rmdir logic for these dirs.
4316          * The dir will be processed later.
4317          */
4318         if (cur->dir > sctx->cur_ino)
4319             continue;
4320 
4321         ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4322         if (ret < 0)
4323             goto out;
4324 
4325         if (ret == inode_state_did_create ||
4326             ret == inode_state_no_change) {
4327             /* TODO delayed utimes */
4328             ret = send_utimes(sctx, cur->dir, cur->dir_gen);
4329             if (ret < 0)
4330                 goto out;
4331         } else if (ret == inode_state_did_delete &&
4332                cur->dir != last_dir_ino_rm) {
4333             ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
4334                     sctx->cur_ino);
4335             if (ret < 0)
4336                 goto out;
4337             if (ret) {
4338                 ret = get_cur_path(sctx, cur->dir,
4339                            cur->dir_gen, valid_path);
4340                 if (ret < 0)
4341                     goto out;
4342                 ret = send_rmdir(sctx, valid_path);
4343                 if (ret < 0)
4344                     goto out;
4345                 last_dir_ino_rm = cur->dir;
4346             }
4347         }
4348     }
4349 
4350     ret = 0;
4351 
4352 out:
4353     __free_recorded_refs(&check_dirs);
4354     free_recorded_refs(sctx);
4355     fs_path_free(valid_path);
4356     return ret;
4357 }
4358 
4359 static int rbtree_ref_comp(const void *k, const struct rb_node *node)
4360 {
4361     const struct recorded_ref *data = k;
4362     const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node);
4363     int result;
4364 
4365     if (data->dir > ref->dir)
4366         return 1;
4367     if (data->dir < ref->dir)
4368         return -1;
4369     if (data->dir_gen > ref->dir_gen)
4370         return 1;
4371     if (data->dir_gen < ref->dir_gen)
4372         return -1;
4373     if (data->name_len > ref->name_len)
4374         return 1;
4375     if (data->name_len < ref->name_len)
4376         return -1;
4377     result = strcmp(data->name, ref->name);
4378     if (result > 0)
4379         return 1;
4380     if (result < 0)
4381         return -1;
4382     return 0;
4383 }
4384 
4385 static bool rbtree_ref_less(struct rb_node *node, const struct rb_node *parent)
4386 {
4387     const struct recorded_ref *entry = rb_entry(node, struct recorded_ref, node);
4388 
4389     return rbtree_ref_comp(entry, parent) < 0;
4390 }
4391 
4392 static int record_ref_in_tree(struct rb_root *root, struct list_head *refs,
4393                   struct fs_path *name, u64 dir, u64 dir_gen,
4394                   struct send_ctx *sctx)
4395 {
4396     int ret = 0;
4397     struct fs_path *path = NULL;
4398     struct recorded_ref *ref = NULL;
4399 
4400     path = fs_path_alloc();
4401     if (!path) {
4402         ret = -ENOMEM;
4403         goto out;
4404     }
4405 
4406     ref = recorded_ref_alloc();
4407     if (!ref) {
4408         ret = -ENOMEM;
4409         goto out;
4410     }
4411 
4412     ret = get_cur_path(sctx, dir, dir_gen, path);
4413     if (ret < 0)
4414         goto out;
4415     ret = fs_path_add_path(path, name);
4416     if (ret < 0)
4417         goto out;
4418 
4419     ref->dir = dir;
4420     ref->dir_gen = dir_gen;
4421     set_ref_path(ref, path);
4422     list_add_tail(&ref->list, refs);
4423     rb_add(&ref->node, root, rbtree_ref_less);
4424     ref->root = root;
4425 out:
4426     if (ret) {
4427         if (path && (!ref || !ref->full_path))
4428             fs_path_free(path);
4429         recorded_ref_free(ref);
4430     }
4431     return ret;
4432 }
4433 
4434 static int record_new_ref_if_needed(int num, u64 dir, int index,
4435                     struct fs_path *name, void *ctx)
4436 {
4437     int ret = 0;
4438     struct send_ctx *sctx = ctx;
4439     struct rb_node *node = NULL;
4440     struct recorded_ref data;
4441     struct recorded_ref *ref;
4442     u64 dir_gen;
4443 
4444     ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
4445                  NULL, NULL, NULL, NULL);
4446     if (ret < 0)
4447         goto out;
4448 
4449     data.dir = dir;
4450     data.dir_gen = dir_gen;
4451     set_ref_path(&data, name);
4452     node = rb_find(&data, &sctx->rbtree_deleted_refs, rbtree_ref_comp);
4453     if (node) {
4454         ref = rb_entry(node, struct recorded_ref, node);
4455         recorded_ref_free(ref);
4456     } else {
4457         ret = record_ref_in_tree(&sctx->rbtree_new_refs,
4458                      &sctx->new_refs, name, dir, dir_gen,
4459                      sctx);
4460     }
4461 out:
4462     return ret;
4463 }
4464 
4465 static int record_deleted_ref_if_needed(int num, u64 dir, int index,
4466                     struct fs_path *name, void *ctx)
4467 {
4468     int ret = 0;
4469     struct send_ctx *sctx = ctx;
4470     struct rb_node *node = NULL;
4471     struct recorded_ref data;
4472     struct recorded_ref *ref;
4473     u64 dir_gen;
4474 
4475     ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
4476                  NULL, NULL, NULL, NULL);
4477     if (ret < 0)
4478         goto out;
4479 
4480     data.dir = dir;
4481     data.dir_gen = dir_gen;
4482     set_ref_path(&data, name);
4483     node = rb_find(&data, &sctx->rbtree_new_refs, rbtree_ref_comp);
4484     if (node) {
4485         ref = rb_entry(node, struct recorded_ref, node);
4486         recorded_ref_free(ref);
4487     } else {
4488         ret = record_ref_in_tree(&sctx->rbtree_deleted_refs,
4489                      &sctx->deleted_refs, name, dir,
4490                      dir_gen, sctx);
4491     }
4492 out:
4493     return ret;
4494 }
4495 
4496 static int record_new_ref(struct send_ctx *sctx)
4497 {
4498     int ret;
4499 
4500     ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4501                 sctx->cmp_key, 0, record_new_ref_if_needed, sctx);
4502     if (ret < 0)
4503         goto out;
4504     ret = 0;
4505 
4506 out:
4507     return ret;
4508 }
4509 
4510 static int record_deleted_ref(struct send_ctx *sctx)
4511 {
4512     int ret;
4513 
4514     ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4515                 sctx->cmp_key, 0, record_deleted_ref_if_needed,
4516                 sctx);
4517     if (ret < 0)
4518         goto out;
4519     ret = 0;
4520 
4521 out:
4522     return ret;
4523 }
4524 
4525 static int record_changed_ref(struct send_ctx *sctx)
4526 {
4527     int ret = 0;
4528 
4529     ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4530             sctx->cmp_key, 0, record_new_ref_if_needed, sctx);
4531     if (ret < 0)
4532         goto out;
4533     ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4534             sctx->cmp_key, 0, record_deleted_ref_if_needed, sctx);
4535     if (ret < 0)
4536         goto out;
4537     ret = 0;
4538 
4539 out:
4540     return ret;
4541 }
4542 
4543 /*
4544  * Record and process all refs at once. Needed when an inode changes the
4545  * generation number, which means that it was deleted and recreated.
4546  */
4547 static int process_all_refs(struct send_ctx *sctx,
4548                 enum btrfs_compare_tree_result cmd)
4549 {
4550     int ret = 0;
4551     int iter_ret = 0;
4552     struct btrfs_root *root;
4553     struct btrfs_path *path;
4554     struct btrfs_key key;
4555     struct btrfs_key found_key;
4556     iterate_inode_ref_t cb;
4557     int pending_move = 0;
4558 
4559     path = alloc_path_for_send();
4560     if (!path)
4561         return -ENOMEM;
4562 
4563     if (cmd == BTRFS_COMPARE_TREE_NEW) {
4564         root = sctx->send_root;
4565         cb = record_new_ref_if_needed;
4566     } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4567         root = sctx->parent_root;
4568         cb = record_deleted_ref_if_needed;
4569     } else {
4570         btrfs_err(sctx->send_root->fs_info,
4571                 "Wrong command %d in process_all_refs", cmd);
4572         ret = -EINVAL;
4573         goto out;
4574     }
4575 
4576     key.objectid = sctx->cmp_key->objectid;
4577     key.type = BTRFS_INODE_REF_KEY;
4578     key.offset = 0;
4579     btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
4580         if (found_key.objectid != key.objectid ||
4581             (found_key.type != BTRFS_INODE_REF_KEY &&
4582              found_key.type != BTRFS_INODE_EXTREF_KEY))
4583             break;
4584 
4585         ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4586         if (ret < 0)
4587             goto out;
4588     }
4589     /* Catch error found during iteration */
4590     if (iter_ret < 0) {
4591         ret = iter_ret;
4592         goto out;
4593     }
4594     btrfs_release_path(path);
4595 
4596     /*
4597      * We don't actually care about pending_move as we are simply
4598      * re-creating this inode and will be rename'ing it into place once we
4599      * rename the parent directory.
4600      */
4601     ret = process_recorded_refs(sctx, &pending_move);
4602 out:
4603     btrfs_free_path(path);
4604     return ret;
4605 }
4606 
4607 static int send_set_xattr(struct send_ctx *sctx,
4608               struct fs_path *path,
4609               const char *name, int name_len,
4610               const char *data, int data_len)
4611 {
4612     int ret = 0;
4613 
4614     ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4615     if (ret < 0)
4616         goto out;
4617 
4618     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4619     TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4620     TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4621 
4622     ret = send_cmd(sctx);
4623 
4624 tlv_put_failure:
4625 out:
4626     return ret;
4627 }
4628 
4629 static int send_remove_xattr(struct send_ctx *sctx,
4630               struct fs_path *path,
4631               const char *name, int name_len)
4632 {
4633     int ret = 0;
4634 
4635     ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4636     if (ret < 0)
4637         goto out;
4638 
4639     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4640     TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4641 
4642     ret = send_cmd(sctx);
4643 
4644 tlv_put_failure:
4645 out:
4646     return ret;
4647 }
4648 
4649 static int __process_new_xattr(int num, struct btrfs_key *di_key,
4650                    const char *name, int name_len, const char *data,
4651                    int data_len, void *ctx)
4652 {
4653     int ret;
4654     struct send_ctx *sctx = ctx;
4655     struct fs_path *p;
4656     struct posix_acl_xattr_header dummy_acl;
4657 
4658     /* Capabilities are emitted by finish_inode_if_needed */
4659     if (!strncmp(name, XATTR_NAME_CAPS, name_len))
4660         return 0;
4661 
4662     p = fs_path_alloc();
4663     if (!p)
4664         return -ENOMEM;
4665 
4666     /*
4667      * This hack is needed because empty acls are stored as zero byte
4668      * data in xattrs. Problem with that is, that receiving these zero byte
4669      * acls will fail later. To fix this, we send a dummy acl list that
4670      * only contains the version number and no entries.
4671      */
4672     if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4673         !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4674         if (data_len == 0) {
4675             dummy_acl.a_version =
4676                     cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4677             data = (char *)&dummy_acl;
4678             data_len = sizeof(dummy_acl);
4679         }
4680     }
4681 
4682     ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4683     if (ret < 0)
4684         goto out;
4685 
4686     ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4687 
4688 out:
4689     fs_path_free(p);
4690     return ret;
4691 }
4692 
4693 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4694                    const char *name, int name_len,
4695                    const char *data, int data_len, void *ctx)
4696 {
4697     int ret;
4698     struct send_ctx *sctx = ctx;
4699     struct fs_path *p;
4700 
4701     p = fs_path_alloc();
4702     if (!p)
4703         return -ENOMEM;
4704 
4705     ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4706     if (ret < 0)
4707         goto out;
4708 
4709     ret = send_remove_xattr(sctx, p, name, name_len);
4710 
4711 out:
4712     fs_path_free(p);
4713     return ret;
4714 }
4715 
4716 static int process_new_xattr(struct send_ctx *sctx)
4717 {
4718     int ret = 0;
4719 
4720     ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4721                    __process_new_xattr, sctx);
4722 
4723     return ret;
4724 }
4725 
4726 static int process_deleted_xattr(struct send_ctx *sctx)
4727 {
4728     return iterate_dir_item(sctx->parent_root, sctx->right_path,
4729                 __process_deleted_xattr, sctx);
4730 }
4731 
4732 struct find_xattr_ctx {
4733     const char *name;
4734     int name_len;
4735     int found_idx;
4736     char *found_data;
4737     int found_data_len;
4738 };
4739 
4740 static int __find_xattr(int num, struct btrfs_key *di_key, const char *name,
4741             int name_len, const char *data, int data_len, void *vctx)
4742 {
4743     struct find_xattr_ctx *ctx = vctx;
4744 
4745     if (name_len == ctx->name_len &&
4746         strncmp(name, ctx->name, name_len) == 0) {
4747         ctx->found_idx = num;
4748         ctx->found_data_len = data_len;
4749         ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4750         if (!ctx->found_data)
4751             return -ENOMEM;
4752         return 1;
4753     }
4754     return 0;
4755 }
4756 
4757 static int find_xattr(struct btrfs_root *root,
4758               struct btrfs_path *path,
4759               struct btrfs_key *key,
4760               const char *name, int name_len,
4761               char **data, int *data_len)
4762 {
4763     int ret;
4764     struct find_xattr_ctx ctx;
4765 
4766     ctx.name = name;
4767     ctx.name_len = name_len;
4768     ctx.found_idx = -1;
4769     ctx.found_data = NULL;
4770     ctx.found_data_len = 0;
4771 
4772     ret = iterate_dir_item(root, path, __find_xattr, &ctx);
4773     if (ret < 0)
4774         return ret;
4775 
4776     if (ctx.found_idx == -1)
4777         return -ENOENT;
4778     if (data) {
4779         *data = ctx.found_data;
4780         *data_len = ctx.found_data_len;
4781     } else {
4782         kfree(ctx.found_data);
4783     }
4784     return ctx.found_idx;
4785 }
4786 
4787 
4788 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4789                        const char *name, int name_len,
4790                        const char *data, int data_len,
4791                        void *ctx)
4792 {
4793     int ret;
4794     struct send_ctx *sctx = ctx;
4795     char *found_data = NULL;
4796     int found_data_len  = 0;
4797 
4798     ret = find_xattr(sctx->parent_root, sctx->right_path,
4799              sctx->cmp_key, name, name_len, &found_data,
4800              &found_data_len);
4801     if (ret == -ENOENT) {
4802         ret = __process_new_xattr(num, di_key, name, name_len, data,
4803                       data_len, ctx);
4804     } else if (ret >= 0) {
4805         if (data_len != found_data_len ||
4806             memcmp(data, found_data, data_len)) {
4807             ret = __process_new_xattr(num, di_key, name, name_len,
4808                           data, data_len, ctx);
4809         } else {
4810             ret = 0;
4811         }
4812     }
4813 
4814     kfree(found_data);
4815     return ret;
4816 }
4817 
4818 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4819                        const char *name, int name_len,
4820                        const char *data, int data_len,
4821                        void *ctx)
4822 {
4823     int ret;
4824     struct send_ctx *sctx = ctx;
4825 
4826     ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4827              name, name_len, NULL, NULL);
4828     if (ret == -ENOENT)
4829         ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4830                           data_len, ctx);
4831     else if (ret >= 0)
4832         ret = 0;
4833 
4834     return ret;
4835 }
4836 
4837 static int process_changed_xattr(struct send_ctx *sctx)
4838 {
4839     int ret = 0;
4840 
4841     ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4842             __process_changed_new_xattr, sctx);
4843     if (ret < 0)
4844         goto out;
4845     ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4846             __process_changed_deleted_xattr, sctx);
4847 
4848 out:
4849     return ret;
4850 }
4851 
4852 static int process_all_new_xattrs(struct send_ctx *sctx)
4853 {
4854     int ret = 0;
4855     int iter_ret = 0;
4856     struct btrfs_root *root;
4857     struct btrfs_path *path;
4858     struct btrfs_key key;
4859     struct btrfs_key found_key;
4860 
4861     path = alloc_path_for_send();
4862     if (!path)
4863         return -ENOMEM;
4864 
4865     root = sctx->send_root;
4866 
4867     key.objectid = sctx->cmp_key->objectid;
4868     key.type = BTRFS_XATTR_ITEM_KEY;
4869     key.offset = 0;
4870     btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
4871         if (found_key.objectid != key.objectid ||
4872             found_key.type != key.type) {
4873             ret = 0;
4874             break;
4875         }
4876 
4877         ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
4878         if (ret < 0)
4879             break;
4880     }
4881     /* Catch error found during iteration */
4882     if (iter_ret < 0)
4883         ret = iter_ret;
4884 
4885     btrfs_free_path(path);
4886     return ret;
4887 }
4888 
4889 static inline u64 max_send_read_size(const struct send_ctx *sctx)
4890 {
4891     return sctx->send_max_size - SZ_16K;
4892 }
4893 
4894 static int put_data_header(struct send_ctx *sctx, u32 len)
4895 {
4896     if (WARN_ON_ONCE(sctx->put_data))
4897         return -EINVAL;
4898     sctx->put_data = true;
4899     if (sctx->proto >= 2) {
4900         /*
4901          * Since v2, the data attribute header doesn't include a length,
4902          * it is implicitly to the end of the command.
4903          */
4904         if (sctx->send_max_size - sctx->send_size < sizeof(__le16) + len)
4905             return -EOVERFLOW;
4906         put_unaligned_le16(BTRFS_SEND_A_DATA, sctx->send_buf + sctx->send_size);
4907         sctx->send_size += sizeof(__le16);
4908     } else {
4909         struct btrfs_tlv_header *hdr;
4910 
4911         if (sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len)
4912             return -EOVERFLOW;
4913         hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size);
4914         put_unaligned_le16(BTRFS_SEND_A_DATA, &hdr->tlv_type);
4915         put_unaligned_le16(len, &hdr->tlv_len);
4916         sctx->send_size += sizeof(*hdr);
4917     }
4918     return 0;
4919 }
4920 
4921 static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
4922 {
4923     struct btrfs_root *root = sctx->send_root;
4924     struct btrfs_fs_info *fs_info = root->fs_info;
4925     struct page *page;
4926     pgoff_t index = offset >> PAGE_SHIFT;
4927     pgoff_t last_index;
4928     unsigned pg_offset = offset_in_page(offset);
4929     int ret;
4930 
4931     ret = put_data_header(sctx, len);
4932     if (ret)
4933         return ret;
4934 
4935     last_index = (offset + len - 1) >> PAGE_SHIFT;
4936 
4937     while (index <= last_index) {
4938         unsigned cur_len = min_t(unsigned, len,
4939                      PAGE_SIZE - pg_offset);
4940 
4941         page = find_lock_page(sctx->cur_inode->i_mapping, index);
4942         if (!page) {
4943             page_cache_sync_readahead(sctx->cur_inode->i_mapping,
4944                           &sctx->ra, NULL, index,
4945                           last_index + 1 - index);
4946 
4947             page = find_or_create_page(sctx->cur_inode->i_mapping,
4948                            index, GFP_KERNEL);
4949             if (!page) {
4950                 ret = -ENOMEM;
4951                 break;
4952             }
4953         }
4954 
4955         if (PageReadahead(page))
4956             page_cache_async_readahead(sctx->cur_inode->i_mapping,
4957                            &sctx->ra, NULL, page_folio(page),
4958                            index, last_index + 1 - index);
4959 
4960         if (!PageUptodate(page)) {
4961             btrfs_read_folio(NULL, page_folio(page));
4962             lock_page(page);
4963             if (!PageUptodate(page)) {
4964                 unlock_page(page);
4965                 btrfs_err(fs_info,
4966             "send: IO error at offset %llu for inode %llu root %llu",
4967                     page_offset(page), sctx->cur_ino,
4968                     sctx->send_root->root_key.objectid);
4969                 put_page(page);
4970                 ret = -EIO;
4971                 break;
4972             }
4973         }
4974 
4975         memcpy_from_page(sctx->send_buf + sctx->send_size, page,
4976                  pg_offset, cur_len);
4977         unlock_page(page);
4978         put_page(page);
4979         index++;
4980         pg_offset = 0;
4981         len -= cur_len;
4982         sctx->send_size += cur_len;
4983     }
4984 
4985     return ret;
4986 }
4987 
4988 /*
4989  * Read some bytes from the current inode/file and send a write command to
4990  * user space.
4991  */
4992 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
4993 {
4994     struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
4995     int ret = 0;
4996     struct fs_path *p;
4997 
4998     p = fs_path_alloc();
4999     if (!p)
5000         return -ENOMEM;
5001 
5002     btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
5003 
5004     ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5005     if (ret < 0)
5006         goto out;
5007 
5008     ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5009     if (ret < 0)
5010         goto out;
5011 
5012     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5013     TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5014     ret = put_file_data(sctx, offset, len);
5015     if (ret < 0)
5016         goto out;
5017 
5018     ret = send_cmd(sctx);
5019 
5020 tlv_put_failure:
5021 out:
5022     fs_path_free(p);
5023     return ret;
5024 }
5025 
5026 /*
5027  * Send a clone command to user space.
5028  */
5029 static int send_clone(struct send_ctx *sctx,
5030               u64 offset, u32 len,
5031               struct clone_root *clone_root)
5032 {
5033     int ret = 0;
5034     struct fs_path *p;
5035     u64 gen;
5036 
5037     btrfs_debug(sctx->send_root->fs_info,
5038             "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
5039             offset, len, clone_root->root->root_key.objectid,
5040             clone_root->ino, clone_root->offset);
5041 
5042     p = fs_path_alloc();
5043     if (!p)
5044         return -ENOMEM;
5045 
5046     ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
5047     if (ret < 0)
5048         goto out;
5049 
5050     ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5051     if (ret < 0)
5052         goto out;
5053 
5054     TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5055     TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
5056     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5057 
5058     if (clone_root->root == sctx->send_root) {
5059         ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
5060                 &gen, NULL, NULL, NULL, NULL, NULL);
5061         if (ret < 0)
5062             goto out;
5063         ret = get_cur_path(sctx, clone_root->ino, gen, p);
5064     } else {
5065         ret = get_inode_path(clone_root->root, clone_root->ino, p);
5066     }
5067     if (ret < 0)
5068         goto out;
5069 
5070     /*
5071      * If the parent we're using has a received_uuid set then use that as
5072      * our clone source as that is what we will look for when doing a
5073      * receive.
5074      *
5075      * This covers the case that we create a snapshot off of a received
5076      * subvolume and then use that as the parent and try to receive on a
5077      * different host.
5078      */
5079     if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
5080         TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5081                  clone_root->root->root_item.received_uuid);
5082     else
5083         TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5084                  clone_root->root->root_item.uuid);
5085     TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
5086             btrfs_root_ctransid(&clone_root->root->root_item));
5087     TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
5088     TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
5089             clone_root->offset);
5090 
5091     ret = send_cmd(sctx);
5092 
5093 tlv_put_failure:
5094 out:
5095     fs_path_free(p);
5096     return ret;
5097 }
5098 
5099 /*
5100  * Send an update extent command to user space.
5101  */
5102 static int send_update_extent(struct send_ctx *sctx,
5103                   u64 offset, u32 len)
5104 {
5105     int ret = 0;
5106     struct fs_path *p;
5107 
5108     p = fs_path_alloc();
5109     if (!p)
5110         return -ENOMEM;
5111 
5112     ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
5113     if (ret < 0)
5114         goto out;
5115 
5116     ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5117     if (ret < 0)
5118         goto out;
5119 
5120     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5121     TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5122     TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
5123 
5124     ret = send_cmd(sctx);
5125 
5126 tlv_put_failure:
5127 out:
5128     fs_path_free(p);
5129     return ret;
5130 }
5131 
5132 static int send_hole(struct send_ctx *sctx, u64 end)
5133 {
5134     struct fs_path *p = NULL;
5135     u64 read_size = max_send_read_size(sctx);
5136     u64 offset = sctx->cur_inode_last_extent;
5137     int ret = 0;
5138 
5139     /*
5140      * A hole that starts at EOF or beyond it. Since we do not yet support
5141      * fallocate (for extent preallocation and hole punching), sending a
5142      * write of zeroes starting at EOF or beyond would later require issuing
5143      * a truncate operation which would undo the write and achieve nothing.
5144      */
5145     if (offset >= sctx->cur_inode_size)
5146         return 0;
5147 
5148     /*
5149      * Don't go beyond the inode's i_size due to prealloc extents that start
5150      * after the i_size.
5151      */
5152     end = min_t(u64, end, sctx->cur_inode_size);
5153 
5154     if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5155         return send_update_extent(sctx, offset, end - offset);
5156 
5157     p = fs_path_alloc();
5158     if (!p)
5159         return -ENOMEM;
5160     ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5161     if (ret < 0)
5162         goto tlv_put_failure;
5163     while (offset < end) {
5164         u64 len = min(end - offset, read_size);
5165 
5166         ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5167         if (ret < 0)
5168             break;
5169         TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5170         TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5171         ret = put_data_header(sctx, len);
5172         if (ret < 0)
5173             break;
5174         memset(sctx->send_buf + sctx->send_size, 0, len);
5175         sctx->send_size += len;
5176         ret = send_cmd(sctx);
5177         if (ret < 0)
5178             break;
5179         offset += len;
5180     }
5181     sctx->cur_inode_next_write_offset = offset;
5182 tlv_put_failure:
5183     fs_path_free(p);
5184     return ret;
5185 }
5186 
5187 static int send_encoded_inline_extent(struct send_ctx *sctx,
5188                       struct btrfs_path *path, u64 offset,
5189                       u64 len)
5190 {
5191     struct btrfs_root *root = sctx->send_root;
5192     struct btrfs_fs_info *fs_info = root->fs_info;
5193     struct inode *inode;
5194     struct fs_path *fspath;
5195     struct extent_buffer *leaf = path->nodes[0];
5196     struct btrfs_key key;
5197     struct btrfs_file_extent_item *ei;
5198     u64 ram_bytes;
5199     size_t inline_size;
5200     int ret;
5201 
5202     inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
5203     if (IS_ERR(inode))
5204         return PTR_ERR(inode);
5205 
5206     fspath = fs_path_alloc();
5207     if (!fspath) {
5208         ret = -ENOMEM;
5209         goto out;
5210     }
5211 
5212     ret = begin_cmd(sctx, BTRFS_SEND_C_ENCODED_WRITE);
5213     if (ret < 0)
5214         goto out;
5215 
5216     ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
5217     if (ret < 0)
5218         goto out;
5219 
5220     btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5221     ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
5222     ram_bytes = btrfs_file_extent_ram_bytes(leaf, ei);
5223     inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
5224 
5225     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, fspath);
5226     TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5227     TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_FILE_LEN,
5228             min(key.offset + ram_bytes - offset, len));
5229     TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_LEN, ram_bytes);
5230     TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_OFFSET, offset - key.offset);
5231     ret = btrfs_encoded_io_compression_from_extent(fs_info,
5232                 btrfs_file_extent_compression(leaf, ei));
5233     if (ret < 0)
5234         goto out;
5235     TLV_PUT_U32(sctx, BTRFS_SEND_A_COMPRESSION, ret);
5236 
5237     ret = put_data_header(sctx, inline_size);
5238     if (ret < 0)
5239         goto out;
5240     read_extent_buffer(leaf, sctx->send_buf + sctx->send_size,
5241                btrfs_file_extent_inline_start(ei), inline_size);
5242     sctx->send_size += inline_size;
5243 
5244     ret = send_cmd(sctx);
5245 
5246 tlv_put_failure:
5247 out:
5248     fs_path_free(fspath);
5249     iput(inode);
5250     return ret;
5251 }
5252 
5253 static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
5254                    u64 offset, u64 len)
5255 {
5256     struct btrfs_root *root = sctx->send_root;
5257     struct btrfs_fs_info *fs_info = root->fs_info;
5258     struct inode *inode;
5259     struct fs_path *fspath;
5260     struct extent_buffer *leaf = path->nodes[0];
5261     struct btrfs_key key;
5262     struct btrfs_file_extent_item *ei;
5263     u64 disk_bytenr, disk_num_bytes;
5264     u32 data_offset;
5265     struct btrfs_cmd_header *hdr;
5266     u32 crc;
5267     int ret;
5268 
5269     inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
5270     if (IS_ERR(inode))
5271         return PTR_ERR(inode);
5272 
5273     fspath = fs_path_alloc();
5274     if (!fspath) {
5275         ret = -ENOMEM;
5276         goto out;
5277     }
5278 
5279     ret = begin_cmd(sctx, BTRFS_SEND_C_ENCODED_WRITE);
5280     if (ret < 0)
5281         goto out;
5282 
5283     ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
5284     if (ret < 0)
5285         goto out;
5286 
5287     btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5288     ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
5289     disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
5290     disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, ei);
5291 
5292     TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, fspath);
5293     TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5294     TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_FILE_LEN,
5295             min(key.offset + btrfs_file_extent_num_bytes(leaf, ei) - offset,
5296             len));
5297     TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_LEN,
5298             btrfs_file_extent_ram_bytes(leaf, ei));
5299     TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_OFFSET,
5300             offset - key.offset + btrfs_file_extent_offset(leaf, ei));
5301     ret = btrfs_encoded_io_compression_from_extent(fs_info,
5302                 btrfs_file_extent_compression(leaf, ei));
5303     if (ret < 0)
5304         goto out;
5305     TLV_PUT_U32(sctx, BTRFS_SEND_A_COMPRESSION, ret);
5306     TLV_PUT_U32(sctx, BTRFS_SEND_A_ENCRYPTION, 0);
5307 
5308     ret = put_data_header(sctx, disk_num_bytes);
5309     if (ret < 0)
5310         goto out;
5311 
5312     /*
5313      * We want to do I/O directly into the send buffer, so get the next page
5314      * boundary in the send buffer. This means that there may be a gap
5315      * between the beginning of the command and the file data.
5316      */
5317     data_offset = ALIGN(sctx->send_size, PAGE_SIZE);
5318     if (data_offset > sctx->send_max_size ||
5319         sctx->send_max_size - data_offset < disk_num_bytes) {
5320         ret = -EOVERFLOW;
5321         goto out;
5322     }
5323 
5324     /*
5325      * Note that send_buf is a mapping of send_buf_pages, so this is really
5326      * reading into send_buf.
5327      */
5328     ret = btrfs_encoded_read_regular_fill_pages(BTRFS_I(inode), offset,
5329                             disk_bytenr, disk_num_bytes,
5330                             sctx->send_buf_pages +
5331                             (data_offset >> PAGE_SHIFT));
5332     if (ret)
5333         goto out;
5334 
5335     hdr = (struct btrfs_cmd_header *)sctx->send_buf;
5336     hdr->len = cpu_to_le32(sctx->send_size + disk_num_bytes - sizeof(*hdr));
5337     hdr->crc = 0;
5338     crc = btrfs_crc32c(0, sctx->send_buf, sctx->send_size);
5339     crc = btrfs_crc32c(crc, sctx->send_buf + data_offset, disk_num_bytes);
5340     hdr->crc = cpu_to_le32(crc);
5341 
5342     ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
5343             &sctx->send_off);
5344     if (!ret) {
5345         ret = write_buf(sctx->send_filp, sctx->send_buf + data_offset,
5346                 disk_num_bytes, &sctx->send_off);
5347     }
5348     sctx->send_size = 0;
5349     sctx->put_data = false;
5350 
5351 tlv_put_failure:
5352 out:
5353     fs_path_free(fspath);
5354     iput(inode);
5355     return ret;
5356 }
5357 
5358 static int send_extent_data(struct send_ctx *sctx, struct btrfs_path *path,
5359                 const u64 offset, const u64 len)
5360 {
5361     const u64 end = offset + len;
5362     struct extent_buffer *leaf = path->nodes[0];
5363     struct btrfs_file_extent_item *ei;
5364     u64 read_size = max_send_read_size(sctx);
5365     u64 sent = 0;
5366 
5367     if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5368         return send_update_extent(sctx, offset, len);
5369 
5370     ei = btrfs_item_ptr(leaf, path->slots[0],
5371                 struct btrfs_file_extent_item);
5372     if ((sctx->flags & BTRFS_SEND_FLAG_COMPRESSED) &&
5373         btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) {
5374         bool is_inline = (btrfs_file_extent_type(leaf, ei) ==
5375                   BTRFS_FILE_EXTENT_INLINE);
5376 
5377         /*
5378          * Send the compressed extent unless the compressed data is
5379          * larger than the decompressed data. This can happen if we're
5380          * not sending the entire extent, either because it has been
5381          * partially overwritten/truncated or because this is a part of
5382          * the extent that we couldn't clone in clone_range().
5383          */
5384         if (is_inline &&
5385             btrfs_file_extent_inline_item_len(leaf,
5386                               path->slots[0]) <= len) {
5387             return send_encoded_inline_extent(sctx, path, offset,
5388                               len);
5389         } else if (!is_inline &&
5390                btrfs_file_extent_disk_num_bytes(leaf, ei) <= len) {
5391             return send_encoded_extent(sctx, path, offset, len);
5392         }
5393     }
5394 
5395     if (sctx->cur_inode == NULL) {
5396         struct btrfs_root *root = sctx->send_root;
5397 
5398         sctx->cur_inode = btrfs_iget(root->fs_info->sb, sctx->cur_ino, root);
5399         if (IS_ERR(sctx->cur_inode)) {
5400             int err = PTR_ERR(sctx->cur_inode);
5401 
5402             sctx->cur_inode = NULL;
5403             return err;
5404         }
5405         memset(&sctx->ra, 0, sizeof(struct file_ra_state));
5406         file_ra_state_init(&sctx->ra, sctx->cur_inode->i_mapping);
5407 
5408         /*
5409          * It's very likely there are no pages from this inode in the page
5410          * cache, so after reading extents and sending their data, we clean
5411          * the page cache to avoid trashing the page cache (adding pressure
5412          * to the page cache and forcing eviction of other data more useful
5413          * for applications).
5414          *
5415          * We decide if we should clean the page cache simply by checking
5416          * if the inode's mapping nrpages is 0 when we first open it, and
5417          * not by using something like filemap_range_has_page() before
5418          * reading an extent because when we ask the readahead code to
5419          * read a given file range, it may (and almost always does) read
5420          * pages from beyond that range (see the documentation for
5421          * page_cache_sync_readahead()), so it would not be reliable,
5422          * because after reading the first extent future calls to
5423          * filemap_range_has_page() would return true because the readahead
5424          * on the previous extent resulted in reading pages of the current
5425          * extent as well.
5426          */
5427         sctx->clean_page_cache = (sctx->cur_inode->i_mapping->nrpages == 0);
5428         sctx->page_cache_clear_start = round_down(offset, PAGE_SIZE);
5429     }
5430 
5431     while (sent < len) {
5432         u64 size = min(len - sent, read_size);
5433         int ret;
5434 
5435         ret = send_write(sctx, offset + sent, size);
5436         if (ret < 0)
5437             return ret;
5438         sent += size;
5439     }
5440 
5441     if (sctx->clean_page_cache && IS_ALIGNED(end, PAGE_SIZE)) {
5442         /*
5443          * Always operate only on ranges that are a multiple of the page
5444          * size. This is not only to prevent zeroing parts of a page in
5445          * the case of subpage sector size, but also to guarantee we evict
5446          * pages, as passing a range that is smaller than page size does
5447          * not evict the respective page (only zeroes part of its content).
5448          *
5449          * Always start from the end offset of the last range cleared.
5450          * This is because the readahead code may (and very often does)
5451          * reads pages beyond the range we request for readahead. So if
5452          * we have an extent layout like this:
5453          *
5454          *            [ extent A ] [ extent B ] [ extent C ]
5455          *
5456          * When we ask page_cache_sync_readahead() to read extent A, it
5457          * may also trigger reads for pages of extent B. If we are doing
5458          * an incremental send and extent B has not changed between the
5459          * parent and send snapshots, some or all of its pages may end
5460          * up being read and placed in the page cache. So when truncating
5461          * the page cache we always start from the end offset of the
5462          * previously processed extent up to the end of the current
5463          * extent.
5464          */
5465         truncate_inode_pages_range(&sctx->cur_inode->i_data,
5466                        sctx->page_cache_clear_start,
5467                        end - 1);
5468         sctx->page_cache_clear_start = end;
5469     }
5470 
5471     return 0;
5472 }
5473 
5474 /*
5475  * Search for a capability xattr related to sctx->cur_ino. If the capability is
5476  * found, call send_set_xattr function to emit it.
5477  *
5478  * Return 0 if there isn't a capability, or when the capability was emitted
5479  * successfully, or < 0 if an error occurred.
5480  */
5481 static int send_capabilities(struct send_ctx *sctx)
5482 {
5483     struct fs_path *fspath = NULL;
5484     struct btrfs_path *path;
5485     struct btrfs_dir_item *di;
5486     struct extent_buffer *leaf;
5487     unsigned long data_ptr;
5488     char *buf = NULL;
5489     int buf_len;
5490     int ret = 0;
5491 
5492     path = alloc_path_for_send();
5493     if (!path)
5494         return -ENOMEM;
5495 
5496     di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
5497                 XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
5498     if (!di) {
5499         /* There is no xattr for this inode */
5500         goto out;
5501     } else if (IS_ERR(di)) {
5502         ret = PTR_ERR(di);
5503         goto out;
5504     }
5505 
5506     leaf = path->nodes[0];
5507     buf_len = btrfs_dir_data_len(leaf, di);
5508 
5509     fspath = fs_path_alloc();
5510     buf = kmalloc(buf_len, GFP_KERNEL);
5511     if (!fspath || !buf) {
5512         ret = -ENOMEM;
5513         goto out;
5514     }
5515 
5516     ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
5517     if (ret < 0)
5518         goto out;
5519 
5520     data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
5521     read_extent_buffer(leaf, buf, data_ptr, buf_len);
5522 
5523     ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
5524             strlen(XATTR_NAME_CAPS), buf, buf_len);
5525 out:
5526     kfree(buf);
5527     fs_path_free(fspath);
5528     btrfs_free_path(path);
5529     return ret;
5530 }
5531 
5532 static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path,
5533                struct clone_root *clone_root, const u64 disk_byte,
5534                u64 data_offset, u64 offset, u64 len)
5535 {
5536     struct btrfs_path *path;
5537     struct btrfs_key key;
5538     int ret;
5539     u64 clone_src_i_size = 0;
5540 
5541     /*
5542      * Prevent cloning from a zero offset with a length matching the sector
5543      * size because in some scenarios this will make the receiver fail.
5544      *
5545      * For example, if in the source filesystem the extent at offset 0
5546      * has a length of sectorsize and it was written using direct IO, then
5547      * it can never be an inline extent (even if compression is enabled).
5548      * Then this extent can be cloned in the original filesystem to a non
5549      * zero file offset, but it may not be possible to clone in the
5550      * destination filesystem because it can be inlined due to compression
5551      * on the destination filesystem (as the receiver's write operations are
5552      * always done using buffered IO). The same happens when the original
5553      * filesystem does not have compression enabled but the destination
5554      * filesystem has.
5555      */
5556     if (clone_root->offset == 0 &&
5557         len == sctx->send_root->fs_info->sectorsize)
5558         return send_extent_data(sctx, dst_path, offset, len);
5559 
5560     path = alloc_path_for_send();
5561     if (!path)
5562         return -ENOMEM;
5563 
5564     /*
5565      * There are inodes that have extents that lie behind its i_size. Don't
5566      * accept clones from these extents.
5567      */
5568     ret = __get_inode_info(clone_root->root, path, clone_root->ino,
5569                    &clone_src_i_size, NULL, NULL, NULL, NULL, NULL,
5570                    NULL);
5571     btrfs_release_path(path);
5572     if (ret < 0)
5573         goto out;
5574 
5575     /*
5576      * We can't send a clone operation for the entire range if we find
5577      * extent items in the respective range in the source file that
5578      * refer to different extents or if we find holes.
5579      * So check for that and do a mix of clone and regular write/copy
5580      * operations if needed.
5581      *
5582      * Example:
5583      *
5584      * mkfs.btrfs -f /dev/sda
5585      * mount /dev/sda /mnt
5586      * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
5587      * cp --reflink=always /mnt/foo /mnt/bar
5588      * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
5589      * btrfs subvolume snapshot -r /mnt /mnt/snap
5590      *
5591      * If when we send the snapshot and we are processing file bar (which
5592      * has a higher inode number than foo) we blindly send a clone operation
5593      * for the [0, 100K[ range from foo to bar, the receiver ends up getting
5594      * a file bar that matches the content of file foo - iow, doesn't match
5595      * the content from bar in the original filesystem.
5596      */
5597     key.objectid = clone_root->ino;
5598     key.type = BTRFS_EXTENT_DATA_KEY;
5599     key.offset = clone_root->offset;
5600     ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
5601     if (ret < 0)
5602         goto out;
5603     if (ret > 0 && path->slots[0] > 0) {
5604         btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
5605         if (key.objectid == clone_root->ino &&
5606             key.type == BTRFS_EXTENT_DATA_KEY)
5607             path->slots[0]--;
5608     }
5609 
5610     while (true) {
5611         struct extent_buffer *leaf = path->nodes[0];
5612         int slot = path->slots[0];
5613         struct btrfs_file_extent_item *ei;
5614         u8 type;
5615         u64 ext_len;
5616         u64 clone_len;
5617         u64 clone_data_offset;
5618 
5619         if (slot >= btrfs_header_nritems(leaf)) {
5620             ret = btrfs_next_leaf(clone_root->root, path);
5621             if (ret < 0)
5622                 goto out;
5623             else if (ret > 0)
5624                 break;
5625             continue;
5626         }
5627 
5628         btrfs_item_key_to_cpu(leaf, &key, slot);
5629 
5630         /*
5631          * We might have an implicit trailing hole (NO_HOLES feature
5632          * enabled). We deal with it after leaving this loop.
5633          */
5634         if (key.objectid != clone_root->ino ||
5635             key.type != BTRFS_EXTENT_DATA_KEY)
5636             break;
5637 
5638         ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5639         type = btrfs_file_extent_type(leaf, ei);
5640         if (type == BTRFS_FILE_EXTENT_INLINE) {
5641             ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
5642             ext_len = PAGE_ALIGN(ext_len);
5643         } else {
5644             ext_len = btrfs_file_extent_num_bytes(leaf, ei);
5645         }
5646 
5647         if (key.offset + ext_len <= clone_root->offset)
5648             goto next;
5649 
5650         if (key.offset > clone_root->offset) {
5651             /* Implicit hole, NO_HOLES feature enabled. */
5652             u64 hole_len = key.offset - clone_root->offset;
5653 
5654             if (hole_len > len)
5655                 hole_len = len;
5656             ret = send_extent_data(sctx, dst_path, offset,
5657                            hole_len);
5658             if (ret < 0)
5659                 goto out;
5660 
5661             len -= hole_len;
5662             if (len == 0)
5663                 break;
5664             offset += hole_len;
5665             clone_root->offset += hole_len;
5666             data_offset += hole_len;
5667         }
5668 
5669         if (key.offset >= clone_root->offset + len)
5670             break;
5671 
5672         if (key.offset >= clone_src_i_size)
5673             break;
5674 
5675         if (key.offset + ext_len > clone_src_i_size)
5676             ext_len = clone_src_i_size - key.offset;
5677 
5678         clone_data_offset = btrfs_file_extent_offset(leaf, ei);
5679         if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
5680             clone_root->offset = key.offset;
5681             if (clone_data_offset < data_offset &&
5682                 clone_data_offset + ext_len > data_offset) {
5683                 u64 extent_offset;
5684 
5685                 extent_offset = data_offset - clone_data_offset;
5686                 ext_len -= extent_offset;
5687                 clone_data_offset += extent_offset;
5688                 clone_root->offset += extent_offset;
5689             }
5690         }
5691 
5692         clone_len = min_t(u64, ext_len, len);
5693 
5694         if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
5695             clone_data_offset == data_offset) {
5696             const u64 src_end = clone_root->offset + clone_len;
5697             const u64 sectorsize = SZ_64K;
5698 
5699             /*
5700              * We can't clone the last block, when its size is not
5701              * sector size aligned, into the middle of a file. If we
5702              * do so, the receiver will get a failure (-EINVAL) when
5703              * trying to clone or will silently corrupt the data in
5704              * the destination file if it's on a kernel without the
5705              * fix introduced by commit ac765f83f1397646
5706              * ("Btrfs: fix data corruption due to cloning of eof
5707              * block).
5708              *
5709              * So issue a clone of the aligned down range plus a
5710              * regular write for the eof block, if we hit that case.
5711              *
5712              * Also, we use the maximum possible sector size, 64K,
5713              * because we don't know what's the sector size of the
5714              * filesystem that receives the stream, so we have to
5715              * assume the largest possible sector size.
5716              */
5717             if (src_end == clone_src_i_size &&
5718                 !IS_ALIGNED(src_end, sectorsize) &&
5719                 offset + clone_len < sctx->cur_inode_size) {
5720                 u64 slen;
5721 
5722                 slen = ALIGN_DOWN(src_end - clone_root->offset,
5723                           sectorsize);
5724                 if (slen > 0) {
5725                     ret = send_clone(sctx, offset, slen,
5726                              clone_root);
5727                     if (ret < 0)
5728                         goto out;
5729                 }
5730                 ret = send_extent_data(sctx, dst_path,
5731                                offset + slen,
5732                                clone_len - slen);
5733             } else {
5734                 ret = send_clone(sctx, offset, clone_len,
5735                          clone_root);
5736             }
5737         } else {
5738             ret = send_extent_data(sctx, dst_path, offset,
5739                            clone_len);
5740         }
5741 
5742         if (ret < 0)
5743             goto out;
5744 
5745         len -= clone_len;
5746         if (len == 0)
5747             break;
5748         offset += clone_len;
5749         clone_root->offset += clone_len;
5750 
5751         /*
5752          * If we are cloning from the file we are currently processing,
5753          * and using the send root as the clone root, we must stop once
5754          * the current clone offset reaches the current eof of the file
5755          * at the receiver, otherwise we would issue an invalid clone
5756          * operation (source range going beyond eof) and cause the
5757          * receiver to fail. So if we reach the current eof, bail out
5758          * and fallback to a regular write.
5759          */
5760         if (clone_root->root == sctx->send_root &&
5761             clone_root->ino == sctx->cur_ino &&
5762             clone_root->offset >= sctx->cur_inode_next_write_offset)
5763             break;
5764 
5765         data_offset += clone_len;
5766 next:
5767         path->slots[0]++;
5768     }
5769 
5770     if (len > 0)
5771         ret = send_extent_data(sctx, dst_path, offset, len);
5772     else
5773         ret = 0;
5774 out:
5775     btrfs_free_path(path);
5776     return ret;
5777 }
5778 
5779 static int send_write_or_clone(struct send_ctx *sctx,
5780                    struct btrfs_path *path,
5781                    struct btrfs_key *key,
5782                    struct clone_root *clone_root)
5783 {
5784     int ret = 0;
5785     u64 offset = key->offset;
5786     u64 end;
5787     u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
5788 
5789     end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
5790     if (offset >= end)
5791         return 0;
5792 
5793     if (clone_root && IS_ALIGNED(end, bs)) {
5794         struct btrfs_file_extent_item *ei;
5795         u64 disk_byte;
5796         u64 data_offset;
5797 
5798         ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5799                     struct btrfs_file_extent_item);
5800         disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
5801         data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
5802         ret = clone_range(sctx, path, clone_root, disk_byte,
5803                   data_offset, offset, end - offset);
5804     } else {
5805         ret = send_extent_data(sctx, path, offset, end - offset);
5806     }
5807     sctx->cur_inode_next_write_offset = end;
5808     return ret;
5809 }
5810 
5811 static int is_extent_unchanged(struct send_ctx *sctx,
5812                    struct btrfs_path *left_path,
5813                    struct btrfs_key *ekey)
5814 {
5815     int ret = 0;
5816     struct btrfs_key key;
5817     struct btrfs_path *path = NULL;
5818     struct extent_buffer *eb;
5819     int slot;
5820     struct btrfs_key found_key;
5821     struct btrfs_file_extent_item *ei;
5822     u64 left_disknr;
5823     u64 right_disknr;
5824     u64 left_offset;
5825     u64 right_offset;
5826     u64 left_offset_fixed;
5827     u64 left_len;
5828     u64 right_len;
5829     u64 left_gen;
5830     u64 right_gen;
5831     u8 left_type;
5832     u8 right_type;
5833 
5834     path = alloc_path_for_send();
5835     if (!path)
5836         return -ENOMEM;
5837 
5838     eb = left_path->nodes[0];
5839     slot = left_path->slots[0];
5840     ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5841     left_type = btrfs_file_extent_type(eb, ei);
5842 
5843     if (left_type != BTRFS_FILE_EXTENT_REG) {
5844         ret = 0;
5845         goto out;
5846     }
5847     left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5848     left_len = btrfs_file_extent_num_bytes(eb, ei);
5849     left_offset = btrfs_file_extent_offset(eb, ei);
5850     left_gen = btrfs_file_extent_generation(eb, ei);
5851 
5852     /*
5853      * Following comments will refer to these graphics. L is the left
5854      * extents which we are checking at the moment. 1-8 are the right
5855      * extents that we iterate.
5856      *
5857      *       |-----L-----|
5858      * |-1-|-2a-|-3-|-4-|-5-|-6-|
5859      *
5860      *       |-----L-----|
5861      * |--1--|-2b-|...(same as above)
5862      *
5863      * Alternative situation. Happens on files where extents got split.
5864      *       |-----L-----|
5865      * |-----------7-----------|-6-|
5866      *
5867      * Alternative situation. Happens on files which got larger.
5868      *       |-----L-----|
5869      * |-8-|
5870      * Nothing follows after 8.
5871      */
5872 
5873     key.objectid = ekey->objectid;
5874     key.type = BTRFS_EXTENT_DATA_KEY;
5875     key.offset = ekey->offset;
5876     ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
5877     if (ret < 0)
5878         goto out;
5879     if (ret) {
5880         ret = 0;
5881         goto out;
5882     }
5883 
5884     /*
5885      * Handle special case where the right side has no extents at all.
5886      */
5887     eb = path->nodes[0];
5888     slot = path->slots[0];
5889     btrfs_item_key_to_cpu(eb, &found_key, slot);
5890     if (found_key.objectid != key.objectid ||
5891         found_key.type != key.type) {
5892         /* If we're a hole then just pretend nothing changed */
5893         ret = (left_disknr) ? 0 : 1;
5894         goto out;
5895     }
5896 
5897     /*
5898      * We're now on 2a, 2b or 7.
5899      */
5900     key = found_key;
5901     while (key.offset < ekey->offset + left_len) {
5902         ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5903         right_type = btrfs_file_extent_type(eb, ei);
5904         if (right_type != BTRFS_FILE_EXTENT_REG &&
5905             right_type != BTRFS_FILE_EXTENT_INLINE) {
5906             ret = 0;
5907             goto out;
5908         }
5909 
5910         if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5911             right_len = btrfs_file_extent_ram_bytes(eb, ei);
5912             right_len = PAGE_ALIGN(right_len);
5913         } else {
5914             right_len = btrfs_file_extent_num_bytes(eb, ei);
5915         }
5916 
5917         /*
5918          * Are we at extent 8? If yes, we know the extent is changed.
5919          * This may only happen on the first iteration.
5920          */
5921         if (found_key.offset + right_len <= ekey->offset) {
5922             /* If we're a hole just pretend nothing changed */
5923             ret = (left_disknr) ? 0 : 1;
5924             goto out;
5925         }
5926 
5927         /*
5928          * We just wanted to see if when we have an inline extent, what
5929          * follows it is a regular extent (wanted to check the above
5930          * condition for inline extents too). This should normally not
5931          * happen but it's possible for example when we have an inline
5932          * compressed extent representing data with a size matching
5933          * the page size (currently the same as sector size).
5934          */
5935         if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5936             ret = 0;
5937             goto out;
5938         }
5939 
5940         right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5941         right_offset = btrfs_file_extent_offset(eb, ei);
5942         right_gen = btrfs_file_extent_generation(eb, ei);
5943 
5944         left_offset_fixed = left_offset;
5945         if (key.offset < ekey->offset) {
5946             /* Fix the right offset for 2a and 7. */
5947             right_offset += ekey->offset - key.offset;
5948         } else {
5949             /* Fix the left offset for all behind 2a and 2b */
5950             left_offset_fixed += key.offset - ekey->offset;
5951         }
5952 
5953         /*
5954          * Check if we have the same extent.
5955          */
5956         if (left_disknr != right_disknr ||
5957             left_offset_fixed != right_offset ||
5958             left_gen != right_gen) {
5959             ret = 0;
5960             goto out;
5961         }
5962 
5963         /*
5964          * Go to the next extent.
5965          */
5966         ret = btrfs_next_item(sctx->parent_root, path);
5967         if (ret < 0)
5968             goto out;
5969         if (!ret) {
5970             eb = path->nodes[0];
5971             slot = path->slots[0];
5972             btrfs_item_key_to_cpu(eb, &found_key, slot);
5973         }
5974         if (ret || found_key.objectid != key.objectid ||
5975             found_key.type != key.type) {
5976             key.offset += right_len;
5977             break;
5978         }
5979         if (found_key.offset != key.offset + right_len) {
5980             ret = 0;
5981             goto out;
5982         }
5983         key = found_key;
5984     }
5985 
5986     /*
5987      * We're now behind the left extent (treat as unchanged) or at the end
5988      * of the right side (treat as changed).
5989      */
5990     if (key.offset >= ekey->offset + left_len)
5991         ret = 1;
5992     else
5993         ret = 0;
5994 
5995 
5996 out:
5997     btrfs_free_path(path);
5998     return ret;
5999 }
6000 
6001 static int get_last_extent(struct send_ctx *sctx, u64 offset)
6002 {
6003     struct btrfs_path *path;
6004     struct btrfs_root *root = sctx->send_root;
6005     struct btrfs_key key;
6006     int ret;
6007 
6008     path = alloc_path_for_send();
6009     if (!path)
6010         return -ENOMEM;
6011 
6012     sctx->cur_inode_last_extent = 0;
6013 
6014     key.objectid = sctx->cur_ino;
6015     key.type = BTRFS_EXTENT_DATA_KEY;
6016     key.offset = offset;
6017     ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
6018     if (ret < 0)
6019         goto out;
6020     ret = 0;
6021     btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
6022     if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
6023         goto out;
6024 
6025     sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
6026 out:
6027     btrfs_free_path(path);
6028     return ret;
6029 }
6030 
6031 static int range_is_hole_in_parent(struct send_ctx *sctx,
6032                    const u64 start,
6033                    const u64 end)
6034 {
6035     struct btrfs_path *path;
6036     struct btrfs_key key;
6037     struct btrfs_root *root = sctx->parent_root;
6038     u64 search_start = start;
6039     int ret;
6040 
6041     path = alloc_path_for_send();
6042     if (!path)
6043         return -ENOMEM;
6044 
6045     key.objectid = sctx->cur_ino;
6046     key.type = BTRFS_EXTENT_DATA_KEY;
6047     key.offset = search_start;
6048     ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6049     if (ret < 0)
6050         goto out;
6051     if (ret > 0 && path->slots[0] > 0)
6052         path->slots[0]--;
6053 
6054     while (search_start < end) {
6055         struct extent_buffer *leaf = path->nodes[0];
6056         int slot = path->slots[0];
6057         struct btrfs_file_extent_item *fi;
6058         u64 extent_end;
6059 
6060         if (slot >= btrfs_header_nritems(leaf)) {
6061             ret = btrfs_next_leaf(root, path);
6062             if (ret < 0)
6063                 goto out;
6064             else if (ret > 0)
6065                 break;
6066             continue;
6067         }
6068 
6069         btrfs_item_key_to_cpu(leaf, &key, slot);
6070         if (key.objectid < sctx->cur_ino ||
6071             key.type < BTRFS_EXTENT_DATA_KEY)
6072             goto next;
6073         if (key.objectid > sctx->cur_ino ||
6074             key.type > BTRFS_EXTENT_DATA_KEY ||
6075             key.offset >= end)
6076             break;
6077 
6078         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
6079         extent_end = btrfs_file_extent_end(path);
6080         if (extent_end <= start)
6081             goto next;
6082         if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
6083             search_start = extent_end;
6084             goto next;
6085         }
6086         ret = 0;
6087         goto out;
6088 next:
6089         path->slots[0]++;
6090     }
6091     ret = 1;
6092 out:
6093     btrfs_free_path(path);
6094     return ret;
6095 }
6096 
6097 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
6098                struct btrfs_key *key)
6099 {
6100     int ret = 0;
6101 
6102     if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
6103         return 0;
6104 
6105     if (sctx->cur_inode_last_extent == (u64)-1) {
6106         ret = get_last_extent(sctx, key->offset - 1);
6107         if (ret)
6108             return ret;
6109     }
6110 
6111     if (path->slots[0] == 0 &&
6112         sctx->cur_inode_last_extent < key->offset) {
6113         /*
6114          * We might have skipped entire leafs that contained only
6115          * file extent items for our current inode. These leafs have
6116          * a generation number smaller (older) than the one in the
6117          * current leaf and the leaf our last extent came from, and
6118          * are located between these 2 leafs.
6119          */
6120         ret = get_last_extent(sctx, key->offset - 1);
6121         if (ret)
6122             return ret;
6123     }
6124 
6125     if (sctx->cur_inode_last_extent < key->offset) {
6126         ret = range_is_hole_in_parent(sctx,
6127                           sctx->cur_inode_last_extent,
6128                           key->offset);
6129         if (ret < 0)
6130             return ret;
6131         else if (ret == 0)
6132             ret = send_hole(sctx, key->offset);
6133         else
6134             ret = 0;
6135     }
6136     sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
6137     return ret;
6138 }
6139 
6140 static int process_extent(struct send_ctx *sctx,
6141               struct btrfs_path *path,
6142               struct btrfs_key *key)
6143 {
6144     struct clone_root *found_clone = NULL;
6145     int ret = 0;
6146 
6147     if (S_ISLNK(sctx->cur_inode_mode))
6148         return 0;
6149 
6150     if (sctx->parent_root && !sctx->cur_inode_new) {
6151         ret = is_extent_unchanged(sctx, path, key);
6152         if (ret < 0)
6153             goto out;
6154         if (ret) {
6155             ret = 0;
6156             goto out_hole;
6157         }
6158     } else {
6159         struct btrfs_file_extent_item *ei;
6160         u8 type;
6161 
6162         ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
6163                     struct btrfs_file_extent_item);
6164         type = btrfs_file_extent_type(path->nodes[0], ei);
6165         if (type == BTRFS_FILE_EXTENT_PREALLOC ||
6166             type == BTRFS_FILE_EXTENT_REG) {
6167             /*
6168              * The send spec does not have a prealloc command yet,
6169              * so just leave a hole for prealloc'ed extents until
6170              * we have enough commands queued up to justify rev'ing
6171              * the send spec.
6172              */
6173             if (type == BTRFS_FILE_EXTENT_PREALLOC) {
6174                 ret = 0;
6175                 goto out;
6176             }
6177 
6178             /* Have a hole, just skip it. */
6179             if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
6180                 ret = 0;
6181                 goto out;
6182             }
6183         }
6184     }
6185 
6186     ret = find_extent_clone(sctx, path, key->objectid, key->offset,
6187             sctx->cur_inode_size, &found_clone);
6188     if (ret != -ENOENT && ret < 0)
6189         goto out;
6190 
6191     ret = send_write_or_clone(sctx, path, key, found_clone);
6192     if (ret)
6193         goto out;
6194 out_hole:
6195     ret = maybe_send_hole(sctx, path, key);
6196 out:
6197     return ret;
6198 }
6199 
6200 static int process_all_extents(struct send_ctx *sctx)
6201 {
6202     int ret = 0;
6203     int iter_ret = 0;
6204     struct btrfs_root *root;
6205     struct btrfs_path *path;
6206     struct btrfs_key key;
6207     struct btrfs_key found_key;
6208 
6209     root = sctx->send_root;
6210     path = alloc_path_for_send();
6211     if (!path)
6212         return -ENOMEM;
6213 
6214     key.objectid = sctx->cmp_key->objectid;
6215     key.type = BTRFS_EXTENT_DATA_KEY;
6216     key.offset = 0;
6217     btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
6218         if (found_key.objectid != key.objectid ||
6219             found_key.type != key.type) {
6220             ret = 0;
6221             break;
6222         }
6223 
6224         ret = process_extent(sctx, path, &found_key);
6225         if (ret < 0)
6226             break;
6227     }
6228     /* Catch error found during iteration */
6229     if (iter_ret < 0)
6230         ret = iter_ret;
6231 
6232     btrfs_free_path(path);
6233     return ret;
6234 }
6235 
6236 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
6237                        int *pending_move,
6238                        int *refs_processed)
6239 {
6240     int ret = 0;
6241 
6242     if (sctx->cur_ino == 0)
6243         goto out;
6244     if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
6245         sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
6246         goto out;
6247     if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
6248         goto out;
6249 
6250     ret = process_recorded_refs(sctx, pending_move);
6251     if (ret < 0)
6252         goto out;
6253 
6254     *refs_processed = 1;
6255 out:
6256     return ret;
6257 }
6258 
6259 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
6260 {
6261     int ret = 0;
6262     u64 left_mode;
6263     u64 left_uid;
6264     u64 left_gid;
6265     u64 left_fileattr;
6266     u64 right_mode;
6267     u64 right_uid;
6268     u64 right_gid;
6269     u64 right_fileattr;
6270     int need_chmod = 0;
6271     int need_chown = 0;
6272     bool need_fileattr = false;
6273     int need_truncate = 1;
6274     int pending_move = 0;
6275     int refs_processed = 0;
6276 
6277     if (sctx->ignore_cur_inode)
6278         return 0;
6279 
6280     ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
6281                           &refs_processed);
6282     if (ret < 0)
6283         goto out;
6284 
6285     /*
6286      * We have processed the refs and thus need to advance send_progress.
6287      * Now, calls to get_cur_xxx will take the updated refs of the current
6288      * inode into account.
6289      *
6290      * On the other hand, if our current inode is a directory and couldn't
6291      * be moved/renamed because its parent was renamed/moved too and it has
6292      * a higher inode number, we can only move/rename our current inode
6293      * after we moved/renamed its parent. Therefore in this case operate on
6294      * the old path (pre move/rename) of our current inode, and the
6295      * move/rename will be performed later.
6296      */
6297     if (refs_processed && !pending_move)
6298         sctx->send_progress = sctx->cur_ino + 1;
6299 
6300     if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
6301         goto out;
6302     if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
6303         goto out;
6304 
6305     ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
6306             &left_mode, &left_uid, &left_gid, NULL, &left_fileattr);
6307     if (ret < 0)
6308         goto out;
6309 
6310     if (!sctx->parent_root || sctx->cur_inode_new) {
6311         need_chown = 1;
6312         if (!S_ISLNK(sctx->cur_inode_mode))
6313             need_chmod = 1;
6314         if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
6315             need_truncate = 0;
6316     } else {
6317         u64 old_size;
6318 
6319         ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
6320                 &old_size, NULL, &right_mode, &right_uid,
6321                 &right_gid, NULL, &right_fileattr);
6322         if (ret < 0)
6323             goto out;
6324 
6325         if (left_uid != right_uid || left_gid != right_gid)
6326             need_chown = 1;
6327         if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
6328             need_chmod = 1;
6329         if (!S_ISLNK(sctx->cur_inode_mode) && left_fileattr != right_fileattr)
6330             need_fileattr = true;
6331         if ((old_size == sctx->cur_inode_size) ||
6332             (sctx->cur_inode_size > old_size &&
6333              sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
6334             need_truncate = 0;
6335     }
6336 
6337     if (S_ISREG(sctx->cur_inode_mode)) {
6338         if (need_send_hole(sctx)) {
6339             if (sctx->cur_inode_last_extent == (u64)-1 ||
6340                 sctx->cur_inode_last_extent <
6341                 sctx->cur_inode_size) {
6342                 ret = get_last_extent(sctx, (u64)-1);
6343                 if (ret)
6344                     goto out;
6345             }
6346             if (sctx->cur_inode_last_extent <
6347                 sctx->cur_inode_size) {
6348                 ret = send_hole(sctx, sctx->cur_inode_size);
6349                 if (ret)
6350                     goto out;
6351             }
6352         }
6353         if (need_truncate) {
6354             ret = send_truncate(sctx, sctx->cur_ino,
6355                         sctx->cur_inode_gen,
6356                         sctx->cur_inode_size);
6357             if (ret < 0)
6358                 goto out;
6359         }
6360     }
6361 
6362     if (need_chown) {
6363         ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6364                 left_uid, left_gid);
6365         if (ret < 0)
6366             goto out;
6367     }
6368     if (need_chmod) {
6369         ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6370                 left_mode);
6371         if (ret < 0)
6372             goto out;
6373     }
6374     if (need_fileattr) {
6375         ret = send_fileattr(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6376                     left_fileattr);
6377         if (ret < 0)
6378             goto out;
6379     }
6380 
6381     ret = send_capabilities(sctx);
6382     if (ret < 0)
6383         goto out;
6384 
6385     /*
6386      * If other directory inodes depended on our current directory
6387      * inode's move/rename, now do their move/rename operations.
6388      */
6389     if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
6390         ret = apply_children_dir_moves(sctx);
6391         if (ret)
6392             goto out;
6393         /*
6394          * Need to send that every time, no matter if it actually
6395          * changed between the two trees as we have done changes to
6396          * the inode before. If our inode is a directory and it's
6397          * waiting to be moved/renamed, we will send its utimes when
6398          * it's moved/renamed, therefore we don't need to do it here.
6399          */
6400         sctx->send_progress = sctx->cur_ino + 1;
6401         ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
6402         if (ret < 0)
6403             goto out;
6404     }
6405 
6406 out:
6407     return ret;
6408 }
6409 
6410 struct parent_paths_ctx {
6411     struct list_head *refs;
6412     struct send_ctx *sctx;
6413 };
6414 
6415 static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
6416                  void *ctx)
6417 {
6418     struct parent_paths_ctx *ppctx = ctx;
6419 
6420     /*
6421      * Pass 0 as the generation for the directory, we don't care about it
6422      * here as we have no new references to add, we just want to delete all
6423      * references for an inode.
6424      */
6425     return record_ref_in_tree(&ppctx->sctx->rbtree_deleted_refs, ppctx->refs,
6426                   name, dir, 0, ppctx->sctx);
6427 }
6428 
6429 /*
6430  * Issue unlink operations for all paths of the current inode found in the
6431  * parent snapshot.
6432  */
6433 static int btrfs_unlink_all_paths(struct send_ctx *sctx)
6434 {
6435     LIST_HEAD(deleted_refs);
6436     struct btrfs_path *path;
6437     struct btrfs_root *root = sctx->parent_root;
6438     struct btrfs_key key;
6439     struct btrfs_key found_key;
6440     struct parent_paths_ctx ctx;
6441     int iter_ret = 0;
6442     int ret;
6443 
6444     path = alloc_path_for_send();
6445     if (!path)
6446         return -ENOMEM;
6447 
6448     key.objectid = sctx->cur_ino;
6449     key.type = BTRFS_INODE_REF_KEY;
6450     key.offset = 0;
6451 
6452     ctx.refs = &deleted_refs;
6453     ctx.sctx = sctx;
6454 
6455     btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
6456         if (found_key.objectid != key.objectid)
6457             break;
6458         if (found_key.type != key.type &&
6459             found_key.type != BTRFS_INODE_EXTREF_KEY)
6460             break;
6461 
6462         ret = iterate_inode_ref(root, path, &found_key, 1,
6463                     record_parent_ref, &ctx);
6464         if (ret < 0)
6465             goto out;
6466     }
6467     /* Catch error found during iteration */
6468     if (iter_ret < 0) {
6469         ret = iter_ret;
6470         goto out;
6471     }
6472 
6473     while (!list_empty(&deleted_refs)) {
6474         struct recorded_ref *ref;
6475 
6476         ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
6477         ret = send_unlink(sctx, ref->full_path);
6478         if (ret < 0)
6479             goto out;
6480         recorded_ref_free(ref);
6481     }
6482     ret = 0;
6483 out:
6484     btrfs_free_path(path);
6485     if (ret)
6486         __free_recorded_refs(&deleted_refs);
6487     return ret;
6488 }
6489 
6490 static void close_current_inode(struct send_ctx *sctx)
6491 {
6492     u64 i_size;
6493 
6494     if (sctx->cur_inode == NULL)
6495         return;
6496 
6497     i_size = i_size_read(sctx->cur_inode);
6498 
6499     /*
6500      * If we are doing an incremental send, we may have extents between the
6501      * last processed extent and the i_size that have not been processed
6502      * because they haven't changed but we may have read some of their pages
6503      * through readahead, see the comments at send_extent_data().
6504      */
6505     if (sctx->clean_page_cache && sctx->page_cache_clear_start < i_size)
6506         truncate_inode_pages_range(&sctx->cur_inode->i_data,
6507                        sctx->page_cache_clear_start,
6508                        round_up(i_size, PAGE_SIZE) - 1);
6509 
6510     iput(sctx->cur_inode);
6511     sctx->cur_inode = NULL;
6512 }
6513 
6514 static int changed_inode(struct send_ctx *sctx,
6515              enum btrfs_compare_tree_result result)
6516 {
6517     int ret = 0;
6518     struct btrfs_key *key = sctx->cmp_key;
6519     struct btrfs_inode_item *left_ii = NULL;
6520     struct btrfs_inode_item *right_ii = NULL;
6521     u64 left_gen = 0;
6522     u64 right_gen = 0;
6523 
6524     close_current_inode(sctx);
6525 
6526     sctx->cur_ino = key->objectid;
6527     sctx->cur_inode_new_gen = false;
6528     sctx->cur_inode_last_extent = (u64)-1;
6529     sctx->cur_inode_next_write_offset = 0;
6530     sctx->ignore_cur_inode = false;
6531 
6532     /*
6533      * Set send_progress to current inode. This will tell all get_cur_xxx
6534      * functions that the current inode's refs are not updated yet. Later,
6535      * when process_recorded_refs is finished, it is set to cur_ino + 1.
6536      */
6537     sctx->send_progress = sctx->cur_ino;
6538 
6539     if (result == BTRFS_COMPARE_TREE_NEW ||
6540         result == BTRFS_COMPARE_TREE_CHANGED) {
6541         left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
6542                 sctx->left_path->slots[0],
6543                 struct btrfs_inode_item);
6544         left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
6545                 left_ii);
6546     } else {
6547         right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6548                 sctx->right_path->slots[0],
6549                 struct btrfs_inode_item);
6550         right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6551                 right_ii);
6552     }
6553     if (result == BTRFS_COMPARE_TREE_CHANGED) {
6554         right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6555                 sctx->right_path->slots[0],
6556                 struct btrfs_inode_item);
6557 
6558         right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6559                 right_ii);
6560 
6561         /*
6562          * The cur_ino = root dir case is special here. We can't treat
6563          * the inode as deleted+reused because it would generate a
6564          * stream that tries to delete/mkdir the root dir.
6565          */
6566         if (left_gen != right_gen &&
6567             sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6568             sctx->cur_inode_new_gen = true;
6569     }
6570 
6571     /*
6572      * Normally we do not find inodes with a link count of zero (orphans)
6573      * because the most common case is to create a snapshot and use it
6574      * for a send operation. However other less common use cases involve
6575      * using a subvolume and send it after turning it to RO mode just
6576      * after deleting all hard links of a file while holding an open
6577      * file descriptor against it or turning a RO snapshot into RW mode,
6578      * keep an open file descriptor against a file, delete it and then
6579      * turn the snapshot back to RO mode before using it for a send
6580      * operation. So if we find such cases, ignore the inode and all its
6581      * items completely if it's a new inode, or if it's a changed inode
6582      * make sure all its previous paths (from the parent snapshot) are all
6583      * unlinked and all other the inode items are ignored.
6584      */
6585     if (result == BTRFS_COMPARE_TREE_NEW ||
6586         result == BTRFS_COMPARE_TREE_CHANGED) {
6587         u32 nlinks;
6588 
6589         nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
6590         if (nlinks == 0) {
6591             sctx->ignore_cur_inode = true;
6592             if (result == BTRFS_COMPARE_TREE_CHANGED)
6593                 ret = btrfs_unlink_all_paths(sctx);
6594             goto out;
6595         }
6596     }
6597 
6598     if (result == BTRFS_COMPARE_TREE_NEW) {
6599         sctx->cur_inode_gen = left_gen;
6600         sctx->cur_inode_new = true;
6601         sctx->cur_inode_deleted = false;
6602         sctx->cur_inode_size = btrfs_inode_size(
6603                 sctx->left_path->nodes[0], left_ii);
6604         sctx->cur_inode_mode = btrfs_inode_mode(
6605                 sctx->left_path->nodes[0], left_ii);
6606         sctx->cur_inode_rdev = btrfs_inode_rdev(
6607                 sctx->left_path->nodes[0], left_ii);
6608         if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6609             ret = send_create_inode_if_needed(sctx);
6610     } else if (result == BTRFS_COMPARE_TREE_DELETED) {
6611         sctx->cur_inode_gen = right_gen;
6612         sctx->cur_inode_new = false;
6613         sctx->cur_inode_deleted = true;
6614         sctx->cur_inode_size = btrfs_inode_size(
6615                 sctx->right_path->nodes[0], right_ii);
6616         sctx->cur_inode_mode = btrfs_inode_mode(
6617                 sctx->right_path->nodes[0], right_ii);
6618     } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
6619         /*
6620          * We need to do some special handling in case the inode was
6621          * reported as changed with a changed generation number. This
6622          * means that the original inode was deleted and new inode
6623          * reused the same inum. So we have to treat the old inode as
6624          * deleted and the new one as new.
6625          */
6626         if (sctx->cur_inode_new_gen) {
6627             /*
6628              * First, process the inode as if it was deleted.
6629              */
6630             sctx->cur_inode_gen = right_gen;
6631             sctx->cur_inode_new = false;
6632             sctx->cur_inode_deleted = true;
6633             sctx->cur_inode_size = btrfs_inode_size(
6634                     sctx->right_path->nodes[0], right_ii);
6635             sctx->cur_inode_mode = btrfs_inode_mode(
6636                     sctx->right_path->nodes[0], right_ii);
6637             ret = process_all_refs(sctx,
6638                     BTRFS_COMPARE_TREE_DELETED);
6639             if (ret < 0)
6640                 goto out;
6641 
6642             /*
6643              * Now process the inode as if it was new.
6644              */
6645             sctx->cur_inode_gen = left_gen;
6646             sctx->cur_inode_new = true;
6647             sctx->cur_inode_deleted = false;
6648             sctx->cur_inode_size = btrfs_inode_size(
6649                     sctx->left_path->nodes[0], left_ii);
6650             sctx->cur_inode_mode = btrfs_inode_mode(
6651                     sctx->left_path->nodes[0], left_ii);
6652             sctx->cur_inode_rdev = btrfs_inode_rdev(
6653                     sctx->left_path->nodes[0], left_ii);
6654             ret = send_create_inode_if_needed(sctx);
6655             if (ret < 0)
6656                 goto out;
6657 
6658             ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
6659             if (ret < 0)
6660                 goto out;
6661             /*
6662              * Advance send_progress now as we did not get into
6663              * process_recorded_refs_if_needed in the new_gen case.
6664              */
6665             sctx->send_progress = sctx->cur_ino + 1;
6666 
6667             /*
6668              * Now process all extents and xattrs of the inode as if
6669              * they were all new.
6670              */
6671             ret = process_all_extents(sctx);
6672             if (ret < 0)
6673                 goto out;
6674             ret = process_all_new_xattrs(sctx);
6675             if (ret < 0)
6676                 goto out;
6677         } else {
6678             sctx->cur_inode_gen = left_gen;
6679             sctx->cur_inode_new = false;
6680             sctx->cur_inode_new_gen = false;
6681             sctx->cur_inode_deleted = false;
6682             sctx->cur_inode_size = btrfs_inode_size(
6683                     sctx->left_path->nodes[0], left_ii);
6684             sctx->cur_inode_mode = btrfs_inode_mode(
6685                     sctx->left_path->nodes[0], left_ii);
6686         }
6687     }
6688 
6689 out:
6690     return ret;
6691 }
6692 
6693 /*
6694  * We have to process new refs before deleted refs, but compare_trees gives us
6695  * the new and deleted refs mixed. To fix this, we record the new/deleted refs
6696  * first and later process them in process_recorded_refs.
6697  * For the cur_inode_new_gen case, we skip recording completely because
6698  * changed_inode did already initiate processing of refs. The reason for this is
6699  * that in this case, compare_tree actually compares the refs of 2 different
6700  * inodes. To fix this, process_all_refs is used in changed_inode to handle all
6701  * refs of the right tree as deleted and all refs of the left tree as new.
6702  */
6703 static int changed_ref(struct send_ctx *sctx,
6704                enum btrfs_compare_tree_result result)
6705 {
6706     int ret = 0;
6707 
6708     if (sctx->cur_ino != sctx->cmp_key->objectid) {
6709         inconsistent_snapshot_error(sctx, result, "reference");
6710         return -EIO;
6711     }
6712 
6713     if (!sctx->cur_inode_new_gen &&
6714         sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
6715         if (result == BTRFS_COMPARE_TREE_NEW)
6716             ret = record_new_ref(sctx);
6717         else if (result == BTRFS_COMPARE_TREE_DELETED)
6718             ret = record_deleted_ref(sctx);
6719         else if (result == BTRFS_COMPARE_TREE_CHANGED)
6720             ret = record_changed_ref(sctx);
6721     }
6722 
6723     return ret;
6724 }
6725 
6726 /*
6727  * Process new/deleted/changed xattrs. We skip processing in the
6728  * cur_inode_new_gen case because changed_inode did already initiate processing
6729  * of xattrs. The reason is the same as in changed_ref
6730  */
6731 static int changed_xattr(struct send_ctx *sctx,
6732              enum btrfs_compare_tree_result result)
6733 {
6734     int ret = 0;
6735 
6736     if (sctx->cur_ino != sctx->cmp_key->objectid) {
6737         inconsistent_snapshot_error(sctx, result, "xattr");
6738         return -EIO;
6739     }
6740 
6741     if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6742         if (result == BTRFS_COMPARE_TREE_NEW)
6743             ret = process_new_xattr(sctx);
6744         else if (result == BTRFS_COMPARE_TREE_DELETED)
6745             ret = process_deleted_xattr(sctx);
6746         else if (result == BTRFS_COMPARE_TREE_CHANGED)
6747             ret = process_changed_xattr(sctx);
6748     }
6749 
6750     return ret;
6751 }
6752 
6753 /*
6754  * Process new/deleted/changed extents. We skip processing in the
6755  * cur_inode_new_gen case because changed_inode did already initiate processing
6756  * of extents. The reason is the same as in changed_ref
6757  */
6758 static int changed_extent(struct send_ctx *sctx,
6759               enum btrfs_compare_tree_result result)
6760 {
6761     int ret = 0;
6762 
6763     /*
6764      * We have found an extent item that changed without the inode item
6765      * having changed. This can happen either after relocation (where the
6766      * disk_bytenr of an extent item is replaced at
6767      * relocation.c:replace_file_extents()) or after deduplication into a
6768      * file in both the parent and send snapshots (where an extent item can
6769      * get modified or replaced with a new one). Note that deduplication
6770      * updates the inode item, but it only changes the iversion (sequence
6771      * field in the inode item) of the inode, so if a file is deduplicated
6772      * the same amount of times in both the parent and send snapshots, its
6773      * iversion becomes the same in both snapshots, whence the inode item is
6774      * the same on both snapshots.
6775      */
6776     if (sctx->cur_ino != sctx->cmp_key->objectid)
6777         return 0;
6778 
6779     if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6780         if (result != BTRFS_COMPARE_TREE_DELETED)
6781             ret = process_extent(sctx, sctx->left_path,
6782                     sctx->cmp_key);
6783     }
6784 
6785     return ret;
6786 }
6787 
6788 static int dir_changed(struct send_ctx *sctx, u64 dir)
6789 {
6790     u64 orig_gen, new_gen;
6791     int ret;
6792 
6793     ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
6794                  NULL, NULL, NULL);
6795     if (ret)
6796         return ret;
6797 
6798     ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
6799                  NULL, NULL, NULL, NULL);
6800     if (ret)
6801         return ret;
6802 
6803     return (orig_gen != new_gen) ? 1 : 0;
6804 }
6805 
6806 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
6807             struct btrfs_key *key)
6808 {
6809     struct btrfs_inode_extref *extref;
6810     struct extent_buffer *leaf;
6811     u64 dirid = 0, last_dirid = 0;
6812     unsigned long ptr;
6813     u32 item_size;
6814     u32 cur_offset = 0;
6815     int ref_name_len;
6816     int ret = 0;
6817 
6818     /* Easy case, just check this one dirid */
6819     if (key->type == BTRFS_INODE_REF_KEY) {
6820         dirid = key->offset;
6821 
6822         ret = dir_changed(sctx, dirid);
6823         goto out;
6824     }
6825 
6826     leaf = path->nodes[0];
6827     item_size = btrfs_item_size(leaf, path->slots[0]);
6828     ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
6829     while (cur_offset < item_size) {
6830         extref = (struct btrfs_inode_extref *)(ptr +
6831                                cur_offset);
6832         dirid = btrfs_inode_extref_parent(leaf, extref);
6833         ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
6834         cur_offset += ref_name_len + sizeof(*extref);
6835         if (dirid == last_dirid)
6836             continue;
6837         ret = dir_changed(sctx, dirid);
6838         if (ret)
6839             break;
6840         last_dirid = dirid;
6841     }
6842 out:
6843     return ret;
6844 }
6845 
6846 /*
6847  * Updates compare related fields in sctx and simply forwards to the actual
6848  * changed_xxx functions.
6849  */
6850 static int changed_cb(struct btrfs_path *left_path,
6851               struct btrfs_path *right_path,
6852               struct btrfs_key *key,
6853               enum btrfs_compare_tree_result result,
6854               struct send_ctx *sctx)
6855 {
6856     int ret = 0;
6857 
6858     /*
6859      * We can not hold the commit root semaphore here. This is because in
6860      * the case of sending and receiving to the same filesystem, using a
6861      * pipe, could result in a deadlock:
6862      *
6863      * 1) The task running send blocks on the pipe because it's full;
6864      *
6865      * 2) The task running receive, which is the only consumer of the pipe,
6866      *    is waiting for a transaction commit (for example due to a space
6867      *    reservation when doing a write or triggering a transaction commit
6868      *    when creating a subvolume);
6869      *
6870      * 3) The transaction is waiting to write lock the commit root semaphore,
6871      *    but can not acquire it since it's being held at 1).
6872      *
6873      * Down this call chain we write to the pipe through kernel_write().
6874      * The same type of problem can also happen when sending to a file that
6875      * is stored in the same filesystem - when reserving space for a write
6876      * into the file, we can trigger a transaction commit.
6877      *
6878      * Our caller has supplied us with clones of leaves from the send and
6879      * parent roots, so we're safe here from a concurrent relocation and
6880      * further reallocation of metadata extents while we are here. Below we
6881      * also assert that the leaves are clones.
6882      */
6883     lockdep_assert_not_held(&sctx->send_root->fs_info->commit_root_sem);
6884 
6885     /*
6886      * We always have a send root, so left_path is never NULL. We will not
6887      * have a leaf when we have reached the end of the send root but have
6888      * not yet reached the end of the parent root.
6889      */
6890     if (left_path->nodes[0])
6891         ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED,
6892                 &left_path->nodes[0]->bflags));
6893     /*
6894      * When doing a full send we don't have a parent root, so right_path is
6895      * NULL. When doing an incremental send, we may have reached the end of
6896      * the parent root already, so we don't have a leaf at right_path.
6897      */
6898     if (right_path && right_path->nodes[0])
6899         ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED,
6900                 &right_path->nodes[0]->bflags));
6901 
6902     if (result == BTRFS_COMPARE_TREE_SAME) {
6903         if (key->type == BTRFS_INODE_REF_KEY ||
6904             key->type == BTRFS_INODE_EXTREF_KEY) {
6905             ret = compare_refs(sctx, left_path, key);
6906             if (!ret)
6907                 return 0;
6908             if (ret < 0)
6909                 return ret;
6910         } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
6911             return maybe_send_hole(sctx, left_path, key);
6912         } else {
6913             return 0;
6914         }
6915         result = BTRFS_COMPARE_TREE_CHANGED;
6916         ret = 0;
6917     }
6918 
6919     sctx->left_path = left_path;
6920     sctx->right_path = right_path;
6921     sctx->cmp_key = key;
6922 
6923     ret = finish_inode_if_needed(sctx, 0);
6924     if (ret < 0)
6925         goto out;
6926 
6927     /* Ignore non-FS objects */
6928     if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
6929         key->objectid == BTRFS_FREE_SPACE_OBJECTID)
6930         goto out;
6931 
6932     if (key->type == BTRFS_INODE_ITEM_KEY) {
6933         ret = changed_inode(sctx, result);
6934     } else if (!sctx->ignore_cur_inode) {
6935         if (key->type == BTRFS_INODE_REF_KEY ||
6936             key->type == BTRFS_INODE_EXTREF_KEY)
6937             ret = changed_ref(sctx, result);
6938         else if (key->type == BTRFS_XATTR_ITEM_KEY)
6939             ret = changed_xattr(sctx, result);
6940         else if (key->type == BTRFS_EXTENT_DATA_KEY)
6941             ret = changed_extent(sctx, result);
6942     }
6943 
6944 out:
6945     return ret;
6946 }
6947 
6948 static int search_key_again(const struct send_ctx *sctx,
6949                 struct btrfs_root *root,
6950                 struct btrfs_path *path,
6951                 const struct btrfs_key *key)
6952 {
6953     int ret;
6954 
6955     if (!path->need_commit_sem)
6956         lockdep_assert_held_read(&root->fs_info->commit_root_sem);
6957 
6958     /*
6959      * Roots used for send operations are readonly and no one can add,
6960      * update or remove keys from them, so we should be able to find our
6961      * key again. The only exception is deduplication, which can operate on
6962      * readonly roots and add, update or remove keys to/from them - but at
6963      * the moment we don't allow it to run in parallel with send.
6964      */
6965     ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
6966     ASSERT(ret <= 0);
6967     if (ret > 0) {
6968         btrfs_print_tree(path->nodes[path->lowest_level], false);
6969         btrfs_err(root->fs_info,
6970 "send: key (%llu %u %llu) not found in %s root %llu, lowest_level %d, slot %d",
6971               key->objectid, key->type, key->offset,
6972               (root == sctx->parent_root ? "parent" : "send"),
6973               root->root_key.objectid, path->lowest_level,
6974               path->slots[path->lowest_level]);
6975         return -EUCLEAN;
6976     }
6977 
6978     return ret;
6979 }
6980 
6981 static int full_send_tree(struct send_ctx *sctx)
6982 {
6983     int ret;
6984     struct btrfs_root *send_root = sctx->send_root;
6985     struct btrfs_key key;
6986     struct btrfs_fs_info *fs_info = send_root->fs_info;
6987     struct btrfs_path *path;
6988 
6989     path = alloc_path_for_send();
6990     if (!path)
6991         return -ENOMEM;
6992     path->reada = READA_FORWARD_ALWAYS;
6993 
6994     key.objectid = BTRFS_FIRST_FREE_OBJECTID;
6995     key.type = BTRFS_INODE_ITEM_KEY;
6996     key.offset = 0;
6997 
6998     down_read(&fs_info->commit_root_sem);
6999     sctx->last_reloc_trans = fs_info->last_reloc_trans;
7000     up_read(&fs_info->commit_root_sem);
7001 
7002     ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
7003     if (ret < 0)
7004         goto out;
7005     if (ret)
7006         goto out_finish;
7007 
7008     while (1) {
7009         btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
7010 
7011         ret = changed_cb(path, NULL, &key,
7012                  BTRFS_COMPARE_TREE_NEW, sctx);
7013         if (ret < 0)
7014             goto out;
7015 
7016         down_read(&fs_info->commit_root_sem);
7017         if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
7018             sctx->last_reloc_trans = fs_info->last_reloc_trans;
7019             up_read(&fs_info->commit_root_sem);
7020             /*
7021              * A transaction used for relocating a block group was
7022              * committed or is about to finish its commit. Release
7023              * our path (leaf) and restart the search, so that we
7024              * avoid operating on any file extent items that are
7025              * stale, with a disk_bytenr that reflects a pre
7026              * relocation value. This way we avoid as much as
7027              * possible to fallback to regular writes when checking
7028              * if we can clone file ranges.
7029              */
7030             btrfs_release_path(path);
7031             ret = search_key_again(sctx, send_root, path, &key);
7032             if (ret < 0)
7033                 goto out;
7034         } else {
7035             up_read(&fs_info->commit_root_sem);
7036         }
7037 
7038         ret = btrfs_next_item(send_root, path);
7039         if (ret < 0)
7040             goto out;
7041         if (ret) {
7042             ret  = 0;
7043             break;
7044         }
7045     }
7046 
7047 out_finish:
7048     ret = finish_inode_if_needed(sctx, 1);
7049 
7050 out:
7051     btrfs_free_path(path);
7052     return ret;
7053 }
7054 
7055 static int replace_node_with_clone(struct btrfs_path *path, int level)
7056 {
7057     struct extent_buffer *clone;
7058 
7059     clone = btrfs_clone_extent_buffer(path->nodes[level]);
7060     if (!clone)
7061         return -ENOMEM;
7062 
7063     free_extent_buffer(path->nodes[level]);
7064     path->nodes[level] = clone;
7065 
7066     return 0;
7067 }
7068 
7069 static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen)
7070 {
7071     struct extent_buffer *eb;
7072     struct extent_buffer *parent = path->nodes[*level];
7073     int slot = path->slots[*level];
7074     const int nritems = btrfs_header_nritems(parent);
7075     u64 reada_max;
7076     u64 reada_done = 0;
7077 
7078     lockdep_assert_held_read(&parent->fs_info->commit_root_sem);
7079 
7080     BUG_ON(*level == 0);
7081     eb = btrfs_read_node_slot(parent, slot);
7082     if (IS_ERR(eb))
7083         return PTR_ERR(eb);
7084 
7085     /*
7086      * Trigger readahead for the next leaves we will process, so that it is
7087      * very likely that when we need them they are already in memory and we
7088      * will not block on disk IO. For nodes we only do readahead for one,
7089      * since the time window between processing nodes is typically larger.
7090      */
7091     reada_max = (*level == 1 ? SZ_128K : eb->fs_info->nodesize);
7092 
7093     for (slot++; slot < nritems && reada_done < reada_max; slot++) {
7094         if (btrfs_node_ptr_generation(parent, slot) > reada_min_gen) {
7095             btrfs_readahead_node_child(parent, slot);
7096             reada_done += eb->fs_info->nodesize;
7097         }
7098     }
7099 
7100     path->nodes[*level - 1] = eb;
7101     path->slots[*level - 1] = 0;
7102     (*level)--;
7103 
7104     if (*level == 0)
7105         return replace_node_with_clone(path, 0);
7106 
7107     return 0;
7108 }
7109 
7110 static int tree_move_next_or_upnext(struct btrfs_path *path,
7111                     int *level, int root_level)
7112 {
7113     int ret = 0;
7114     int nritems;
7115     nritems = btrfs_header_nritems(path->nodes[*level]);
7116 
7117     path->slots[*level]++;
7118 
7119     while (path->slots[*level] >= nritems) {
7120         if (*level == root_level) {
7121             path->slots[*level] = nritems - 1;
7122             return -1;
7123         }
7124 
7125         /* move upnext */
7126         path->slots[*level] = 0;
7127         free_extent_buffer(path->nodes[*level]);
7128         path->nodes[*level] = NULL;
7129         (*level)++;
7130         path->slots[*level]++;
7131 
7132         nritems = btrfs_header_nritems(path->nodes[*level]);
7133         ret = 1;
7134     }
7135     return ret;
7136 }
7137 
7138 /*
7139  * Returns 1 if it had to move up and next. 0 is returned if it moved only next
7140  * or down.
7141  */
7142 static int tree_advance(struct btrfs_path *path,
7143             int *level, int root_level,
7144             int allow_down,
7145             struct btrfs_key *key,
7146             u64 reada_min_gen)
7147 {
7148     int ret;
7149 
7150     if (*level == 0 || !allow_down) {
7151         ret = tree_move_next_or_upnext(path, level, root_level);
7152     } else {
7153         ret = tree_move_down(path, level, reada_min_gen);
7154     }
7155 
7156     /*
7157      * Even if we have reached the end of a tree, ret is -1, update the key
7158      * anyway, so that in case we need to restart due to a block group
7159      * relocation, we can assert that the last key of the root node still
7160      * exists in the tree.
7161      */
7162     if (*level == 0)
7163         btrfs_item_key_to_cpu(path->nodes[*level], key,
7164                       path->slots[*level]);
7165     else
7166         btrfs_node_key_to_cpu(path->nodes[*level], key,
7167                       path->slots[*level]);
7168 
7169     return ret;
7170 }
7171 
7172 static int tree_compare_item(struct btrfs_path *left_path,
7173                  struct btrfs_path *right_path,
7174                  char *tmp_buf)
7175 {
7176     int cmp;
7177     int len1, len2;
7178     unsigned long off1, off2;
7179 
7180     len1 = btrfs_item_size(left_path->nodes[0], left_path->slots[0]);
7181     len2 = btrfs_item_size(right_path->nodes[0], right_path->slots[0]);
7182     if (len1 != len2)
7183         return 1;
7184 
7185     off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
7186     off2 = btrfs_item_ptr_offset(right_path->nodes[0],
7187                 right_path->slots[0]);
7188 
7189     read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
7190 
7191     cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
7192     if (cmp)
7193         return 1;
7194     return 0;
7195 }
7196 
7197 /*
7198  * A transaction used for relocating a block group was committed or is about to
7199  * finish its commit. Release our paths and restart the search, so that we are
7200  * not using stale extent buffers:
7201  *
7202  * 1) For levels > 0, we are only holding references of extent buffers, without
7203  *    any locks on them, which does not prevent them from having been relocated
7204  *    and reallocated after the last time we released the commit root semaphore.
7205  *    The exception are the root nodes, for which we always have a clone, see
7206  *    the comment at btrfs_compare_trees();
7207  *
7208  * 2) For leaves, level 0, we are holding copies (clones) of extent buffers, so
7209  *    we are safe from the concurrent relocation and reallocation. However they
7210  *    can have file extent items with a pre relocation disk_bytenr value, so we
7211  *    restart the start from the current commit roots and clone the new leaves so
7212  *    that we get the post relocation disk_bytenr values. Not doing so, could
7213  *    make us clone the wrong data in case there are new extents using the old
7214  *    disk_bytenr that happen to be shared.
7215  */
7216 static int restart_after_relocation(struct btrfs_path *left_path,
7217                     struct btrfs_path *right_path,
7218                     const struct btrfs_key *left_key,
7219                     const struct btrfs_key *right_key,
7220                     int left_level,
7221                     int right_level,
7222                     const struct send_ctx *sctx)
7223 {
7224     int root_level;
7225     int ret;
7226 
7227     lockdep_assert_held_read(&sctx->send_root->fs_info->commit_root_sem);
7228 
7229     btrfs_release_path(left_path);
7230     btrfs_release_path(right_path);
7231 
7232     /*
7233      * Since keys can not be added or removed to/from our roots because they
7234      * are readonly and we do not allow deduplication to run in parallel
7235      * (which can add, remove or change keys), the layout of the trees should
7236      * not change.
7237      */
7238     left_path->lowest_level = left_level;
7239     ret = search_key_again(sctx, sctx->send_root, left_path, left_key);
7240     if (ret < 0)
7241         return ret;
7242 
7243     right_path->lowest_level = right_level;
7244     ret = search_key_again(sctx, sctx->parent_root, right_path, right_key);
7245     if (ret < 0)
7246         return ret;
7247 
7248     /*
7249      * If the lowest level nodes are leaves, clone them so that they can be
7250      * safely used by changed_cb() while not under the protection of the
7251      * commit root semaphore, even if relocation and reallocation happens in
7252      * parallel.
7253      */
7254     if (left_level == 0) {
7255         ret = replace_node_with_clone(left_path, 0);
7256         if (ret < 0)
7257             return ret;
7258     }
7259 
7260     if (right_level == 0) {
7261         ret = replace_node_with_clone(right_path, 0);
7262         if (ret < 0)
7263             return ret;
7264     }
7265 
7266     /*
7267      * Now clone the root nodes (unless they happen to be the leaves we have
7268      * already cloned). This is to protect against concurrent snapshotting of
7269      * the send and parent roots (see the comment at btrfs_compare_trees()).
7270      */
7271     root_level = btrfs_header_level(sctx->send_root->commit_root);
7272     if (root_level > 0) {
7273         ret = replace_node_with_clone(left_path, root_level);
7274         if (ret < 0)
7275             return ret;
7276     }
7277 
7278     root_level = btrfs_header_level(sctx->parent_root->commit_root);
7279     if (root_level > 0) {
7280         ret = replace_node_with_clone(right_path, root_level);
7281         if (ret < 0)
7282             return ret;
7283     }
7284 
7285     return 0;
7286 }
7287 
7288 /*
7289  * This function compares two trees and calls the provided callback for
7290  * every changed/new/deleted item it finds.
7291  * If shared tree blocks are encountered, whole subtrees are skipped, making
7292  * the compare pretty fast on snapshotted subvolumes.
7293  *
7294  * This currently works on commit roots only. As commit roots are read only,
7295  * we don't do any locking. The commit roots are protected with transactions.
7296  * Transactions are ended and rejoined when a commit is tried in between.
7297  *
7298  * This function checks for modifications done to the trees while comparing.
7299  * If it detects a change, it aborts immediately.
7300  */
7301 static int btrfs_compare_trees(struct btrfs_root *left_root,
7302             struct btrfs_root *right_root, struct send_ctx *sctx)
7303 {
7304     struct btrfs_fs_info *fs_info = left_root->fs_info;
7305     int ret;
7306     int cmp;
7307     struct btrfs_path *left_path = NULL;
7308     struct btrfs_path *right_path = NULL;
7309     struct btrfs_key left_key;
7310     struct btrfs_key right_key;
7311     char *tmp_buf = NULL;
7312     int left_root_level;
7313     int right_root_level;
7314     int left_level;
7315     int right_level;
7316     int left_end_reached = 0;
7317     int right_end_reached = 0;
7318     int advance_left = 0;
7319     int advance_right = 0;
7320     u64 left_blockptr;
7321     u64 right_blockptr;
7322     u64 left_gen;
7323     u64 right_gen;
7324     u64 reada_min_gen;
7325 
7326     left_path = btrfs_alloc_path();
7327     if (!left_path) {
7328         ret = -ENOMEM;
7329         goto out;
7330     }
7331     right_path = btrfs_alloc_path();
7332     if (!right_path) {
7333         ret = -ENOMEM;
7334         goto out;
7335     }
7336 
7337     tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
7338     if (!tmp_buf) {
7339         ret = -ENOMEM;
7340         goto out;
7341     }
7342 
7343     left_path->search_commit_root = 1;
7344     left_path->skip_locking = 1;
7345     right_path->search_commit_root = 1;
7346     right_path->skip_locking = 1;
7347 
7348     /*
7349      * Strategy: Go to the first items of both trees. Then do
7350      *
7351      * If both trees are at level 0
7352      *   Compare keys of current items
7353      *     If left < right treat left item as new, advance left tree
7354      *       and repeat
7355      *     If left > right treat right item as deleted, advance right tree
7356      *       and repeat
7357      *     If left == right do deep compare of items, treat as changed if
7358      *       needed, advance both trees and repeat
7359      * If both trees are at the same level but not at level 0
7360      *   Compare keys of current nodes/leafs
7361      *     If left < right advance left tree and repeat
7362      *     If left > right advance right tree and repeat
7363      *     If left == right compare blockptrs of the next nodes/leafs
7364      *       If they match advance both trees but stay at the same level
7365      *         and repeat
7366      *       If they don't match advance both trees while allowing to go
7367      *         deeper and repeat
7368      * If tree levels are different
7369      *   Advance the tree that needs it and repeat
7370      *
7371      * Advancing a tree means:
7372      *   If we are at level 0, try to go to the next slot. If that's not
7373      *   possible, go one level up and repeat. Stop when we found a level
7374      *   where we could go to the next slot. We may at this point be on a
7375      *   node or a leaf.
7376      *
7377      *   If we are not at level 0 and not on shared tree blocks, go one
7378      *   level deeper.
7379      *
7380      *   If we are not at level 0 and on shared tree blocks, go one slot to
7381      *   the right if possible or go up and right.
7382      */
7383 
7384     down_read(&fs_info->commit_root_sem);
7385     left_level = btrfs_header_level(left_root->commit_root);
7386     left_root_level = left_level;
7387     /*
7388      * We clone the root node of the send and parent roots to prevent races
7389      * with snapshot creation of these roots. Snapshot creation COWs the
7390      * root node of a tree, so after the transaction is committed the old
7391      * extent can be reallocated while this send operation is still ongoing.
7392      * So we clone them, under the commit root semaphore, to be race free.
7393      */
7394     left_path->nodes[left_level] =
7395             btrfs_clone_extent_buffer(left_root->commit_root);
7396     if (!left_path->nodes[left_level]) {
7397         ret = -ENOMEM;
7398         goto out_unlock;
7399     }
7400 
7401     right_level = btrfs_header_level(right_root->commit_root);
7402     right_root_level = right_level;
7403     right_path->nodes[right_level] =
7404             btrfs_clone_extent_buffer(right_root->commit_root);
7405     if (!right_path->nodes[right_level]) {
7406         ret = -ENOMEM;
7407         goto out_unlock;
7408     }
7409     /*
7410      * Our right root is the parent root, while the left root is the "send"
7411      * root. We know that all new nodes/leaves in the left root must have
7412      * a generation greater than the right root's generation, so we trigger
7413      * readahead for those nodes and leaves of the left root, as we know we
7414      * will need to read them at some point.
7415      */
7416     reada_min_gen = btrfs_header_generation(right_root->commit_root);
7417 
7418     if (left_level == 0)
7419         btrfs_item_key_to_cpu(left_path->nodes[left_level],
7420                 &left_key, left_path->slots[left_level]);
7421     else
7422         btrfs_node_key_to_cpu(left_path->nodes[left_level],
7423                 &left_key, left_path->slots[left_level]);
7424     if (right_level == 0)
7425         btrfs_item_key_to_cpu(right_path->nodes[right_level],
7426                 &right_key, right_path->slots[right_level]);
7427     else
7428         btrfs_node_key_to_cpu(right_path->nodes[right_level],
7429                 &right_key, right_path->slots[right_level]);
7430 
7431     sctx->last_reloc_trans = fs_info->last_reloc_trans;
7432 
7433     while (1) {
7434         if (need_resched() ||
7435             rwsem_is_contended(&fs_info->commit_root_sem)) {
7436             up_read(&fs_info->commit_root_sem);
7437             cond_resched();
7438             down_read(&fs_info->commit_root_sem);
7439         }
7440 
7441         if (fs_info->last_reloc_trans > sctx->last_reloc_trans) {
7442             ret = restart_after_relocation(left_path, right_path,
7443                                &left_key, &right_key,
7444                                left_level, right_level,
7445                                sctx);
7446             if (ret < 0)
7447                 goto out_unlock;
7448             sctx->last_reloc_trans = fs_info->last_reloc_trans;
7449         }
7450 
7451         if (advance_left && !left_end_reached) {
7452             ret = tree_advance(left_path, &left_level,
7453                     left_root_level,
7454                     advance_left != ADVANCE_ONLY_NEXT,
7455                     &left_key, reada_min_gen);
7456             if (ret == -1)
7457                 left_end_reached = ADVANCE;
7458             else if (ret < 0)
7459                 goto out_unlock;
7460             advance_left = 0;
7461         }
7462         if (advance_right && !right_end_reached) {
7463             ret = tree_advance(right_path, &right_level,
7464                     right_root_level,
7465                     advance_right != ADVANCE_ONLY_NEXT,
7466                     &right_key, reada_min_gen);
7467             if (ret == -1)
7468                 right_end_reached = ADVANCE;
7469             else if (ret < 0)
7470                 goto out_unlock;
7471             advance_right = 0;
7472         }
7473 
7474         if (left_end_reached && right_end_reached) {
7475             ret = 0;
7476             goto out_unlock;
7477         } else if (left_end_reached) {
7478             if (right_level == 0) {
7479                 up_read(&fs_info->commit_root_sem);
7480                 ret = changed_cb(left_path, right_path,
7481                         &right_key,
7482                         BTRFS_COMPARE_TREE_DELETED,
7483                         sctx);
7484                 if (ret < 0)
7485                     goto out;
7486                 down_read(&fs_info->commit_root_sem);
7487             }
7488             advance_right = ADVANCE;
7489             continue;
7490         } else if (right_end_reached) {
7491             if (left_level == 0) {
7492                 up_read(&fs_info->commit_root_sem);
7493                 ret = changed_cb(left_path, right_path,
7494                         &left_key,
7495                         BTRFS_COMPARE_TREE_NEW,
7496                         sctx);
7497                 if (ret < 0)
7498                     goto out;
7499                 down_read(&fs_info->commit_root_sem);
7500             }
7501             advance_left = ADVANCE;
7502             continue;
7503         }
7504 
7505         if (left_level == 0 && right_level == 0) {
7506             up_read(&fs_info->commit_root_sem);
7507             cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
7508             if (cmp < 0) {
7509                 ret = changed_cb(left_path, right_path,
7510                         &left_key,
7511                         BTRFS_COMPARE_TREE_NEW,
7512                         sctx);
7513                 advance_left = ADVANCE;
7514             } else if (cmp > 0) {
7515                 ret = changed_cb(left_path, right_path,
7516                         &right_key,
7517                         BTRFS_COMPARE_TREE_DELETED,
7518                         sctx);
7519                 advance_right = ADVANCE;
7520             } else {
7521                 enum btrfs_compare_tree_result result;
7522 
7523                 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
7524                 ret = tree_compare_item(left_path, right_path,
7525                             tmp_buf);
7526                 if (ret)
7527                     result = BTRFS_COMPARE_TREE_CHANGED;
7528                 else
7529                     result = BTRFS_COMPARE_TREE_SAME;
7530                 ret = changed_cb(left_path, right_path,
7531                          &left_key, result, sctx);
7532                 advance_left = ADVANCE;
7533                 advance_right = ADVANCE;
7534             }
7535 
7536             if (ret < 0)
7537                 goto out;
7538             down_read(&fs_info->commit_root_sem);
7539         } else if (left_level == right_level) {
7540             cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
7541             if (cmp < 0) {
7542                 advance_left = ADVANCE;
7543             } else if (cmp > 0) {
7544                 advance_right = ADVANCE;
7545             } else {
7546                 left_blockptr = btrfs_node_blockptr(
7547                         left_path->nodes[left_level],
7548                         left_path->slots[left_level]);
7549                 right_blockptr = btrfs_node_blockptr(
7550                         right_path->nodes[right_level],
7551                         right_path->slots[right_level]);
7552                 left_gen = btrfs_node_ptr_generation(
7553                         left_path->nodes[left_level],
7554                         left_path->slots[left_level]);
7555                 right_gen = btrfs_node_ptr_generation(
7556                         right_path->nodes[right_level],
7557                         right_path->slots[right_level]);
7558                 if (left_blockptr == right_blockptr &&
7559                     left_gen == right_gen) {
7560                     /*
7561                      * As we're on a shared block, don't
7562                      * allow to go deeper.
7563                      */
7564                     advance_left = ADVANCE_ONLY_NEXT;
7565                     advance_right = ADVANCE_ONLY_NEXT;
7566                 } else {
7567                     advance_left = ADVANCE;
7568                     advance_right = ADVANCE;
7569                 }
7570             }
7571         } else if (left_level < right_level) {
7572             advance_right = ADVANCE;
7573         } else {
7574             advance_left = ADVANCE;
7575         }
7576     }
7577 
7578 out_unlock:
7579     up_read(&fs_info->commit_root_sem);
7580 out:
7581     btrfs_free_path(left_path);
7582     btrfs_free_path(right_path);
7583     kvfree(tmp_buf);
7584     return ret;
7585 }
7586 
7587 static int send_subvol(struct send_ctx *sctx)
7588 {
7589     int ret;
7590 
7591     if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
7592         ret = send_header(sctx);
7593         if (ret < 0)
7594             goto out;
7595     }
7596 
7597     ret = send_subvol_begin(sctx);
7598     if (ret < 0)
7599         goto out;
7600 
7601     if (sctx->parent_root) {
7602         ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, sctx);
7603         if (ret < 0)
7604             goto out;
7605         ret = finish_inode_if_needed(sctx, 1);
7606         if (ret < 0)
7607             goto out;
7608     } else {
7609         ret = full_send_tree(sctx);
7610         if (ret < 0)
7611             goto out;
7612     }
7613 
7614 out:
7615     free_recorded_refs(sctx);
7616     return ret;
7617 }
7618 
7619 /*
7620  * If orphan cleanup did remove any orphans from a root, it means the tree
7621  * was modified and therefore the commit root is not the same as the current
7622  * root anymore. This is a problem, because send uses the commit root and
7623  * therefore can see inode items that don't exist in the current root anymore,
7624  * and for example make calls to btrfs_iget, which will do tree lookups based
7625  * on the current root and not on the commit root. Those lookups will fail,
7626  * returning a -ESTALE error, and making send fail with that error. So make
7627  * sure a send does not see any orphans we have just removed, and that it will
7628  * see the same inodes regardless of whether a transaction commit happened
7629  * before it started (meaning that the commit root will be the same as the
7630  * current root) or not.
7631  */
7632 static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
7633 {
7634     int i;
7635     struct btrfs_trans_handle *trans = NULL;
7636 
7637 again:
7638     if (sctx->parent_root &&
7639         sctx->parent_root->node != sctx->parent_root->commit_root)
7640         goto commit_trans;
7641 
7642     for (i = 0; i < sctx->clone_roots_cnt; i++)
7643         if (sctx->clone_roots[i].root->node !=
7644             sctx->clone_roots[i].root->commit_root)
7645             goto commit_trans;
7646 
7647     if (trans)
7648         return btrfs_end_transaction(trans);
7649 
7650     return 0;
7651 
7652 commit_trans:
7653     /* Use any root, all fs roots will get their commit roots updated. */
7654     if (!trans) {
7655         trans = btrfs_join_transaction(sctx->send_root);
7656         if (IS_ERR(trans))
7657             return PTR_ERR(trans);
7658         goto again;
7659     }
7660 
7661     return btrfs_commit_transaction(trans);
7662 }
7663 
7664 /*
7665  * Make sure any existing dellaloc is flushed for any root used by a send
7666  * operation so that we do not miss any data and we do not race with writeback
7667  * finishing and changing a tree while send is using the tree. This could
7668  * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
7669  * a send operation then uses the subvolume.
7670  * After flushing delalloc ensure_commit_roots_uptodate() must be called.
7671  */
7672 static int flush_delalloc_roots(struct send_ctx *sctx)
7673 {
7674     struct btrfs_root *root = sctx->parent_root;
7675     int ret;
7676     int i;
7677 
7678     if (root) {
7679         ret = btrfs_start_delalloc_snapshot(root, false);
7680         if (ret)
7681             return ret;
7682         btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
7683     }
7684 
7685     for (i = 0; i < sctx->clone_roots_cnt; i++) {
7686         root = sctx->clone_roots[i].root;
7687         ret = btrfs_start_delalloc_snapshot(root, false);
7688         if (ret)
7689             return ret;
7690         btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
7691     }
7692 
7693     return 0;
7694 }
7695 
7696 static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
7697 {
7698     spin_lock(&root->root_item_lock);
7699     root->send_in_progress--;
7700     /*
7701      * Not much left to do, we don't know why it's unbalanced and
7702      * can't blindly reset it to 0.
7703      */
7704     if (root->send_in_progress < 0)
7705         btrfs_err(root->fs_info,
7706               "send_in_progress unbalanced %d root %llu",
7707               root->send_in_progress, root->root_key.objectid);
7708     spin_unlock(&root->root_item_lock);
7709 }
7710 
7711 static void dedupe_in_progress_warn(const struct btrfs_root *root)
7712 {
7713     btrfs_warn_rl(root->fs_info,
7714 "cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
7715               root->root_key.objectid, root->dedupe_in_progress);
7716 }
7717 
7718 long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
7719 {
7720     int ret = 0;
7721     struct btrfs_root *send_root = BTRFS_I(inode)->root;
7722     struct btrfs_fs_info *fs_info = send_root->fs_info;
7723     struct btrfs_root *clone_root;
7724     struct send_ctx *sctx = NULL;
7725     u32 i;
7726     u64 *clone_sources_tmp = NULL;
7727     int clone_sources_to_rollback = 0;
7728     size_t alloc_size;
7729     int sort_clone_roots = 0;
7730 
7731     if (!capable(CAP_SYS_ADMIN))
7732         return -EPERM;
7733 
7734     /*
7735      * The subvolume must remain read-only during send, protect against
7736      * making it RW. This also protects against deletion.
7737      */
7738     spin_lock(&send_root->root_item_lock);
7739     if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
7740         dedupe_in_progress_warn(send_root);
7741         spin_unlock(&send_root->root_item_lock);
7742         return -EAGAIN;
7743     }
7744     send_root->send_in_progress++;
7745     spin_unlock(&send_root->root_item_lock);
7746 
7747     /*
7748      * Userspace tools do the checks and warn the user if it's
7749      * not RO.
7750      */
7751     if (!btrfs_root_readonly(send_root)) {
7752         ret = -EPERM;
7753         goto out;
7754     }
7755 
7756     /*
7757      * Check that we don't overflow at later allocations, we request
7758      * clone_sources_count + 1 items, and compare to unsigned long inside
7759      * access_ok.
7760      */
7761     if (arg->clone_sources_count >
7762         ULONG_MAX / sizeof(struct clone_root) - 1) {
7763         ret = -EINVAL;
7764         goto out;
7765     }
7766 
7767     if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
7768         ret = -EINVAL;
7769         goto out;
7770     }
7771 
7772     sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
7773     if (!sctx) {
7774         ret = -ENOMEM;
7775         goto out;
7776     }
7777 
7778     INIT_LIST_HEAD(&sctx->new_refs);
7779     INIT_LIST_HEAD(&sctx->deleted_refs);
7780     INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
7781     INIT_LIST_HEAD(&sctx->name_cache_list);
7782 
7783     sctx->flags = arg->flags;
7784 
7785     if (arg->flags & BTRFS_SEND_FLAG_VERSION) {
7786         if (arg->version > BTRFS_SEND_STREAM_VERSION) {
7787             ret = -EPROTO;
7788             goto out;
7789         }
7790         /* Zero means "use the highest version" */
7791         sctx->proto = arg->version ?: BTRFS_SEND_STREAM_VERSION;
7792     } else {
7793         sctx->proto = 1;
7794     }
7795     if ((arg->flags & BTRFS_SEND_FLAG_COMPRESSED) && sctx->proto < 2) {
7796         ret = -EINVAL;
7797         goto out;
7798     }
7799 
7800     sctx->send_filp = fget(arg->send_fd);
7801     if (!sctx->send_filp) {
7802         ret = -EBADF;
7803         goto out;
7804     }
7805 
7806     sctx->send_root = send_root;
7807     /*
7808      * Unlikely but possible, if the subvolume is marked for deletion but
7809      * is slow to remove the directory entry, send can still be started
7810      */
7811     if (btrfs_root_dead(sctx->send_root)) {
7812         ret = -EPERM;
7813         goto out;
7814     }
7815 
7816     sctx->clone_roots_cnt = arg->clone_sources_count;
7817 
7818     if (sctx->proto >= 2) {
7819         u32 send_buf_num_pages;
7820 
7821         sctx->send_max_size = ALIGN(SZ_16K + BTRFS_MAX_COMPRESSED, PAGE_SIZE);
7822         sctx->send_buf = vmalloc(sctx->send_max_size);
7823         if (!sctx->send_buf) {
7824             ret = -ENOMEM;
7825             goto out;
7826         }
7827         send_buf_num_pages = sctx->send_max_size >> PAGE_SHIFT;
7828         sctx->send_buf_pages = kcalloc(send_buf_num_pages,
7829                            sizeof(*sctx->send_buf_pages),
7830                            GFP_KERNEL);
7831         if (!sctx->send_buf_pages) {
7832             ret = -ENOMEM;
7833             goto out;
7834         }
7835         for (i = 0; i < send_buf_num_pages; i++) {
7836             sctx->send_buf_pages[i] =
7837                 vmalloc_to_page(sctx->send_buf + (i << PAGE_SHIFT));
7838         }
7839     } else {
7840         sctx->send_max_size = BTRFS_SEND_BUF_SIZE_V1;
7841         sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
7842     }
7843     if (!sctx->send_buf) {
7844         ret = -ENOMEM;
7845         goto out;
7846     }
7847 
7848     sctx->pending_dir_moves = RB_ROOT;
7849     sctx->waiting_dir_moves = RB_ROOT;
7850     sctx->orphan_dirs = RB_ROOT;
7851     sctx->rbtree_new_refs = RB_ROOT;
7852     sctx->rbtree_deleted_refs = RB_ROOT;
7853 
7854     sctx->clone_roots = kvcalloc(sizeof(*sctx->clone_roots),
7855                      arg->clone_sources_count + 1,
7856                      GFP_KERNEL);
7857     if (!sctx->clone_roots) {
7858         ret = -ENOMEM;
7859         goto out;
7860     }
7861 
7862     alloc_size = array_size(sizeof(*arg->clone_sources),
7863                 arg->clone_sources_count);
7864 
7865     if (arg->clone_sources_count) {
7866         clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
7867         if (!clone_sources_tmp) {
7868             ret = -ENOMEM;
7869             goto out;
7870         }
7871 
7872         ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
7873                 alloc_size);
7874         if (ret) {
7875             ret = -EFAULT;
7876             goto out;
7877         }
7878 
7879         for (i = 0; i < arg->clone_sources_count; i++) {
7880             clone_root = btrfs_get_fs_root(fs_info,
7881                         clone_sources_tmp[i], true);
7882             if (IS_ERR(clone_root)) {
7883                 ret = PTR_ERR(clone_root);
7884                 goto out;
7885             }
7886             spin_lock(&clone_root->root_item_lock);
7887             if (!btrfs_root_readonly(clone_root) ||
7888                 btrfs_root_dead(clone_root)) {
7889                 spin_unlock(&clone_root->root_item_lock);
7890                 btrfs_put_root(clone_root);
7891                 ret = -EPERM;
7892                 goto out;
7893             }
7894             if (clone_root->dedupe_in_progress) {
7895                 dedupe_in_progress_warn(clone_root);
7896                 spin_unlock(&clone_root->root_item_lock);
7897                 btrfs_put_root(clone_root);
7898                 ret = -EAGAIN;
7899                 goto out;
7900             }
7901             clone_root->send_in_progress++;
7902             spin_unlock(&clone_root->root_item_lock);
7903 
7904             sctx->clone_roots[i].root = clone_root;
7905             clone_sources_to_rollback = i + 1;
7906         }
7907         kvfree(clone_sources_tmp);
7908         clone_sources_tmp = NULL;
7909     }
7910 
7911     if (arg->parent_root) {
7912         sctx->parent_root = btrfs_get_fs_root(fs_info, arg->parent_root,
7913                               true);
7914         if (IS_ERR(sctx->parent_root)) {
7915             ret = PTR_ERR(sctx->parent_root);
7916             goto out;
7917         }
7918 
7919         spin_lock(&sctx->parent_root->root_item_lock);
7920         sctx->parent_root->send_in_progress++;
7921         if (!btrfs_root_readonly(sctx->parent_root) ||
7922                 btrfs_root_dead(sctx->parent_root)) {
7923             spin_unlock(&sctx->parent_root->root_item_lock);
7924             ret = -EPERM;
7925             goto out;
7926         }
7927         if (sctx->parent_root->dedupe_in_progress) {
7928             dedupe_in_progress_warn(sctx->parent_root);
7929             spin_unlock(&sctx->parent_root->root_item_lock);
7930             ret = -EAGAIN;
7931             goto out;
7932         }
7933         spin_unlock(&sctx->parent_root->root_item_lock);
7934     }
7935 
7936     /*
7937      * Clones from send_root are allowed, but only if the clone source
7938      * is behind the current send position. This is checked while searching
7939      * for possible clone sources.
7940      */
7941     sctx->clone_roots[sctx->clone_roots_cnt++].root =
7942         btrfs_grab_root(sctx->send_root);
7943 
7944     /* We do a bsearch later */
7945     sort(sctx->clone_roots, sctx->clone_roots_cnt,
7946             sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
7947             NULL);
7948     sort_clone_roots = 1;
7949 
7950     ret = flush_delalloc_roots(sctx);
7951     if (ret)
7952         goto out;
7953 
7954     ret = ensure_commit_roots_uptodate(sctx);
7955     if (ret)
7956         goto out;
7957 
7958     ret = send_subvol(sctx);
7959     if (ret < 0)
7960         goto out;
7961 
7962     if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
7963         ret = begin_cmd(sctx, BTRFS_SEND_C_END);
7964         if (ret < 0)
7965             goto out;
7966         ret = send_cmd(sctx);
7967         if (ret < 0)
7968             goto out;
7969     }
7970 
7971 out:
7972     WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
7973     while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
7974         struct rb_node *n;
7975         struct pending_dir_move *pm;
7976 
7977         n = rb_first(&sctx->pending_dir_moves);
7978         pm = rb_entry(n, struct pending_dir_move, node);
7979         while (!list_empty(&pm->list)) {
7980             struct pending_dir_move *pm2;
7981 
7982             pm2 = list_first_entry(&pm->list,
7983                            struct pending_dir_move, list);
7984             free_pending_move(sctx, pm2);
7985         }
7986         free_pending_move(sctx, pm);
7987     }
7988 
7989     WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
7990     while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
7991         struct rb_node *n;
7992         struct waiting_dir_move *dm;
7993 
7994         n = rb_first(&sctx->waiting_dir_moves);
7995         dm = rb_entry(n, struct waiting_dir_move, node);
7996         rb_erase(&dm->node, &sctx->waiting_dir_moves);
7997         kfree(dm);
7998     }
7999 
8000     WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
8001     while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
8002         struct rb_node *n;
8003         struct orphan_dir_info *odi;
8004 
8005         n = rb_first(&sctx->orphan_dirs);
8006         odi = rb_entry(n, struct orphan_dir_info, node);
8007         free_orphan_dir_info(sctx, odi);
8008     }
8009 
8010     if (sort_clone_roots) {
8011         for (i = 0; i < sctx->clone_roots_cnt; i++) {
8012             btrfs_root_dec_send_in_progress(
8013                     sctx->clone_roots[i].root);
8014             btrfs_put_root(sctx->clone_roots[i].root);
8015         }
8016     } else {
8017         for (i = 0; sctx && i < clone_sources_to_rollback; i++) {
8018             btrfs_root_dec_send_in_progress(
8019                     sctx->clone_roots[i].root);
8020             btrfs_put_root(sctx->clone_roots[i].root);
8021         }
8022 
8023         btrfs_root_dec_send_in_progress(send_root);
8024     }
8025     if (sctx && !IS_ERR_OR_NULL(sctx->parent_root)) {
8026         btrfs_root_dec_send_in_progress(sctx->parent_root);
8027         btrfs_put_root(sctx->parent_root);
8028     }
8029 
8030     kvfree(clone_sources_tmp);
8031 
8032     if (sctx) {
8033         if (sctx->send_filp)
8034             fput(sctx->send_filp);
8035 
8036         kvfree(sctx->clone_roots);
8037         kfree(sctx->send_buf_pages);
8038         kvfree(sctx->send_buf);
8039 
8040         name_cache_free(sctx);
8041 
8042         close_current_inode(sctx);
8043 
8044         kfree(sctx);
8045     }
8046 
8047     return ret;
8048 }