0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (C) 2007 Oracle.  All rights reserved.
0004  */
0005 
0006 #include <linux/bio.h>
0007 #include <linux/slab.h>
0008 #include <linux/pagemap.h>
0009 #include <linux/highmem.h>
0010 #include <linux/sched/mm.h>
0011 #include <crypto/hash.h>
0012 #include "misc.h"
0013 #include "ctree.h"
0014 #include "disk-io.h"
0015 #include "transaction.h"
0016 #include "volumes.h"
0017 #include "print-tree.h"
0018 #include "compression.h"
0019 
0020 #define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
0021                    sizeof(struct btrfs_item) * 2) / \
0022                   size) - 1))
0023 
0024 #define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
0025                        PAGE_SIZE))
0026 
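/*
 * Worked example for the two macros above (illustrative only; the figures
 * assume a 16 KiB nodesize, 4 KiB sectorsize, 4-byte crc32c checksums and the
 * usual on-disk struct sizes, roughly 101 bytes for the leaf header and
 * 25 bytes per item header):
 *
 *   BTRFS_LEAF_DATA_SIZE  ~= 16384 - 101              = 16283 bytes
 *   __MAX_CSUM_ITEMS      ~= (16283 - 2 * 25) / 4 - 1 = 4057 checksums
 *   MAX_CSUM_ITEMS        = min(4057, PAGE_SIZE)      = 4057 checksums
 *
 * So one checksum item can describe roughly 4057 * 4 KiB ~= 15.8 MiB of data;
 * the PAGE_SIZE clamp only kicks in for large nodesizes combined with small
 * checksums.
 */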
0027 /**
0028  * Set inode's size according to filesystem options
0029  *
0030  * @inode:      inode we want to update the disk_i_size for
0031  * @new_i_size: the i_size we want to set, or 0 to use the value from i_size_read()
0032  *
0033  * With NO_HOLES set this simply sets disk_i_size to whatever i_size_read()
0034  * returns, as it is perfectly fine to have a file with holes but no hole file
0035  * extent items.
0036  *
0037  * However, without NO_HOLES we need to only use the area that is contiguous
0038  * from offset 0 of the file.  Otherwise we could end up adjusting i_size up
0039  * to an extent that has a gap before it.
0040  *
0041  * Finally new_i_size should only be set in the case of truncate where we're not
0042  * ready to use i_size_read() as the limiter yet.
0043  */
0044 void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size)
0045 {
0046     struct btrfs_fs_info *fs_info = inode->root->fs_info;
0047     u64 start, end, i_size;
0048     int ret;
0049 
0050     i_size = new_i_size ?: i_size_read(&inode->vfs_inode);
0051     if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
0052         inode->disk_i_size = i_size;
0053         return;
0054     }
0055 
0056     spin_lock(&inode->lock);
0057     ret = find_contiguous_extent_bit(&inode->file_extent_tree, 0, &start,
0058                      &end, EXTENT_DIRTY);
0059     if (!ret && start == 0)
0060         i_size = min(i_size, end + 1);
0061     else
0062         i_size = 0;
0063     inode->disk_i_size = i_size;
0064     spin_unlock(&inode->lock);
0065 }
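/*
 * Sketch of the behaviour above (illustrative, made-up offsets): assume file
 * extent items were recorded for [0, 1 MiB) and [2 MiB, 3 MiB) and
 * i_size_read() returns 3 MiB.
 *
 * - With NO_HOLES: disk_i_size is simply set to 3 MiB.
 * - Without NO_HOLES: the contiguous EXTENT_DIRTY range starting at offset 0
 *   in file_extent_tree is [0, 1 MiB), so disk_i_size is clamped to 1 MiB
 *   until the hole at [1 MiB, 2 MiB) gets its hole file extent item.
 */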
0066 
0067 /**
0068  * Mark range within a file as having a new extent inserted
0069  *
0070  * @inode: inode being modified
0071  * @start: start file offset of the file extent we've inserted
0072  * @len:   logical length of the file extent item
0073  *
0074  * Call this when we are inserting a new file extent where there was none before.
0075  * There is no need to call it when we're replacing an existing file extent;
0076  * however, if unsure, it is fine to call it multiple times.
0077  *
0078  * The start and len must match the file extent item, and thus must be
0079  * sectorsize aligned.
0080  */
0081 int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
0082                       u64 len)
0083 {
0084     if (len == 0)
0085         return 0;
0086 
0087     ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize));
0088 
0089     if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
0090         return 0;
0091     return set_extent_bits(&inode->file_extent_tree, start, start + len - 1,
0092                    EXTENT_DIRTY);
0093 }
0094 
0095 /**
0096  * Marks an inode range as not having a backing extent
0097  *
0098  * @inode: inode being modified
0099  * @start: start file offset of the file extent we've inserted
0100  * @len:   logical length of the file extent item
0101  *
0102  * Called when we drop a file extent, for example when we truncate.  Doesn't
0103  * need to be called for cases where we're replacing a file extent, like when
0104  * we've COWed a file extent.
0105  *
0106  * The start and len must match the file extent item, and thus must be
0107  * sectorsize aligned.
0108  */
0109 int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
0110                     u64 len)
0111 {
0112     if (len == 0)
0113         return 0;
0114 
0115     ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize) ||
0116            len == (u64)-1);
0117 
0118     if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
0119         return 0;
0120     return clear_extent_bit(&inode->file_extent_tree, start,
0121                 start + len - 1, EXTENT_DIRTY, 0, 0, NULL);
0122 }
0123 
0124 static inline u32 max_ordered_sum_bytes(struct btrfs_fs_info *fs_info,
0125                     u16 csum_size)
0126 {
0127     u32 ncsums = (PAGE_SIZE - sizeof(struct btrfs_ordered_sum)) / csum_size;
0128 
0129     return ncsums * fs_info->sectorsize;
0130 }
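/*
 * Rough sizing for the helper above (illustrative; assumes a PAGE_SIZE of
 * 4096, a struct btrfs_ordered_sum of a few dozen bytes, 4-byte crc32c
 * checksums and a 4 KiB sectorsize): ncsums is on the order of 1000, so each
 * ordered sum built by btrfs_lookup_csums_range() covers at most about 4 MiB
 * of data.
 */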
0131 
0132 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
0133                  struct btrfs_root *root,
0134                  u64 objectid, u64 pos,
0135                  u64 disk_offset, u64 disk_num_bytes,
0136                  u64 num_bytes, u64 offset, u64 ram_bytes,
0137                  u8 compression, u8 encryption, u16 other_encoding)
0138 {
0139     int ret = 0;
0140     struct btrfs_file_extent_item *item;
0141     struct btrfs_key file_key;
0142     struct btrfs_path *path;
0143     struct extent_buffer *leaf;
0144 
0145     path = btrfs_alloc_path();
0146     if (!path)
0147         return -ENOMEM;
0148     file_key.objectid = objectid;
0149     file_key.offset = pos;
0150     file_key.type = BTRFS_EXTENT_DATA_KEY;
0151 
0152     ret = btrfs_insert_empty_item(trans, root, path, &file_key,
0153                       sizeof(*item));
0154     if (ret < 0)
0155         goto out;
0156     BUG_ON(ret); /* Can't happen */
0157     leaf = path->nodes[0];
0158     item = btrfs_item_ptr(leaf, path->slots[0],
0159                   struct btrfs_file_extent_item);
0160     btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
0161     btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
0162     btrfs_set_file_extent_offset(leaf, item, offset);
0163     btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
0164     btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
0165     btrfs_set_file_extent_generation(leaf, item, trans->transid);
0166     btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
0167     btrfs_set_file_extent_compression(leaf, item, compression);
0168     btrfs_set_file_extent_encryption(leaf, item, encryption);
0169     btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);
0170 
0171     btrfs_mark_buffer_dirty(leaf);
0172 out:
0173     btrfs_free_path(path);
0174     return ret;
0175 }
0176 
0177 static struct btrfs_csum_item *
0178 btrfs_lookup_csum(struct btrfs_trans_handle *trans,
0179           struct btrfs_root *root,
0180           struct btrfs_path *path,
0181           u64 bytenr, int cow)
0182 {
0183     struct btrfs_fs_info *fs_info = root->fs_info;
0184     int ret;
0185     struct btrfs_key file_key;
0186     struct btrfs_key found_key;
0187     struct btrfs_csum_item *item;
0188     struct extent_buffer *leaf;
0189     u64 csum_offset = 0;
0190     const u32 csum_size = fs_info->csum_size;
0191     int csums_in_item;
0192 
0193     file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
0194     file_key.offset = bytenr;
0195     file_key.type = BTRFS_EXTENT_CSUM_KEY;
0196     ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
0197     if (ret < 0)
0198         goto fail;
0199     leaf = path->nodes[0];
0200     if (ret > 0) {
0201         ret = 1;
0202         if (path->slots[0] == 0)
0203             goto fail;
0204         path->slots[0]--;
0205         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
0206         if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
0207             goto fail;
0208 
0209         csum_offset = (bytenr - found_key.offset) >>
0210                 fs_info->sectorsize_bits;
0211         csums_in_item = btrfs_item_size(leaf, path->slots[0]);
0212         csums_in_item /= csum_size;
0213 
0214         if (csum_offset == csums_in_item) {
0215             ret = -EFBIG;
0216             goto fail;
0217         } else if (csum_offset > csums_in_item) {
0218             goto fail;
0219         }
0220     }
0221     item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
0222     item = (struct btrfs_csum_item *)((unsigned char *)item +
0223                       csum_offset * csum_size);
0224     return item;
0225 fail:
0226     if (ret > 0)
0227         ret = -ENOENT;
0228     return ERR_PTR(ret);
0229 }
0230 
0231 int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
0232                  struct btrfs_root *root,
0233                  struct btrfs_path *path, u64 objectid,
0234                  u64 offset, int mod)
0235 {
0236     struct btrfs_key file_key;
0237     int ins_len = mod < 0 ? -1 : 0;
0238     int cow = mod != 0;
0239 
0240     file_key.objectid = objectid;
0241     file_key.offset = offset;
0242     file_key.type = BTRFS_EXTENT_DATA_KEY;
0243 
0244     return btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
0245 }
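/*
 * The @mod convention above, spelled out (derived from the two assignments
 * in the function):
 *   mod == 0  ->  cow = 0, ins_len = 0    read-only search
 *   mod  > 0  ->  cow = 1, ins_len = 0    COW the path for an in-place update
 *   mod  < 0  ->  cow = 1, ins_len = -1   COW the path and prepare for deletion
 */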
0246 
0247 /*
0248  * Find checksums for the logical bytenr range [disk_bytenr, disk_bytenr + len)
0249  * and store the result in @dst.
0250  *
0251  * Return >0 for the number of sectors we found.
0252  * Return 0 if the range [disk_bytenr, disk_bytenr + sectorsize) has no csum.
0253  * The caller may want to try the next sector until one range is hit.
0254  * Return <0 on fatal error.
0255  */
0256 static int search_csum_tree(struct btrfs_fs_info *fs_info,
0257                 struct btrfs_path *path, u64 disk_bytenr,
0258                 u64 len, u8 *dst)
0259 {
0260     struct btrfs_root *csum_root;
0261     struct btrfs_csum_item *item = NULL;
0262     struct btrfs_key key;
0263     const u32 sectorsize = fs_info->sectorsize;
0264     const u32 csum_size = fs_info->csum_size;
0265     u32 itemsize;
0266     int ret;
0267     u64 csum_start;
0268     u64 csum_len;
0269 
0270     ASSERT(IS_ALIGNED(disk_bytenr, sectorsize) &&
0271            IS_ALIGNED(len, sectorsize));
0272 
0273     /* Check if the current csum item covers disk_bytenr */
0274     if (path->nodes[0]) {
0275         item = btrfs_item_ptr(path->nodes[0], path->slots[0],
0276                       struct btrfs_csum_item);
0277         btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
0278         itemsize = btrfs_item_size(path->nodes[0], path->slots[0]);
0279 
0280         csum_start = key.offset;
0281         csum_len = (itemsize / csum_size) * sectorsize;
0282 
0283         if (in_range(disk_bytenr, csum_start, csum_len))
0284             goto found;
0285     }
0286 
0287     /* Current item doesn't contain the desired range, search again */
0288     btrfs_release_path(path);
0289     csum_root = btrfs_csum_root(fs_info, disk_bytenr);
0290     item = btrfs_lookup_csum(NULL, csum_root, path, disk_bytenr, 0);
0291     if (IS_ERR(item)) {
0292         ret = PTR_ERR(item);
0293         goto out;
0294     }
0295     btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
0296     itemsize = btrfs_item_size(path->nodes[0], path->slots[0]);
0297 
0298     csum_start = key.offset;
0299     csum_len = (itemsize / csum_size) * sectorsize;
0300     ASSERT(in_range(disk_bytenr, csum_start, csum_len));
0301 
0302 found:
0303     ret = (min(csum_start + csum_len, disk_bytenr + len) -
0304            disk_bytenr) >> fs_info->sectorsize_bits;
0305     read_extent_buffer(path->nodes[0], dst, (unsigned long)item,
0306             ret * csum_size);
0307 out:
0308     if (ret == -ENOENT || ret == -EFBIG)
0309         ret = 0;
0310     return ret;
0311 }
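/*
 * Example of the return value above (illustrative numbers): if the csum item
 * covers [X - 32 KiB, X + 16 KiB) and search_csum_tree() is asked about
 * [X, X + 64 KiB) with a 4 KiB sectorsize, then
 *
 *   ret = (min(X + 16 KiB, X + 64 KiB) - X) >> sectorsize_bits = 4
 *
 * i.e. only the first 4 sectors get their checksums copied into @dst and the
 * caller continues from X + 16 KiB.
 */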
0312 
0313 /*
0314  * Locate the file offset of @disk_bytenr inside a @bio.
0315  *
0316  * A btrfs bio represents the read range
0317  * [bi_sector << 9, (bi_sector << 9) + bi_size).
0318  * Knowing this, we can iterate through each bvec to locate the page belonging
0319  * to @disk_bytenr and get the file offset.
0320  *
0321  * @inode is used to determine if the bvec page really belongs to @inode.
0322  *
0323  * Return 0 if we can't find the file offset.
0324  * Return >0 if we find the file offset and store it in @file_offset_ret.
0325  */
0326 static int search_file_offset_in_bio(struct bio *bio, struct inode *inode,
0327                      u64 disk_bytenr, u64 *file_offset_ret)
0328 {
0329     struct bvec_iter iter;
0330     struct bio_vec bvec;
0331     u64 cur = bio->bi_iter.bi_sector << SECTOR_SHIFT;
0332     int ret = 0;
0333 
0334     bio_for_each_segment(bvec, bio, iter) {
0335         struct page *page = bvec.bv_page;
0336 
0337         if (cur > disk_bytenr)
0338             break;
0339         if (cur + bvec.bv_len <= disk_bytenr) {
0340             cur += bvec.bv_len;
0341             continue;
0342         }
0343         ASSERT(in_range(disk_bytenr, cur, bvec.bv_len));
0344         if (page->mapping && page->mapping->host &&
0345             page->mapping->host == inode) {
0346             ret = 1;
0347             *file_offset_ret = page_offset(page) + bvec.bv_offset +
0348                        disk_bytenr - cur;
0349             break;
0350         }
0351     }
0352     return ret;
0353 }
0354 
0355 /**
0356  * Look up the checksums for a read bio in the csum tree.
0357  *
0358  * @inode: inode that the bio is for.
0359  * @bio: bio to look up.
0360  * @dst: Buffer of size nblocks * btrfs_super_csum_size() used to return
0361  *       checksum (nblocks = bio->bi_iter.bi_size / fs_info->sectorsize). If
0362  *       NULL, the checksum buffer is allocated and returned in
0363  *       btrfs_bio(bio)->csum instead.
0364  *
0365  * Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise.
0366  */
0367 blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst)
0368 {
0369     struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
0370     struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
0371     struct btrfs_bio *bbio = NULL;
0372     struct btrfs_path *path;
0373     const u32 sectorsize = fs_info->sectorsize;
0374     const u32 csum_size = fs_info->csum_size;
0375     u32 orig_len = bio->bi_iter.bi_size;
0376     u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
0377     u64 cur_disk_bytenr;
0378     u8 *csum;
0379     const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits;
0380     int count = 0;
0381     blk_status_t ret = BLK_STS_OK;
0382 
0383     if ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) ||
0384         test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state))
0385         return BLK_STS_OK;
0386 
0387     /*
0388      * This function is only called for read bios.
0389      *
0390      * This means two things:
0391      * - All our csums should only be in csum tree
0392      *   No ordered extents csums, as ordered extents are only for write
0393      *   path.
0394      * - No need to bother with any other info from the bvecs
0395      *   Since we're looking up csums, the only important info is the
0396      *   disk_bytenr and the length, which can be extracted from bi_iter
0397      *   directly.
0398      */
0399     ASSERT(bio_op(bio) == REQ_OP_READ);
0400     path = btrfs_alloc_path();
0401     if (!path)
0402         return BLK_STS_RESOURCE;
0403 
0404     if (!dst) {
0405         bbio = btrfs_bio(bio);
0406 
0407         if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
0408             bbio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS);
0409             if (!bbio->csum) {
0410                 btrfs_free_path(path);
0411                 return BLK_STS_RESOURCE;
0412             }
0413         } else {
0414             bbio->csum = bbio->csum_inline;
0415         }
0416         csum = bbio->csum;
0417     } else {
0418         csum = dst;
0419     }
0420 
0421     /*
0422      * If the requested number of sectors is larger than one leaf can contain,
0423      * kick off readahead for the csum tree.
0424      */
0425     if (nblocks > fs_info->csums_per_leaf)
0426         path->reada = READA_FORWARD;
0427 
0428     /*
0429      * The free space cache is only read when it hasn't been
0430      * updated in the current transaction.  So, we can safely
0431      * read from the commit root and sidestep a nasty deadlock
0432      * between reading the free space cache and updating the csum tree.
0433      */
0434     if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
0435         path->search_commit_root = 1;
0436         path->skip_locking = 1;
0437     }
0438 
0439     for (cur_disk_bytenr = orig_disk_bytenr;
0440          cur_disk_bytenr < orig_disk_bytenr + orig_len;
0441          cur_disk_bytenr += (count * sectorsize)) {
0442         u64 search_len = orig_disk_bytenr + orig_len - cur_disk_bytenr;
0443         unsigned int sector_offset;
0444         u8 *csum_dst;
0445 
0446         /*
0447          * Although both cur_disk_bytenr and orig_disk_bytenr are u64,
0448          * we're calculating the offset to the bio start.
0449          *
0450          * Bio size is limited to UINT_MAX, thus unsigned int is large
0451          * enough to contain the raw result, not to mention the right
0452          * shifted result.
0453          */
0454         ASSERT(cur_disk_bytenr - orig_disk_bytenr < UINT_MAX);
0455         sector_offset = (cur_disk_bytenr - orig_disk_bytenr) >>
0456                 fs_info->sectorsize_bits;
0457         csum_dst = csum + sector_offset * csum_size;
0458 
0459         count = search_csum_tree(fs_info, path, cur_disk_bytenr,
0460                      search_len, csum_dst);
0461         if (count < 0) {
0462             ret = errno_to_blk_status(count);
0463             if (bbio)
0464                 btrfs_bio_free_csum(bbio);
0465             break;
0466         }
0467 
0468         /*
0469          * We didn't find a csum for this range.  We need to make sure
0470          * we complain loudly about this, because we are not NODATASUM.
0471          *
0472          * However for the DATA_RELOC inode we could potentially be
0473          * relocating data extents for a NODATASUM inode, so the inode
0474          * itself won't be marked with NODATASUM, but the extent we're
0475          * copying is in fact NODATASUM.  If we don't find a csum we
0476          * assume this is the case.
0477          */
0478         if (count == 0) {
0479             memset(csum_dst, 0, csum_size);
0480             count = 1;
0481 
0482             if (BTRFS_I(inode)->root->root_key.objectid ==
0483                 BTRFS_DATA_RELOC_TREE_OBJECTID) {
0484                 u64 file_offset;
0485                 int ret;
0486 
0487                 ret = search_file_offset_in_bio(bio, inode,
0488                         cur_disk_bytenr, &file_offset);
0489                 if (ret)
0490                     set_extent_bits(io_tree, file_offset,
0491                         file_offset + sectorsize - 1,
0492                         EXTENT_NODATASUM);
0493             } else {
0494                 btrfs_warn_rl(fs_info,
0495             "csum hole found for disk bytenr range [%llu, %llu)",
0496                 cur_disk_bytenr, cur_disk_bytenr + sectorsize);
0497             }
0498         }
0499     }
0500 
0501     btrfs_free_path(path);
0502     return ret;
0503 }
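/*
 * Sizing note for the @dst buffer of btrfs_lookup_bio_sums() (illustrative):
 * a 128 KiB read bio with a 4 KiB sectorsize has nblocks = 32, so a caller
 * passing its own @dst must provide at least 32 * csum_size bytes (128 bytes
 * for crc32c, 1 KiB for the 32-byte checksum types).
 */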
0504 
0505 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
0506                  struct list_head *list, int search_commit)
0507 {
0508     struct btrfs_fs_info *fs_info = root->fs_info;
0509     struct btrfs_key key;
0510     struct btrfs_path *path;
0511     struct extent_buffer *leaf;
0512     struct btrfs_ordered_sum *sums;
0513     struct btrfs_csum_item *item;
0514     LIST_HEAD(tmplist);
0515     unsigned long offset;
0516     int ret;
0517     size_t size;
0518     u64 csum_end;
0519     const u32 csum_size = fs_info->csum_size;
0520 
0521     ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
0522            IS_ALIGNED(end + 1, fs_info->sectorsize));
0523 
0524     path = btrfs_alloc_path();
0525     if (!path)
0526         return -ENOMEM;
0527 
0528     if (search_commit) {
0529         path->skip_locking = 1;
0530         path->reada = READA_FORWARD;
0531         path->search_commit_root = 1;
0532     }
0533 
0534     key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
0535     key.offset = start;
0536     key.type = BTRFS_EXTENT_CSUM_KEY;
0537 
0538     ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
0539     if (ret < 0)
0540         goto fail;
0541     if (ret > 0 && path->slots[0] > 0) {
0542         leaf = path->nodes[0];
0543         btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
0544         if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
0545             key.type == BTRFS_EXTENT_CSUM_KEY) {
0546             offset = (start - key.offset) >> fs_info->sectorsize_bits;
0547             if (offset * csum_size <
0548                 btrfs_item_size(leaf, path->slots[0] - 1))
0549                 path->slots[0]--;
0550         }
0551     }
0552 
0553     while (start <= end) {
0554         leaf = path->nodes[0];
0555         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
0556             ret = btrfs_next_leaf(root, path);
0557             if (ret < 0)
0558                 goto fail;
0559             if (ret > 0)
0560                 break;
0561             leaf = path->nodes[0];
0562         }
0563 
0564         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
0565         if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
0566             key.type != BTRFS_EXTENT_CSUM_KEY ||
0567             key.offset > end)
0568             break;
0569 
0570         if (key.offset > start)
0571             start = key.offset;
0572 
0573         size = btrfs_item_size(leaf, path->slots[0]);
0574         csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
0575         if (csum_end <= start) {
0576             path->slots[0]++;
0577             continue;
0578         }
0579 
0580         csum_end = min(csum_end, end + 1);
0581         item = btrfs_item_ptr(path->nodes[0], path->slots[0],
0582                       struct btrfs_csum_item);
0583         while (start < csum_end) {
0584             size = min_t(size_t, csum_end - start,
0585                      max_ordered_sum_bytes(fs_info, csum_size));
0586             sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
0587                        GFP_NOFS);
0588             if (!sums) {
0589                 ret = -ENOMEM;
0590                 goto fail;
0591             }
0592 
0593             sums->bytenr = start;
0594             sums->len = (int)size;
0595 
0596             offset = (start - key.offset) >> fs_info->sectorsize_bits;
0597             offset *= csum_size;
0598             size >>= fs_info->sectorsize_bits;
0599 
0600             read_extent_buffer(path->nodes[0],
0601                        sums->sums,
0602                        ((unsigned long)item) + offset,
0603                        csum_size * size);
0604 
0605             start += fs_info->sectorsize * size;
0606             list_add_tail(&sums->list, &tmplist);
0607         }
0608         path->slots[0]++;
0609     }
0610     ret = 0;
0611 fail:
0612     while (ret < 0 && !list_empty(&tmplist)) {
0613         sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
0614         list_del(&sums->list);
0615         kfree(sums);
0616     }
0617     list_splice_tail(&tmplist, list);
0618 
0619     btrfs_free_path(path);
0620     return ret;
0621 }
0622 
0623 /**
0624  * Calculate checksums of the data contained inside a bio
0625  *
0626  * @inode:   Owner of the data inside the bio
0627  * @bio:     Contains the data to be checksummed
0628  * @offset:      If (u64)-1, @bio may contain discontiguous bio vecs, so the
0629  *               file offsets are determined from the page offsets in the bio.
0630  *               Otherwise, this is the starting file offset of the bio vecs in
0631  *               @bio, which must be contiguous.
0632  * @one_ordered: If true, @bio only refers to one ordered extent.
0633  */
0634 blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
0635                 u64 offset, bool one_ordered)
0636 {
0637     struct btrfs_fs_info *fs_info = inode->root->fs_info;
0638     SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
0639     struct btrfs_ordered_sum *sums;
0640     struct btrfs_ordered_extent *ordered = NULL;
0641     const bool use_page_offsets = (offset == (u64)-1);
0642     char *data;
0643     struct bvec_iter iter;
0644     struct bio_vec bvec;
0645     int index;
0646     unsigned int blockcount;
0647     unsigned long total_bytes = 0;
0648     unsigned long this_sum_bytes = 0;
0649     int i;
0650     unsigned nofs_flag;
0651 
0652     nofs_flag = memalloc_nofs_save();
0653     sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
0654                GFP_KERNEL);
0655     memalloc_nofs_restore(nofs_flag);
0656 
0657     if (!sums)
0658         return BLK_STS_RESOURCE;
0659 
0660     sums->len = bio->bi_iter.bi_size;
0661     INIT_LIST_HEAD(&sums->list);
0662 
0663     sums->bytenr = bio->bi_iter.bi_sector << 9;
0664     index = 0;
0665 
0666     shash->tfm = fs_info->csum_shash;
0667 
0668     bio_for_each_segment(bvec, bio, iter) {
0669         if (use_page_offsets)
0670             offset = page_offset(bvec.bv_page) + bvec.bv_offset;
0671 
0672         if (!ordered) {
0673             ordered = btrfs_lookup_ordered_extent(inode, offset);
0674             /*
0675              * The bio range is not covered by any ordered extent,
0676              * which must be a logic error.
0677              */
0678             if (unlikely(!ordered)) {
0679                 WARN(1, KERN_WARNING
0680             "no ordered extent for root %llu ino %llu offset %llu\n",
0681                      inode->root->root_key.objectid,
0682                      btrfs_ino(inode), offset);
0683                 kvfree(sums);
0684                 return BLK_STS_IOERR;
0685             }
0686         }
0687 
0688         blockcount = BTRFS_BYTES_TO_BLKS(fs_info,
0689                          bvec.bv_len + fs_info->sectorsize
0690                          - 1);
0691 
0692         for (i = 0; i < blockcount; i++) {
0693             if (!one_ordered &&
0694                 !in_range(offset, ordered->file_offset,
0695                       ordered->num_bytes)) {
0696                 unsigned long bytes_left;
0697 
0698                 sums->len = this_sum_bytes;
0699                 this_sum_bytes = 0;
0700                 btrfs_add_ordered_sum(ordered, sums);
0701                 btrfs_put_ordered_extent(ordered);
0702 
0703                 bytes_left = bio->bi_iter.bi_size - total_bytes;
0704 
0705                 nofs_flag = memalloc_nofs_save();
0706                 sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
0707                               bytes_left), GFP_KERNEL);
0708                 memalloc_nofs_restore(nofs_flag);
0709                 BUG_ON(!sums); /* -ENOMEM */
0710                 sums->len = bytes_left;
0711                 ordered = btrfs_lookup_ordered_extent(inode,
0712                                 offset);
0713                 ASSERT(ordered); /* Logic error */
0714                 sums->bytenr = (bio->bi_iter.bi_sector << 9)
0715                     + total_bytes;
0716                 index = 0;
0717             }
0718 
0719             data = bvec_kmap_local(&bvec);
0720             crypto_shash_digest(shash,
0721                         data + (i * fs_info->sectorsize),
0722                         fs_info->sectorsize,
0723                         sums->sums + index);
0724             kunmap_local(data);
0725             index += fs_info->csum_size;
0726             offset += fs_info->sectorsize;
0727             this_sum_bytes += fs_info->sectorsize;
0728             total_bytes += fs_info->sectorsize;
0729         }
0730 
0731     }
0732     this_sum_bytes = 0;
0733     btrfs_add_ordered_sum(ordered, sums);
0734     btrfs_put_ordered_extent(ordered);
0735     return 0;
0736 }
0737 
0738 /*
0739  * Helper function for csum removal. This expects the
0740  * key to describe the csum pointed to by the path, and it expects
0741  * the csum to overlap the range [bytenr, bytenr + len).
0742  *
0743  * The csum should not be entirely contained in the range, and the
0744  * range should not be entirely contained in the csum.
0745  *
0746  * This calls btrfs_truncate_item with the correct args based on the
0747  * overlap, and fixes up the key as required.
0748  */
0749 static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
0750                        struct btrfs_path *path,
0751                        struct btrfs_key *key,
0752                        u64 bytenr, u64 len)
0753 {
0754     struct extent_buffer *leaf;
0755     const u32 csum_size = fs_info->csum_size;
0756     u64 csum_end;
0757     u64 end_byte = bytenr + len;
0758     u32 blocksize_bits = fs_info->sectorsize_bits;
0759 
0760     leaf = path->nodes[0];
0761     csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size;
0762     csum_end <<= blocksize_bits;
0763     csum_end += key->offset;
0764 
0765     if (key->offset < bytenr && csum_end <= end_byte) {
0766         /*
0767          *         [ bytenr - len ]
0768          *         [   ]
0769          *   [csum     ]
0770          *   A simple truncate off the end of the item
0771          */
0772         u32 new_size = (bytenr - key->offset) >> blocksize_bits;
0773         new_size *= csum_size;
0774         btrfs_truncate_item(path, new_size, 1);
0775     } else if (key->offset >= bytenr && csum_end > end_byte &&
0776            end_byte > key->offset) {
0777         /*
0778          *         [ bytenr - len ]
0779          *                 [ ]
0780          *                 [csum     ]
0781          * we need to truncate from the beginning of the csum
0782          */
0783         u32 new_size = (csum_end - end_byte) >> blocksize_bits;
0784         new_size *= csum_size;
0785 
0786         btrfs_truncate_item(path, new_size, 0);
0787 
0788         key->offset = end_byte;
0789         btrfs_set_item_key_safe(fs_info, path, key);
0790     } else {
0791         BUG();
0792     }
0793 }
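/*
 * Worked example for the two truncate_one_csum() cases (illustrative numbers:
 * 4 KiB sectorsize, 4-byte crc32c checksums, csum item at key->offset = 0
 * covering [0, 256 KiB), i.e. an item of 64 * 4 = 256 bytes):
 *
 * - Deleting [192 KiB, 256 KiB) hits the first branch:
 *     new_size = (192 KiB >> 12) * 4 = 192 bytes, the tail is truncated.
 * - Deleting [0, 64 KiB) hits the second branch:
 *     new_size = ((256 KiB - 64 KiB) >> 12) * 4 = 192 bytes, the item is
 *     truncated from the front and its key offset moves to 64 KiB.
 */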
0794 
0795 /*
0796  * deletes the csum items from the csum tree for a given
0797  * range of bytes.
0798  */
0799 int btrfs_del_csums(struct btrfs_trans_handle *trans,
0800             struct btrfs_root *root, u64 bytenr, u64 len)
0801 {
0802     struct btrfs_fs_info *fs_info = trans->fs_info;
0803     struct btrfs_path *path;
0804     struct btrfs_key key;
0805     u64 end_byte = bytenr + len;
0806     u64 csum_end;
0807     struct extent_buffer *leaf;
0808     int ret = 0;
0809     const u32 csum_size = fs_info->csum_size;
0810     u32 blocksize_bits = fs_info->sectorsize_bits;
0811 
0812     ASSERT(root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID ||
0813            root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
0814 
0815     path = btrfs_alloc_path();
0816     if (!path)
0817         return -ENOMEM;
0818 
0819     while (1) {
0820         key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
0821         key.offset = end_byte - 1;
0822         key.type = BTRFS_EXTENT_CSUM_KEY;
0823 
0824         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
0825         if (ret > 0) {
0826             ret = 0;
0827             if (path->slots[0] == 0)
0828                 break;
0829             path->slots[0]--;
0830         } else if (ret < 0) {
0831             break;
0832         }
0833 
0834         leaf = path->nodes[0];
0835         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
0836 
0837         if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
0838             key.type != BTRFS_EXTENT_CSUM_KEY) {
0839             break;
0840         }
0841 
0842         if (key.offset >= end_byte)
0843             break;
0844 
0845         csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size;
0846         csum_end <<= blocksize_bits;
0847         csum_end += key.offset;
0848 
0849         /* this csum ends before we start, we're done */
0850         if (csum_end <= bytenr)
0851             break;
0852 
0853         /* delete the entire item, it is inside our range */
0854         if (key.offset >= bytenr && csum_end <= end_byte) {
0855             int del_nr = 1;
0856 
0857             /*
0858              * Check how many csum items preceding this one in this
0859              * leaf correspond to our range and then delete them all
0860              * at once.
0861              */
0862             if (key.offset > bytenr && path->slots[0] > 0) {
0863                 int slot = path->slots[0] - 1;
0864 
0865                 while (slot >= 0) {
0866                     struct btrfs_key pk;
0867 
0868                     btrfs_item_key_to_cpu(leaf, &pk, slot);
0869                     if (pk.offset < bytenr ||
0870                         pk.type != BTRFS_EXTENT_CSUM_KEY ||
0871                         pk.objectid !=
0872                         BTRFS_EXTENT_CSUM_OBJECTID)
0873                         break;
0874                     path->slots[0] = slot;
0875                     del_nr++;
0876                     key.offset = pk.offset;
0877                     slot--;
0878                 }
0879             }
0880             ret = btrfs_del_items(trans, root, path,
0881                           path->slots[0], del_nr);
0882             if (ret)
0883                 break;
0884             if (key.offset == bytenr)
0885                 break;
0886         } else if (key.offset < bytenr && csum_end > end_byte) {
0887             unsigned long offset;
0888             unsigned long shift_len;
0889             unsigned long item_offset;
0890             /*
0891              *        [ bytenr - len ]
0892              *     [csum                ]
0893              *
0894              * Our bytes are in the middle of the csum,
0895              * we need to split this item and insert a new one.
0896              *
0897              * But we can't drop the path because the
0898              * csum could change, get removed, extended etc.
0899              *
0900              * The trick here is that the max size of a csum item leaves
0901              * enough room in the tree block for a single
0902              * item header.  So, we split the item in place,
0903              * adding a new header pointing to the existing
0904              * bytes.  Then we loop around again and we have
0905              * a nicely formed csum item that we can neatly
0906              * truncate.
0907              */
0908             offset = (bytenr - key.offset) >> blocksize_bits;
0909             offset *= csum_size;
0910 
0911             shift_len = (len >> blocksize_bits) * csum_size;
0912 
0913             item_offset = btrfs_item_ptr_offset(leaf,
0914                                 path->slots[0]);
0915 
0916             memzero_extent_buffer(leaf, item_offset + offset,
0917                          shift_len);
0918             key.offset = bytenr;
0919 
0920             /*
0921              * btrfs_split_item returns -EAGAIN when the
0922              * item changed size or key
0923              */
0924             ret = btrfs_split_item(trans, root, path, &key, offset);
0925             if (ret && ret != -EAGAIN) {
0926                 btrfs_abort_transaction(trans, ret);
0927                 break;
0928             }
0929             ret = 0;
0930 
0931             key.offset = end_byte - 1;
0932         } else {
0933             truncate_one_csum(fs_info, path, &key, bytenr, len);
0934             if (key.offset < bytenr)
0935                 break;
0936         }
0937         btrfs_release_path(path);
0938     }
0939     btrfs_free_path(path);
0940     return ret;
0941 }
0942 
0943 static int find_next_csum_offset(struct btrfs_root *root,
0944                  struct btrfs_path *path,
0945                  u64 *next_offset)
0946 {
0947     const u32 nritems = btrfs_header_nritems(path->nodes[0]);
0948     struct btrfs_key found_key;
0949     int slot = path->slots[0] + 1;
0950     int ret;
0951 
0952     if (nritems == 0 || slot >= nritems) {
0953         ret = btrfs_next_leaf(root, path);
0954         if (ret < 0) {
0955             return ret;
0956         } else if (ret > 0) {
0957             *next_offset = (u64)-1;
0958             return 0;
0959         }
0960         slot = path->slots[0];
0961     }
0962 
0963     btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
0964 
0965     if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
0966         found_key.type != BTRFS_EXTENT_CSUM_KEY)
0967         *next_offset = (u64)-1;
0968     else
0969         *next_offset = found_key.offset;
0970 
0971     return 0;
0972 }
0973 
0974 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
0975                struct btrfs_root *root,
0976                struct btrfs_ordered_sum *sums)
0977 {
0978     struct btrfs_fs_info *fs_info = root->fs_info;
0979     struct btrfs_key file_key;
0980     struct btrfs_key found_key;
0981     struct btrfs_path *path;
0982     struct btrfs_csum_item *item;
0983     struct btrfs_csum_item *item_end;
0984     struct extent_buffer *leaf = NULL;
0985     u64 next_offset;
0986     u64 total_bytes = 0;
0987     u64 csum_offset;
0988     u64 bytenr;
0989     u32 ins_size;
0990     int index = 0;
0991     int found_next;
0992     int ret;
0993     const u32 csum_size = fs_info->csum_size;
0994 
0995     path = btrfs_alloc_path();
0996     if (!path)
0997         return -ENOMEM;
0998 again:
0999     next_offset = (u64)-1;
1000     found_next = 0;
1001     bytenr = sums->bytenr + total_bytes;
1002     file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1003     file_key.offset = bytenr;
1004     file_key.type = BTRFS_EXTENT_CSUM_KEY;
1005 
1006     item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
1007     if (!IS_ERR(item)) {
1008         ret = 0;
1009         leaf = path->nodes[0];
1010         item_end = btrfs_item_ptr(leaf, path->slots[0],
1011                       struct btrfs_csum_item);
1012         item_end = (struct btrfs_csum_item *)((char *)item_end +
1013                btrfs_item_size(leaf, path->slots[0]));
1014         goto found;
1015     }
1016     ret = PTR_ERR(item);
1017     if (ret != -EFBIG && ret != -ENOENT)
1018         goto out;
1019 
1020     if (ret == -EFBIG) {
1021         u32 item_size;
1022         /* we found one, but it isn't big enough yet */
1023         leaf = path->nodes[0];
1024         item_size = btrfs_item_size(leaf, path->slots[0]);
1025         if ((item_size / csum_size) >=
1026             MAX_CSUM_ITEMS(fs_info, csum_size)) {
1027             /* already at max size, make a new one */
1028             goto insert;
1029         }
1030     } else {
1031         /* We didn't find a csum item, insert one. */
1032         ret = find_next_csum_offset(root, path, &next_offset);
1033         if (ret < 0)
1034             goto out;
1035         found_next = 1;
1036         goto insert;
1037     }
1038 
1039     /*
1040      * At this point, we know the tree has a checksum item that ends at an
1041      * offset matching the start of the checksum range we want to insert.
1042      * We try to extend that item as much as possible and then add as many
1043      * checksums to it as they fit.
1044      *
1045      * First check if the leaf has enough free space for at least one
1046      * checksum. If it has, go directly to the item extension code, otherwise
1047      * release the path and do a search for insertion before the extension.
1048      */
1049     if (btrfs_leaf_free_space(leaf) >= csum_size) {
1050         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1051         csum_offset = (bytenr - found_key.offset) >>
1052             fs_info->sectorsize_bits;
1053         goto extend_csum;
1054     }
1055 
1056     btrfs_release_path(path);
1057     path->search_for_extension = 1;
1058     ret = btrfs_search_slot(trans, root, &file_key, path,
1059                 csum_size, 1);
1060     path->search_for_extension = 0;
1061     if (ret < 0)
1062         goto out;
1063 
1064     if (ret > 0) {
1065         if (path->slots[0] == 0)
1066             goto insert;
1067         path->slots[0]--;
1068     }
1069 
1070     leaf = path->nodes[0];
1071     btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1072     csum_offset = (bytenr - found_key.offset) >> fs_info->sectorsize_bits;
1073 
1074     if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
1075         found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
1076         csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
1077         goto insert;
1078     }
1079 
1080 extend_csum:
1081     if (csum_offset == btrfs_item_size(leaf, path->slots[0]) /
1082         csum_size) {
1083         int extend_nr;
1084         u64 tmp;
1085         u32 diff;
1086 
1087         tmp = sums->len - total_bytes;
1088         tmp >>= fs_info->sectorsize_bits;
1089         WARN_ON(tmp < 1);
1090         extend_nr = max_t(int, 1, tmp);
1091 
1092         /*
1093          * A log tree can already have checksum items with a subset of
1094          * the checksums we are trying to log. This can happen after
1095          * doing a sequence of partial writes into prealloc extents and
1096          * fsyncs in between, with a full fsync logging a larger subrange
1097          * of an extent for which a previous fast fsync logged a smaller
1098          * subrange. And this happens in particular due to merging file
1099          * extent items when we complete an ordered extent for a range
1100          * covered by a prealloc extent - this is done at
1101          * btrfs_mark_extent_written().
1102          *
1103          * So if we try to extend the previous checksum item, which has
1104          * a range that ends at the start of the range we want to insert,
1105          * make sure we don't extend beyond the start offset of the next
1106          * checksum item. If we are at the last item in the leaf, then
1107          * forget the optimization of extending and add a new checksum
1108          * item - it is not worth the complexity of releasing the path,
1109          * getting the first key for the next leaf, repeat the btree
1110          * search, etc, because log trees are temporary anyway and it
1111          * would only save a few bytes of leaf space.
1112          */
1113         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
1114             if (path->slots[0] + 1 >=
1115                 btrfs_header_nritems(path->nodes[0])) {
1116                 ret = find_next_csum_offset(root, path, &next_offset);
1117                 if (ret < 0)
1118                     goto out;
1119                 found_next = 1;
1120                 goto insert;
1121             }
1122 
1123             ret = find_next_csum_offset(root, path, &next_offset);
1124             if (ret < 0)
1125                 goto out;
1126 
1127             tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits;
1128             if (tmp <= INT_MAX)
1129                 extend_nr = min_t(int, extend_nr, tmp);
1130         }
1131 
1132         diff = (csum_offset + extend_nr) * csum_size;
1133         diff = min(diff,
1134                MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
1135 
1136         diff = diff - btrfs_item_size(leaf, path->slots[0]);
1137         diff = min_t(u32, btrfs_leaf_free_space(leaf), diff);
1138         diff /= csum_size;
1139         diff *= csum_size;
1140 
1141         btrfs_extend_item(path, diff);
1142         ret = 0;
1143         goto csum;
1144     }
1145 
1146 insert:
1147     btrfs_release_path(path);
1148     csum_offset = 0;
1149     if (found_next) {
1150         u64 tmp;
1151 
1152         tmp = sums->len - total_bytes;
1153         tmp >>= fs_info->sectorsize_bits;
1154         tmp = min(tmp, (next_offset - file_key.offset) >>
1155                      fs_info->sectorsize_bits);
1156 
1157         tmp = max_t(u64, 1, tmp);
1158         tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
1159         ins_size = csum_size * tmp;
1160     } else {
1161         ins_size = csum_size;
1162     }
1163     ret = btrfs_insert_empty_item(trans, root, path, &file_key,
1164                       ins_size);
1165     if (ret < 0)
1166         goto out;
1167     if (WARN_ON(ret != 0))
1168         goto out;
1169     leaf = path->nodes[0];
1170 csum:
1171     item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
1172     item_end = (struct btrfs_csum_item *)((unsigned char *)item +
1173                       btrfs_item_size(leaf, path->slots[0]));
1174     item = (struct btrfs_csum_item *)((unsigned char *)item +
1175                       csum_offset * csum_size);
1176 found:
1177     ins_size = (u32)(sums->len - total_bytes) >> fs_info->sectorsize_bits;
1178     ins_size *= csum_size;
1179     ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
1180                   ins_size);
1181     write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
1182                 ins_size);
1183 
1184     index += ins_size;
1185     ins_size /= csum_size;
1186     total_bytes += ins_size * fs_info->sectorsize;
1187 
1188     btrfs_mark_buffer_dirty(path->nodes[0]);
1189     if (total_bytes < sums->len) {
1190         btrfs_release_path(path);
1191         cond_resched();
1192         goto again;
1193     }
1194 out:
1195     btrfs_free_path(path);
1196     return ret;
1197 }
1198 
1199 void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
1200                      const struct btrfs_path *path,
1201                      struct btrfs_file_extent_item *fi,
1202                      const bool new_inline,
1203                      struct extent_map *em)
1204 {
1205     struct btrfs_fs_info *fs_info = inode->root->fs_info;
1206     struct btrfs_root *root = inode->root;
1207     struct extent_buffer *leaf = path->nodes[0];
1208     const int slot = path->slots[0];
1209     struct btrfs_key key;
1210     u64 extent_start, extent_end;
1211     u64 bytenr;
1212     u8 type = btrfs_file_extent_type(leaf, fi);
1213     int compress_type = btrfs_file_extent_compression(leaf, fi);
1214 
1215     btrfs_item_key_to_cpu(leaf, &key, slot);
1216     extent_start = key.offset;
1217     extent_end = btrfs_file_extent_end(path);
1218     em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1219     em->generation = btrfs_file_extent_generation(leaf, fi);
1220     if (type == BTRFS_FILE_EXTENT_REG ||
1221         type == BTRFS_FILE_EXTENT_PREALLOC) {
1222         em->start = extent_start;
1223         em->len = extent_end - extent_start;
1224         em->orig_start = extent_start -
1225             btrfs_file_extent_offset(leaf, fi);
1226         em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
1227         bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1228         if (bytenr == 0) {
1229             em->block_start = EXTENT_MAP_HOLE;
1230             return;
1231         }
1232         if (compress_type != BTRFS_COMPRESS_NONE) {
1233             set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
1234             em->compress_type = compress_type;
1235             em->block_start = bytenr;
1236             em->block_len = em->orig_block_len;
1237         } else {
1238             bytenr += btrfs_file_extent_offset(leaf, fi);
1239             em->block_start = bytenr;
1240             em->block_len = em->len;
1241             if (type == BTRFS_FILE_EXTENT_PREALLOC)
1242                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
1243         }
1244     } else if (type == BTRFS_FILE_EXTENT_INLINE) {
1245         em->block_start = EXTENT_MAP_INLINE;
1246         em->start = extent_start;
1247         em->len = extent_end - extent_start;
1248         /*
1249          * Initialize orig_start and block_len with the same values
1250          * as in inode.c:btrfs_get_extent().
1251          */
1252         em->orig_start = EXTENT_MAP_HOLE;
1253         em->block_len = (u64)-1;
1254         if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
1255             set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
1256             em->compress_type = compress_type;
1257         }
1258     } else {
1259         btrfs_err(fs_info,
1260               "unknown file extent item type %d, inode %llu, offset %llu, "
1261               "root %llu", type, btrfs_ino(inode), extent_start,
1262               root->root_key.objectid);
1263     }
1264 }
1265 
1266 /*
1267  * Returns the end offset (non-inclusive) of the file extent item that the given
1268  * path points to. If it points to an inline extent, the returned offset is rounded
1269  * up to the sector size.
1270  */
1271 u64 btrfs_file_extent_end(const struct btrfs_path *path)
1272 {
1273     const struct extent_buffer *leaf = path->nodes[0];
1274     const int slot = path->slots[0];
1275     struct btrfs_file_extent_item *fi;
1276     struct btrfs_key key;
1277     u64 end;
1278 
1279     btrfs_item_key_to_cpu(leaf, &key, slot);
1280     ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);
1281     fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1282 
1283     if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
1284         end = btrfs_file_extent_ram_bytes(leaf, fi);
1285         end = ALIGN(key.offset + end, leaf->fs_info->sectorsize);
1286     } else {
1287         end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1288     }
1289 
1290     return end;
1291 }
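/*
 * Two examples of btrfs_file_extent_end() (illustrative, 4 KiB sectorsize):
 *
 * - Inline extent at key.offset 0 with ram_bytes 100:
 *     end = ALIGN(0 + 100, 4096) = 4096.
 * - Regular extent at key.offset 1 MiB with num_bytes 128 KiB:
 *     end = 1 MiB + 128 KiB.
 */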