// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
#include "misc.h"
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"
#include "inode-item.h"

/*
 * Values for the inode_only argument of btrfs_log_inode():
 *
 * LOG_INODE_ALL means to log everything.
 * LOG_INODE_EXISTS means to log just enough to recreate the inode during
 * log replay.
 * LOG_OTHER_INODE and LOG_OTHER_INODE_ALL are used when logging some inode
 * other than the one the fsync was made against, because it conflicts with
 * a name or reference that is being logged.
 */
enum {
	LOG_INODE_ALL,
	LOG_INODE_EXISTS,
	LOG_OTHER_INODE,
	LOG_OTHER_INODE_ALL,
};

/*
 * Directory trouble cases:
 *
 * 1) On rename or unlink, if the inode being unlinked is not in the fsync
 *    log, a full transaction commit is forced before the directory can be
 *    fsynced, so the deletion is not lost on replay.
 *
 * 2) Directory entries created in the current transaction are logged along
 *    with dir_log range items that record which index ranges the log is
 *    authoritative for.  During replay, entries in the subvolume that fall
 *    inside a logged range but are missing from the log are deleted again
 *    (see replay_dir_deletes() and find_dir_range()).
 *
 * 3) Link counts can end up temporarily wrong while names are added and
 *    removed during replay, so inodes touched by replay are recorded in a
 *    fixup tree and their link counts are recomputed once replay is done
 *    (see link_to_fixup_dir() and fixup_inode_link_counts()).
 */

/*
 * Stages for the tree walking.  The first stage (LOG_WALK_PIN_ONLY) only
 * pins down the blocks the log tree references.  The second stage
 * (LOG_WALK_REPLAY_INODES) makes sure all the inodes found in the log are
 * created in the subvolume.  After that, directory index items are replayed
 * and finally everything else (refs, xattrs, extents).
 */
enum {
	LOG_WALK_PIN_ONLY,
	LOG_WALK_REPLAY_INODES,
	LOG_WALK_REPLAY_DIR_INDEX,
	LOG_WALK_REPLAY_ALL,
};

static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_inode *inode,
			   int inode_only,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
static void wait_log_commit(struct btrfs_root *root, int transid);

/*
 * Tree logging is a special write ahead log used to make fsync and O_SYNC
 * cheap: instead of committing the whole transaction on every fsync, the
 * items that changed for a given inode (or directory) are copied into a
 * dedicated log tree (one per subvolume), and only that tree is written to
 * disk before the fsync returns.
 *
 * After a crash, the log trees are read back and their items are copied
 * into the subvolume trees again: any file extents found are re-inserted
 * into the extent allocation tree, directory deletions are redone and link
 * counts fixed up, and then the log trees are freed.
 *
 * The log tree is walked more than once during replay: once to pin down the
 * extents it references, once to create all the inodes it logs, and then
 * again for directory entries and everything else.
 */

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
0139 static int start_log_trans(struct btrfs_trans_handle *trans,
0140 struct btrfs_root *root,
0141 struct btrfs_log_ctx *ctx)
0142 {
0143 struct btrfs_fs_info *fs_info = root->fs_info;
0144 struct btrfs_root *tree_root = fs_info->tree_root;
0145 const bool zoned = btrfs_is_zoned(fs_info);
0146 int ret = 0;
0147 bool created = false;
0148
	/*
	 * First check if the log root tree was already created. If not,
	 * create it before locking this root's log_mutex, to keep lockdep
	 * happy.
	 */
0153 if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state)) {
0154 mutex_lock(&tree_root->log_mutex);
0155 if (!fs_info->log_root_tree) {
0156 ret = btrfs_init_log_root_tree(trans, fs_info);
0157 if (!ret) {
0158 set_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state);
0159 created = true;
0160 }
0161 }
0162 mutex_unlock(&tree_root->log_mutex);
0163 if (ret)
0164 return ret;
0165 }
0166
0167 mutex_lock(&root->log_mutex);
0168
0169 again:
0170 if (root->log_root) {
0171 int index = (root->log_transid + 1) % 2;
0172
0173 if (btrfs_need_log_full_commit(trans)) {
0174 ret = BTRFS_LOG_FORCE_COMMIT;
0175 goto out;
0176 }
0177
0178 if (zoned && atomic_read(&root->log_commit[index])) {
0179 wait_log_commit(root, root->log_transid - 1);
0180 goto again;
0181 }
0182
0183 if (!root->log_start_pid) {
0184 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
0185 root->log_start_pid = current->pid;
0186 } else if (root->log_start_pid != current->pid) {
0187 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
0188 }
0189 } else {
		/*
		 * This means fs_info->log_root_tree was already created
		 * for some other FS trees. Do the full commit not to mix
		 * nodes from multiple log transactions to do sequential
		 * writing.
		 */
0196 if (zoned && !created) {
0197 ret = BTRFS_LOG_FORCE_COMMIT;
0198 goto out;
0199 }
0200
0201 ret = btrfs_add_log_tree(trans, root);
0202 if (ret)
0203 goto out;
0204
0205 set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
0206 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
0207 root->log_start_pid = current->pid;
0208 }
0209
0210 atomic_inc(&root->log_writers);
0211 if (!ctx->logging_new_name) {
0212 int index = root->log_transid % 2;
0213 list_add_tail(&ctx->list, &root->log_ctxs[index]);
0214 ctx->log_transid = root->log_transid;
0215 }
0216
0217 out:
0218 mutex_unlock(&root->log_mutex);
0219 return ret;
0220 }

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were no transactions
 * in progress
 */
0227 static int join_running_log_trans(struct btrfs_root *root)
0228 {
0229 const bool zoned = btrfs_is_zoned(root->fs_info);
0230 int ret = -ENOENT;
0231
0232 if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
0233 return ret;
0234
0235 mutex_lock(&root->log_mutex);
0236 again:
0237 if (root->log_root) {
0238 int index = (root->log_transid + 1) % 2;
0239
0240 ret = 0;
0241 if (zoned && atomic_read(&root->log_commit[index])) {
0242 wait_log_commit(root, root->log_transid - 1);
0243 goto again;
0244 }
0245 atomic_inc(&root->log_writers);
0246 }
0247 mutex_unlock(&root->log_mutex);
0248 return ret;
0249 }

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
0256 void btrfs_pin_log_trans(struct btrfs_root *root)
0257 {
0258 atomic_inc(&root->log_writers);
0259 }
0260
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
0265 void btrfs_end_log_trans(struct btrfs_root *root)
0266 {
0267 if (atomic_dec_and_test(&root->log_writers)) {
0268
0269 cond_wake_up_nomb(&root->log_writer_wait);
0270 }
0271 }
0272
0273 static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
0274 {
0275 filemap_fdatawait_range(buf->pages[0]->mapping,
0276 buf->start, buf->start + buf->len - 1);
0277 }
0278
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};
0320
/*
 * process_func used to pin down extents, write them or wait on them
 */
0324 static int process_one_buffer(struct btrfs_root *log,
0325 struct extent_buffer *eb,
0326 struct walk_control *wc, u64 gen, int level)
0327 {
0328 struct btrfs_fs_info *fs_info = log->fs_info;
0329 int ret = 0;
0330
	/*
	 * If this fs is mixed then we need to be able to process the leaves
	 * to pin down any logged extents, so we have to read the block.
	 */
0335 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
0336 ret = btrfs_read_extent_buffer(eb, gen, level, NULL);
0337 if (ret)
0338 return ret;
0339 }
0340
0341 if (wc->pin) {
0342 ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start,
0343 eb->len);
0344 if (ret)
0345 return ret;
0346
0347 if (btrfs_buffer_uptodate(eb, gen, 0) &&
0348 btrfs_header_level(eb) == 0)
0349 ret = btrfs_exclude_logged_extents(eb);
0350 }
0351 return ret;
0352 }
0353
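/*
 * Copy one item from the log tree (eb/slot/key) into the subvolume tree given
 * by @root.  @path must already be positioned by a search for @key in @root.
 * If an identical copy already exists nothing is done; otherwise the existing
 * item is extended or truncated to the logged size and overwritten, or a new
 * item is inserted.  Inode items get special treatment so that nbytes, i_size
 * and the generation field stay consistent after replay.
 */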
0354 static int do_overwrite_item(struct btrfs_trans_handle *trans,
0355 struct btrfs_root *root,
0356 struct btrfs_path *path,
0357 struct extent_buffer *eb, int slot,
0358 struct btrfs_key *key)
0359 {
0360 int ret;
0361 u32 item_size;
0362 u64 saved_i_size = 0;
0363 int save_old_i_size = 0;
0364 unsigned long src_ptr;
0365 unsigned long dst_ptr;
0366 int overwrite_root = 0;
0367 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
0368
0369 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
0370 overwrite_root = 1;
0371
0372 item_size = btrfs_item_size(eb, slot);
0373 src_ptr = btrfs_item_ptr_offset(eb, slot);
0374
0375
0376 ASSERT(path->nodes[0] != NULL);
0377
0378
0379
0380
0381
0382 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
0383 struct btrfs_key found_key;
0384
0385 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
0386 ret = btrfs_comp_cpu_keys(&found_key, key);
0387 ASSERT(ret >= 0);
0388 } else {
0389 ret = 1;
0390 }
0391
0392 if (ret == 0) {
0393 char *src_copy;
0394 char *dst_copy;
0395 u32 dst_size = btrfs_item_size(path->nodes[0],
0396 path->slots[0]);
0397 if (dst_size != item_size)
0398 goto insert;
0399
0400 if (item_size == 0) {
0401 btrfs_release_path(path);
0402 return 0;
0403 }
0404 dst_copy = kmalloc(item_size, GFP_NOFS);
0405 src_copy = kmalloc(item_size, GFP_NOFS);
0406 if (!dst_copy || !src_copy) {
0407 btrfs_release_path(path);
0408 kfree(dst_copy);
0409 kfree(src_copy);
0410 return -ENOMEM;
0411 }
0412
0413 read_extent_buffer(eb, src_copy, src_ptr, item_size);
0414
0415 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
0416 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
0417 item_size);
0418 ret = memcmp(dst_copy, src_copy, item_size);
0419
0420 kfree(dst_copy);
0421 kfree(src_copy);
0422
0423
0424
0425
0426
0427
0428 if (ret == 0) {
0429 btrfs_release_path(path);
0430 return 0;
0431 }
0432
0433
0434
0435
0436
0437 if (inode_item) {
0438 struct btrfs_inode_item *item;
0439 u64 nbytes;
0440 u32 mode;
0441
0442 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
0443 struct btrfs_inode_item);
0444 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
0445 item = btrfs_item_ptr(eb, slot,
0446 struct btrfs_inode_item);
0447 btrfs_set_inode_nbytes(eb, item, nbytes);
0448
0449
0450
0451
0452
0453
0454 mode = btrfs_inode_mode(eb, item);
0455 if (S_ISDIR(mode))
0456 btrfs_set_inode_size(eb, item, 0);
0457 }
0458 } else if (inode_item) {
0459 struct btrfs_inode_item *item;
0460 u32 mode;
0461
0462
0463
0464
0465
0466 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
0467 btrfs_set_inode_nbytes(eb, item, 0);
0468
0469
0470
0471
0472
0473
0474 mode = btrfs_inode_mode(eb, item);
0475 if (S_ISDIR(mode))
0476 btrfs_set_inode_size(eb, item, 0);
0477 }
0478 insert:
0479 btrfs_release_path(path);
0480
0481 path->skip_release_on_error = 1;
0482 ret = btrfs_insert_empty_item(trans, root, path,
0483 key, item_size);
0484 path->skip_release_on_error = 0;
0485
0486
0487 if (ret == -EEXIST || ret == -EOVERFLOW) {
0488 u32 found_size;
0489 found_size = btrfs_item_size(path->nodes[0],
0490 path->slots[0]);
0491 if (found_size > item_size)
0492 btrfs_truncate_item(path, item_size, 1);
0493 else if (found_size < item_size)
0494 btrfs_extend_item(path, item_size - found_size);
0495 } else if (ret) {
0496 return ret;
0497 }
0498 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
0499 path->slots[0]);
0500
0501
0502
0503
0504
0505
0506
0507
0508
0509
0510 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
0511 struct btrfs_inode_item *src_item;
0512 struct btrfs_inode_item *dst_item;
0513
0514 src_item = (struct btrfs_inode_item *)src_ptr;
0515 dst_item = (struct btrfs_inode_item *)dst_ptr;
0516
0517 if (btrfs_inode_generation(eb, src_item) == 0) {
0518 struct extent_buffer *dst_eb = path->nodes[0];
0519 const u64 ino_size = btrfs_inode_size(eb, src_item);
0520
0521
0522
0523
0524
0525
0526
0527
0528 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
0529 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
0530 ino_size != 0)
0531 btrfs_set_inode_size(dst_eb, dst_item, ino_size);
0532 goto no_copy;
0533 }
0534
0535 if (overwrite_root &&
0536 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
0537 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
0538 save_old_i_size = 1;
0539 saved_i_size = btrfs_inode_size(path->nodes[0],
0540 dst_item);
0541 }
0542 }
0543
0544 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
0545 src_ptr, item_size);
0546
0547 if (save_old_i_size) {
0548 struct btrfs_inode_item *dst_item;
0549 dst_item = (struct btrfs_inode_item *)dst_ptr;
0550 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
0551 }
0552
0553
0554 if (key->type == BTRFS_INODE_ITEM_KEY) {
0555 struct btrfs_inode_item *dst_item;
0556 dst_item = (struct btrfs_inode_item *)dst_ptr;
0557 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
0558 btrfs_set_inode_generation(path->nodes[0], dst_item,
0559 trans->transid);
0560 }
0561 }
0562 no_copy:
0563 btrfs_mark_buffer_dirty(path->nodes[0]);
0564 btrfs_release_path(path);
0565 return 0;
0566 }

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
0582 static int overwrite_item(struct btrfs_trans_handle *trans,
0583 struct btrfs_root *root,
0584 struct btrfs_path *path,
0585 struct extent_buffer *eb, int slot,
0586 struct btrfs_key *key)
0587 {
0588 int ret;
0589
0590
0591 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
0592 if (ret < 0)
0593 return ret;
0594
0595 return do_overwrite_item(trans, root, path, eb, slot, key);
0596 }
0597
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
0602 static noinline struct inode *read_one_inode(struct btrfs_root *root,
0603 u64 objectid)
0604 {
0605 struct inode *inode;
0606
0607 inode = btrfs_iget(root->fs_info->sb, objectid, root);
0608 if (IS_ERR(inode))
0609 inode = NULL;
0610 return inode;
0611 }
0612
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
0625 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
0626 struct btrfs_root *root,
0627 struct btrfs_path *path,
0628 struct extent_buffer *eb, int slot,
0629 struct btrfs_key *key)
0630 {
0631 struct btrfs_drop_extents_args drop_args = { 0 };
0632 struct btrfs_fs_info *fs_info = root->fs_info;
0633 int found_type;
0634 u64 extent_end;
0635 u64 start = key->offset;
0636 u64 nbytes = 0;
0637 struct btrfs_file_extent_item *item;
0638 struct inode *inode = NULL;
0639 unsigned long size;
0640 int ret = 0;
0641
0642 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
0643 found_type = btrfs_file_extent_type(eb, item);
0644
0645 if (found_type == BTRFS_FILE_EXTENT_REG ||
0646 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
0647 nbytes = btrfs_file_extent_num_bytes(eb, item);
0648 extent_end = start + nbytes;
0649
0650
0651
0652
0653
0654 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
0655 nbytes = 0;
0656 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
0657 size = btrfs_file_extent_ram_bytes(eb, item);
0658 nbytes = btrfs_file_extent_ram_bytes(eb, item);
0659 extent_end = ALIGN(start + size,
0660 fs_info->sectorsize);
0661 } else {
0662 ret = 0;
0663 goto out;
0664 }
0665
0666 inode = read_one_inode(root, key->objectid);
0667 if (!inode) {
0668 ret = -EIO;
0669 goto out;
0670 }
0671
0672
0673
0674
0675
0676
0677 ret = btrfs_lookup_file_extent(trans, root, path,
0678 btrfs_ino(BTRFS_I(inode)), start, 0);
0679
0680 if (ret == 0 &&
0681 (found_type == BTRFS_FILE_EXTENT_REG ||
0682 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
0683 struct btrfs_file_extent_item cmp1;
0684 struct btrfs_file_extent_item cmp2;
0685 struct btrfs_file_extent_item *existing;
0686 struct extent_buffer *leaf;
0687
0688 leaf = path->nodes[0];
0689 existing = btrfs_item_ptr(leaf, path->slots[0],
0690 struct btrfs_file_extent_item);
0691
0692 read_extent_buffer(eb, &cmp1, (unsigned long)item,
0693 sizeof(cmp1));
0694 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
0695 sizeof(cmp2));
0696
0697
0698
0699
0700
0701 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
0702 btrfs_release_path(path);
0703 goto out;
0704 }
0705 }
0706 btrfs_release_path(path);
0707
0708
0709 drop_args.start = start;
0710 drop_args.end = extent_end;
0711 drop_args.drop_cache = true;
0712 ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
0713 if (ret)
0714 goto out;
0715
0716 if (found_type == BTRFS_FILE_EXTENT_REG ||
0717 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
0718 u64 offset;
0719 unsigned long dest_offset;
0720 struct btrfs_key ins;
0721
0722 if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
0723 btrfs_fs_incompat(fs_info, NO_HOLES))
0724 goto update_inode;
0725
0726 ret = btrfs_insert_empty_item(trans, root, path, key,
0727 sizeof(*item));
0728 if (ret)
0729 goto out;
0730 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
0731 path->slots[0]);
0732 copy_extent_buffer(path->nodes[0], eb, dest_offset,
0733 (unsigned long)item, sizeof(*item));
0734
0735 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
0736 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
0737 ins.type = BTRFS_EXTENT_ITEM_KEY;
0738 offset = key->offset - btrfs_file_extent_offset(eb, item);
0739
0740
0741
0742
0743
0744
0745
0746
0747
0748 ret = btrfs_qgroup_trace_extent(trans,
0749 btrfs_file_extent_disk_bytenr(eb, item),
0750 btrfs_file_extent_disk_num_bytes(eb, item),
0751 GFP_NOFS);
0752 if (ret < 0)
0753 goto out;
0754
0755 if (ins.objectid > 0) {
0756 struct btrfs_ref ref = { 0 };
0757 u64 csum_start;
0758 u64 csum_end;
0759 LIST_HEAD(ordered_sums);
0760
0761
0762
0763
0764
0765 ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
0766 ins.offset);
0767 if (ret < 0) {
0768 goto out;
0769 } else if (ret == 0) {
0770 btrfs_init_generic_ref(&ref,
0771 BTRFS_ADD_DELAYED_REF,
0772 ins.objectid, ins.offset, 0);
0773 btrfs_init_data_ref(&ref,
0774 root->root_key.objectid,
0775 key->objectid, offset, 0, false);
0776 ret = btrfs_inc_extent_ref(trans, &ref);
0777 if (ret)
0778 goto out;
0779 } else {
0780
0781
0782
0783
0784 ret = btrfs_alloc_logged_file_extent(trans,
0785 root->root_key.objectid,
0786 key->objectid, offset, &ins);
0787 if (ret)
0788 goto out;
0789 }
0790 btrfs_release_path(path);
0791
0792 if (btrfs_file_extent_compression(eb, item)) {
0793 csum_start = ins.objectid;
0794 csum_end = csum_start + ins.offset;
0795 } else {
0796 csum_start = ins.objectid +
0797 btrfs_file_extent_offset(eb, item);
0798 csum_end = csum_start +
0799 btrfs_file_extent_num_bytes(eb, item);
0800 }
0801
0802 ret = btrfs_lookup_csums_range(root->log_root,
0803 csum_start, csum_end - 1,
0804 &ordered_sums, 0);
0805 if (ret)
0806 goto out;
0807
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In that case, if the partial
			 * reference is replayed first and we did not delete
			 * the existing csums below, we could end up with two
			 * csum items in the csum tree that overlap each other,
			 * and later lookups for blocks covered only by the
			 * older, wider item would fail.  Deleting the csums
			 * for the range first and re-inserting the ones found
			 * in the log keeps the csum tree consistent no matter
			 * the order the file extent items are replayed in.
			 */
0856 while (!list_empty(&ordered_sums)) {
0857 struct btrfs_ordered_sum *sums;
0858 struct btrfs_root *csum_root;
0859
0860 sums = list_entry(ordered_sums.next,
0861 struct btrfs_ordered_sum,
0862 list);
0863 csum_root = btrfs_csum_root(fs_info,
0864 sums->bytenr);
0865 if (!ret)
0866 ret = btrfs_del_csums(trans, csum_root,
0867 sums->bytenr,
0868 sums->len);
0869 if (!ret)
0870 ret = btrfs_csum_file_blocks(trans,
0871 csum_root,
0872 sums);
0873 list_del(&sums->list);
0874 kfree(sums);
0875 }
0876 if (ret)
0877 goto out;
0878 } else {
0879 btrfs_release_path(path);
0880 }
0881 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
0882
0883 ret = overwrite_item(trans, root, path, eb, slot, key);
0884 if (ret)
0885 goto out;
0886 }
0887
0888 ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
0889 extent_end - start);
0890 if (ret)
0891 goto out;
0892
0893 update_inode:
0894 btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
0895 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
0896 out:
0897 iput(inode);
0898 return ret;
0899 }
0900
0901 static int unlink_inode_for_log_replay(struct btrfs_trans_handle *trans,
0902 struct btrfs_inode *dir,
0903 struct btrfs_inode *inode,
0904 const char *name,
0905 int name_len)
0906 {
0907 int ret;
0908
0909 ret = btrfs_unlink_inode(trans, dir, inode, name, name_len);
0910 if (ret)
0911 return ret;
0912
	/*
	 * The directory index deletion done by btrfs_unlink_inode() may be
	 * deferred through the inode's delayed items. Log replay searches the
	 * subvolume trees directly, so run the delayed items now to make sure
	 * later searches do not find stale entries for the name just removed.
	 */
0918 return btrfs_run_delayed_items(trans);
0919 }
0920
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlinking of a specific directory
 * item
 */
0929 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
0930 struct btrfs_path *path,
0931 struct btrfs_inode *dir,
0932 struct btrfs_dir_item *di)
0933 {
0934 struct btrfs_root *root = dir->root;
0935 struct inode *inode;
0936 char *name;
0937 int name_len;
0938 struct extent_buffer *leaf;
0939 struct btrfs_key location;
0940 int ret;
0941
0942 leaf = path->nodes[0];
0943
0944 btrfs_dir_item_key_to_cpu(leaf, di, &location);
0945 name_len = btrfs_dir_name_len(leaf, di);
0946 name = kmalloc(name_len, GFP_NOFS);
0947 if (!name)
0948 return -ENOMEM;
0949
0950 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
0951 btrfs_release_path(path);
0952
0953 inode = read_one_inode(root, location.objectid);
0954 if (!inode) {
0955 ret = -EIO;
0956 goto out;
0957 }
0958
0959 ret = link_to_fixup_dir(trans, root, path, location.objectid);
0960 if (ret)
0961 goto out;
0962
0963 ret = unlink_inode_for_log_replay(trans, dir, BTRFS_I(inode), name,
0964 name_len);
0965 out:
0966 kfree(name);
0967 iput(inode);
0968 return ret;
0969 }
0970
/*
 * See if a given name and sequence number found in an inode back reference
 * are already in a directory and correctly point to this inode.
 *
 * Returns 1 if the inode reference exists in the directory, 0 if it isn't
 * there, or a negative error code in case of failure.
 */
0978 static noinline int inode_in_dir(struct btrfs_root *root,
0979 struct btrfs_path *path,
0980 u64 dirid, u64 objectid, u64 index,
0981 const char *name, int name_len)
0982 {
0983 struct btrfs_dir_item *di;
0984 struct btrfs_key location;
0985 int ret = 0;
0986
0987 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
0988 index, name, name_len, 0);
0989 if (IS_ERR(di)) {
0990 ret = PTR_ERR(di);
0991 goto out;
0992 } else if (di) {
0993 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
0994 if (location.objectid != objectid)
0995 goto out;
0996 } else {
0997 goto out;
0998 }
0999
1000 btrfs_release_path(path);
1001 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
1002 if (IS_ERR(di)) {
1003 ret = PTR_ERR(di);
1004 goto out;
1005 } else if (di) {
1006 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
1007 if (location.objectid == objectid)
1008 ret = 1;
1009 }
1010 out:
1011 btrfs_release_path(path);
1012 return ret;
1013 }
1014
/*
 * Check if the given name exists as an inode reference (ref or extref) in
 * the log tree, for the given inode and parent directory.  This is used
 * during replay to decide whether a name found in the subvolume tree
 * conflicts with what the log says and therefore has to be unlinked.
 *
 * Returns 1 if the name is in the log, 0 if it is not, and a negative
 * errno on failure.
 */
1025 static noinline int backref_in_log(struct btrfs_root *log,
1026 struct btrfs_key *key,
1027 u64 ref_objectid,
1028 const char *name, int namelen)
1029 {
1030 struct btrfs_path *path;
1031 int ret;
1032
1033 path = btrfs_alloc_path();
1034 if (!path)
1035 return -ENOMEM;
1036
1037 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
1038 if (ret < 0) {
1039 goto out;
1040 } else if (ret == 1) {
1041 ret = 0;
1042 goto out;
1043 }
1044
1045 if (key->type == BTRFS_INODE_EXTREF_KEY)
1046 ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
1047 path->slots[0],
1048 ref_objectid,
1049 name, namelen);
1050 else
1051 ret = !!btrfs_find_name_in_backref(path->nodes[0],
1052 path->slots[0],
1053 name, namelen);
1054 out:
1055 btrfs_free_path(path);
1056 return ret;
1057 }
1058
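/*
 * Helper for add_inode_ref(): before (re)adding the name described by
 * @name/@ref_index for @inode in directory @dir, get rid of everything that
 * conflicts with it in the subvolume tree: inode refs and extrefs for the
 * same inode/parent pair whose names are not in the log, and any dir item or
 * dir index item that uses the same name or index.  *search_done is set once
 * the conflicting inode refs have been scanned, so the caller can skip that
 * work on later iterations.
 */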
1059 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
1060 struct btrfs_root *root,
1061 struct btrfs_path *path,
1062 struct btrfs_root *log_root,
1063 struct btrfs_inode *dir,
1064 struct btrfs_inode *inode,
1065 u64 inode_objectid, u64 parent_objectid,
1066 u64 ref_index, char *name, int namelen,
1067 int *search_done)
1068 {
1069 int ret;
1070 char *victim_name;
1071 int victim_name_len;
1072 struct extent_buffer *leaf;
1073 struct btrfs_dir_item *di;
1074 struct btrfs_key search_key;
1075 struct btrfs_inode_extref *extref;
1076
1077 again:
1078
1079 search_key.objectid = inode_objectid;
1080 search_key.type = BTRFS_INODE_REF_KEY;
1081 search_key.offset = parent_objectid;
1082 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1083 if (ret == 0) {
1084 struct btrfs_inode_ref *victim_ref;
1085 unsigned long ptr;
1086 unsigned long ptr_end;
1087
1088 leaf = path->nodes[0];
1089
1090
1091
1092
1093 if (search_key.objectid == search_key.offset)
1094 return 1;
1095
1096
1097
1098
1099
1100 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1101 ptr_end = ptr + btrfs_item_size(leaf, path->slots[0]);
1102 while (ptr < ptr_end) {
1103 victim_ref = (struct btrfs_inode_ref *)ptr;
1104 victim_name_len = btrfs_inode_ref_name_len(leaf,
1105 victim_ref);
1106 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1107 if (!victim_name)
1108 return -ENOMEM;
1109
1110 read_extent_buffer(leaf, victim_name,
1111 (unsigned long)(victim_ref + 1),
1112 victim_name_len);
1113
1114 ret = backref_in_log(log_root, &search_key,
1115 parent_objectid, victim_name,
1116 victim_name_len);
1117 if (ret < 0) {
1118 kfree(victim_name);
1119 return ret;
1120 } else if (!ret) {
1121 inc_nlink(&inode->vfs_inode);
1122 btrfs_release_path(path);
1123
1124 ret = unlink_inode_for_log_replay(trans, dir, inode,
1125 victim_name, victim_name_len);
1126 kfree(victim_name);
1127 if (ret)
1128 return ret;
1129 *search_done = 1;
1130 goto again;
1131 }
1132 kfree(victim_name);
1133
1134 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
1135 }
1136
1137
1138
1139
1140
1141 *search_done = 1;
1142 }
1143 btrfs_release_path(path);
1144
1145
1146 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1147 inode_objectid, parent_objectid, 0,
1148 0);
1149 if (IS_ERR(extref)) {
1150 return PTR_ERR(extref);
1151 } else if (extref) {
1152 u32 item_size;
1153 u32 cur_offset = 0;
1154 unsigned long base;
1155 struct inode *victim_parent;
1156
1157 leaf = path->nodes[0];
1158
1159 item_size = btrfs_item_size(leaf, path->slots[0]);
1160 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1161
1162 while (cur_offset < item_size) {
1163 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1164
1165 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1166
1167 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1168 goto next;
1169
1170 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1171 if (!victim_name)
1172 return -ENOMEM;
1173 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1174 victim_name_len);
1175
1176 search_key.objectid = inode_objectid;
1177 search_key.type = BTRFS_INODE_EXTREF_KEY;
1178 search_key.offset = btrfs_extref_hash(parent_objectid,
1179 victim_name,
1180 victim_name_len);
1181 ret = backref_in_log(log_root, &search_key,
1182 parent_objectid, victim_name,
1183 victim_name_len);
1184 if (ret < 0) {
1185 kfree(victim_name);
1186 return ret;
1187 } else if (!ret) {
1188 ret = -ENOENT;
1189 victim_parent = read_one_inode(root,
1190 parent_objectid);
1191 if (victim_parent) {
1192 inc_nlink(&inode->vfs_inode);
1193 btrfs_release_path(path);
1194
1195 ret = unlink_inode_for_log_replay(trans,
1196 BTRFS_I(victim_parent),
1197 inode,
1198 victim_name,
1199 victim_name_len);
1200 }
1201 iput(victim_parent);
1202 kfree(victim_name);
1203 if (ret)
1204 return ret;
1205 *search_done = 1;
1206 goto again;
1207 }
1208 kfree(victim_name);
1209 next:
1210 cur_offset += victim_name_len + sizeof(*extref);
1211 }
1212 *search_done = 1;
1213 }
1214 btrfs_release_path(path);
1215
1216
1217 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1218 ref_index, name, namelen, 0);
1219 if (IS_ERR(di)) {
1220 return PTR_ERR(di);
1221 } else if (di) {
1222 ret = drop_one_dir_item(trans, path, dir, di);
1223 if (ret)
1224 return ret;
1225 }
1226 btrfs_release_path(path);
1227
1228
1229 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1230 name, namelen, 0);
1231 if (IS_ERR(di)) {
1232 return PTR_ERR(di);
1233 } else if (di) {
1234 ret = drop_one_dir_item(trans, path, dir, di);
1235 if (ret)
1236 return ret;
1237 }
1238 btrfs_release_path(path);
1239
1240 return 0;
1241 }
1242
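/*
 * Decode one extended inode reference at @ref_ptr inside @eb: allocate and
 * return a copy of the name, and optionally return the directory index and
 * parent objectid.  ref_get_fields() below does the same for regular
 * BTRFS_INODE_REF_KEY items.  The returned name must be freed by the caller.
 */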
1243 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1244 u32 *namelen, char **name, u64 *index,
1245 u64 *parent_objectid)
1246 {
1247 struct btrfs_inode_extref *extref;
1248
1249 extref = (struct btrfs_inode_extref *)ref_ptr;
1250
1251 *namelen = btrfs_inode_extref_name_len(eb, extref);
1252 *name = kmalloc(*namelen, GFP_NOFS);
1253 if (*name == NULL)
1254 return -ENOMEM;
1255
1256 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1257 *namelen);
1258
1259 if (index)
1260 *index = btrfs_inode_extref_index(eb, extref);
1261 if (parent_objectid)
1262 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1263
1264 return 0;
1265 }
1266
1267 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1268 u32 *namelen, char **name, u64 *index)
1269 {
1270 struct btrfs_inode_ref *ref;
1271
1272 ref = (struct btrfs_inode_ref *)ref_ptr;
1273
1274 *namelen = btrfs_inode_ref_name_len(eb, ref);
1275 *name = kmalloc(*namelen, GFP_NOFS);
1276 if (*name == NULL)
1277 return -ENOMEM;
1278
1279 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1280
1281 if (index)
1282 *index = btrfs_inode_ref_index(eb, ref);
1283
1284 return 0;
1285 }
1286
/*
 * While replaying an inode reference item from the log tree, look at the
 * matching reference item in the subvolume tree and unlink every name found
 * there that is not present in the log item: those names were removed
 * before the fsync and must not survive replay.
 */
1294 static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
1295 struct btrfs_root *root,
1296 struct btrfs_path *path,
1297 struct btrfs_inode *inode,
1298 struct extent_buffer *log_eb,
1299 int log_slot,
1300 struct btrfs_key *key)
1301 {
1302 int ret;
1303 unsigned long ref_ptr;
1304 unsigned long ref_end;
1305 struct extent_buffer *eb;
1306
1307 again:
1308 btrfs_release_path(path);
1309 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1310 if (ret > 0) {
1311 ret = 0;
1312 goto out;
1313 }
1314 if (ret < 0)
1315 goto out;
1316
1317 eb = path->nodes[0];
1318 ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
1319 ref_end = ref_ptr + btrfs_item_size(eb, path->slots[0]);
1320 while (ref_ptr < ref_end) {
1321 char *name = NULL;
1322 int namelen;
1323 u64 parent_id;
1324
1325 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1326 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1327 NULL, &parent_id);
1328 } else {
1329 parent_id = key->offset;
1330 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1331 NULL);
1332 }
1333 if (ret)
1334 goto out;
1335
1336 if (key->type == BTRFS_INODE_EXTREF_KEY)
1337 ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
1338 parent_id, name,
1339 namelen);
1340 else
1341 ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
1342 name, namelen);
1343
1344 if (!ret) {
1345 struct inode *dir;
1346
1347 btrfs_release_path(path);
1348 dir = read_one_inode(root, parent_id);
1349 if (!dir) {
1350 ret = -ENOENT;
1351 kfree(name);
1352 goto out;
1353 }
1354 ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir),
1355 inode, name, namelen);
1356 kfree(name);
1357 iput(dir);
1358 if (ret)
1359 goto out;
1360 goto again;
1361 }
1362
1363 kfree(name);
1364 ref_ptr += namelen;
1365 if (key->type == BTRFS_INODE_EXTREF_KEY)
1366 ref_ptr += sizeof(struct btrfs_inode_extref);
1367 else
1368 ref_ptr += sizeof(struct btrfs_inode_ref);
1369 }
1370 ret = 0;
1371 out:
1372 btrfs_release_path(path);
1373 return ret;
1374 }
1375
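/*
 * Check whether the subvolume tree already has a reference of type @ref_type
 * for @inode in directory @dir with the given name.  For extended refs the
 * key offset is the name hash, so the lookup key is built roughly like:
 *
 *   key.objectid = btrfs_ino(inode);
 *   key.type     = BTRFS_INODE_EXTREF_KEY;
 *   key.offset   = btrfs_extref_hash(btrfs_ino(dir), name, namelen);
 *
 * Returns 1 if the reference exists, 0 if not, or a negative errno.
 */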
1376 static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
1377 const u8 ref_type, const char *name,
1378 const int namelen)
1379 {
1380 struct btrfs_key key;
1381 struct btrfs_path *path;
1382 const u64 parent_id = btrfs_ino(BTRFS_I(dir));
1383 int ret;
1384
1385 path = btrfs_alloc_path();
1386 if (!path)
1387 return -ENOMEM;
1388
1389 key.objectid = btrfs_ino(BTRFS_I(inode));
1390 key.type = ref_type;
1391 if (key.type == BTRFS_INODE_REF_KEY)
1392 key.offset = parent_id;
1393 else
1394 key.offset = btrfs_extref_hash(parent_id, name, namelen);
1395
1396 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
1397 if (ret < 0)
1398 goto out;
1399 if (ret > 0) {
1400 ret = 0;
1401 goto out;
1402 }
1403 if (key.type == BTRFS_INODE_EXTREF_KEY)
1404 ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
1405 path->slots[0], parent_id, name, namelen);
1406 else
1407 ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
1408 name, namelen);
1409
1410 out:
1411 btrfs_free_path(path);
1412 return ret;
1413 }
1414
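/*
 * Add the name @name in directory @dir pointing to @inode, for log replay.
 * If the directory already has an entry with that name pointing to some
 * other inode, the conflicting entry is unlinked first (and the victim's
 * link count is kept at least 1 so a later reference replay can fix it up).
 */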
1415 static int add_link(struct btrfs_trans_handle *trans,
1416 struct inode *dir, struct inode *inode, const char *name,
1417 int namelen, u64 ref_index)
1418 {
1419 struct btrfs_root *root = BTRFS_I(dir)->root;
1420 struct btrfs_dir_item *dir_item;
1421 struct btrfs_key key;
1422 struct btrfs_path *path;
1423 struct inode *other_inode = NULL;
1424 int ret;
1425
1426 path = btrfs_alloc_path();
1427 if (!path)
1428 return -ENOMEM;
1429
1430 dir_item = btrfs_lookup_dir_item(NULL, root, path,
1431 btrfs_ino(BTRFS_I(dir)),
1432 name, namelen, 0);
1433 if (!dir_item) {
1434 btrfs_release_path(path);
1435 goto add_link;
1436 } else if (IS_ERR(dir_item)) {
1437 ret = PTR_ERR(dir_item);
1438 goto out;
1439 }
1440
1441
1442
1443
1444
1445
1446 btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
1447 btrfs_release_path(path);
1448 other_inode = read_one_inode(root, key.objectid);
1449 if (!other_inode) {
1450 ret = -ENOENT;
1451 goto out;
1452 }
1453 ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), BTRFS_I(other_inode),
1454 name, namelen);
1455 if (ret)
1456 goto out;
1457
1458
1459
1460
1461 if (other_inode->i_nlink == 0)
1462 set_nlink(other_inode, 1);
1463 add_link:
1464 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
1465 name, namelen, 0, ref_index);
1466 out:
1467 iput(other_inode);
1468 btrfs_free_path(path);
1469
1470 return ret;
1471 }
1472
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
1479 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1480 struct btrfs_root *root,
1481 struct btrfs_root *log,
1482 struct btrfs_path *path,
1483 struct extent_buffer *eb, int slot,
1484 struct btrfs_key *key)
1485 {
1486 struct inode *dir = NULL;
1487 struct inode *inode = NULL;
1488 unsigned long ref_ptr;
1489 unsigned long ref_end;
1490 char *name = NULL;
1491 int namelen;
1492 int ret;
1493 int search_done = 0;
1494 int log_ref_ver = 0;
1495 u64 parent_objectid;
1496 u64 inode_objectid;
1497 u64 ref_index = 0;
1498 int ref_struct_size;
1499
1500 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1501 ref_end = ref_ptr + btrfs_item_size(eb, slot);
1502
1503 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1504 struct btrfs_inode_extref *r;
1505
1506 ref_struct_size = sizeof(struct btrfs_inode_extref);
1507 log_ref_ver = 1;
1508 r = (struct btrfs_inode_extref *)ref_ptr;
1509 parent_objectid = btrfs_inode_extref_parent(eb, r);
1510 } else {
1511 ref_struct_size = sizeof(struct btrfs_inode_ref);
1512 parent_objectid = key->offset;
1513 }
1514 inode_objectid = key->objectid;
1515
1516
1517
1518
1519
1520
1521
1522 dir = read_one_inode(root, parent_objectid);
1523 if (!dir) {
1524 ret = -ENOENT;
1525 goto out;
1526 }
1527
1528 inode = read_one_inode(root, inode_objectid);
1529 if (!inode) {
1530 ret = -EIO;
1531 goto out;
1532 }
1533
1534 while (ref_ptr < ref_end) {
1535 if (log_ref_ver) {
1536 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1537 &ref_index, &parent_objectid);
1538
1539
1540
1541
1542 if (!dir)
1543 dir = read_one_inode(root, parent_objectid);
1544 if (!dir) {
1545 ret = -ENOENT;
1546 goto out;
1547 }
1548 } else {
1549 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1550 &ref_index);
1551 }
1552 if (ret)
1553 goto out;
1554
1555 ret = inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
1556 btrfs_ino(BTRFS_I(inode)), ref_index,
1557 name, namelen);
1558 if (ret < 0) {
1559 goto out;
1560 } else if (ret == 0) {
1561
1562
1563
1564
1565
1566
1567
1568
1569 if (!search_done) {
1570 ret = __add_inode_ref(trans, root, path, log,
1571 BTRFS_I(dir),
1572 BTRFS_I(inode),
1573 inode_objectid,
1574 parent_objectid,
1575 ref_index, name, namelen,
1576 &search_done);
1577 if (ret) {
1578 if (ret == 1)
1579 ret = 0;
1580 goto out;
1581 }
1582 }
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592 ret = btrfs_inode_ref_exists(inode, dir, key->type,
1593 name, namelen);
1594 if (ret > 0) {
1595 ret = unlink_inode_for_log_replay(trans,
1596 BTRFS_I(dir),
1597 BTRFS_I(inode),
1598 name, namelen);
1599
1600
1601
1602
1603
1604 if (!ret && inode->i_nlink == 0)
1605 set_nlink(inode, 1);
1606 }
1607 if (ret < 0)
1608 goto out;
1609
1610
1611 ret = add_link(trans, dir, inode, name, namelen,
1612 ref_index);
1613 if (ret)
1614 goto out;
1615
1616 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
1617 if (ret)
1618 goto out;
1619 }
1620
1621
1622 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1623 kfree(name);
1624 name = NULL;
1625 if (log_ref_ver) {
1626 iput(dir);
1627 dir = NULL;
1628 }
1629 }
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639 ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
1640 key);
1641 if (ret)
1642 goto out;
1643
1644
1645 ret = overwrite_item(trans, root, path, eb, slot, key);
1646 out:
1647 btrfs_release_path(path);
1648 kfree(name);
1649 iput(dir);
1650 iput(inode);
1651 return ret;
1652 }
1653
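/*
 * count_inode_extrefs() and count_inode_refs() walk all the back references
 * of an inode in the subvolume tree and return how many names point at it.
 * The sum of the two is the real link count that fixup_inode_link_count()
 * applies once replay has finished adding and removing names.
 */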
1654 static int count_inode_extrefs(struct btrfs_root *root,
1655 struct btrfs_inode *inode, struct btrfs_path *path)
1656 {
1657 int ret = 0;
1658 int name_len;
1659 unsigned int nlink = 0;
1660 u32 item_size;
1661 u32 cur_offset = 0;
1662 u64 inode_objectid = btrfs_ino(inode);
1663 u64 offset = 0;
1664 unsigned long ptr;
1665 struct btrfs_inode_extref *extref;
1666 struct extent_buffer *leaf;
1667
1668 while (1) {
1669 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1670 &extref, &offset);
1671 if (ret)
1672 break;
1673
1674 leaf = path->nodes[0];
1675 item_size = btrfs_item_size(leaf, path->slots[0]);
1676 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1677 cur_offset = 0;
1678
1679 while (cur_offset < item_size) {
1680 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1681 name_len = btrfs_inode_extref_name_len(leaf, extref);
1682
1683 nlink++;
1684
1685 cur_offset += name_len + sizeof(*extref);
1686 }
1687
1688 offset++;
1689 btrfs_release_path(path);
1690 }
1691 btrfs_release_path(path);
1692
1693 if (ret < 0 && ret != -ENOENT)
1694 return ret;
1695 return nlink;
1696 }
1697
1698 static int count_inode_refs(struct btrfs_root *root,
1699 struct btrfs_inode *inode, struct btrfs_path *path)
1700 {
1701 int ret;
1702 struct btrfs_key key;
1703 unsigned int nlink = 0;
1704 unsigned long ptr;
1705 unsigned long ptr_end;
1706 int name_len;
1707 u64 ino = btrfs_ino(inode);
1708
1709 key.objectid = ino;
1710 key.type = BTRFS_INODE_REF_KEY;
1711 key.offset = (u64)-1;
1712
1713 while (1) {
1714 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1715 if (ret < 0)
1716 break;
1717 if (ret > 0) {
1718 if (path->slots[0] == 0)
1719 break;
1720 path->slots[0]--;
1721 }
1722 process_slot:
1723 btrfs_item_key_to_cpu(path->nodes[0], &key,
1724 path->slots[0]);
1725 if (key.objectid != ino ||
1726 key.type != BTRFS_INODE_REF_KEY)
1727 break;
1728 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1729 ptr_end = ptr + btrfs_item_size(path->nodes[0],
1730 path->slots[0]);
1731 while (ptr < ptr_end) {
1732 struct btrfs_inode_ref *ref;
1733
1734 ref = (struct btrfs_inode_ref *)ptr;
1735 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1736 ref);
1737 ptr = (unsigned long)(ref + 1) + name_len;
1738 nlink++;
1739 }
1740
1741 if (key.offset == 0)
1742 break;
1743 if (path->slots[0] > 0) {
1744 path->slots[0]--;
1745 goto process_slot;
1746 }
1747 key.offset--;
1748 btrfs_release_path(path);
1749 }
1750 btrfs_release_path(path);
1751
1752 return nlink;
1753 }
1754
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
1765 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1766 struct btrfs_root *root,
1767 struct inode *inode)
1768 {
1769 struct btrfs_path *path;
1770 int ret;
1771 u64 nlink = 0;
1772 u64 ino = btrfs_ino(BTRFS_I(inode));
1773
1774 path = btrfs_alloc_path();
1775 if (!path)
1776 return -ENOMEM;
1777
1778 ret = count_inode_refs(root, BTRFS_I(inode), path);
1779 if (ret < 0)
1780 goto out;
1781
1782 nlink = ret;
1783
1784 ret = count_inode_extrefs(root, BTRFS_I(inode), path);
1785 if (ret < 0)
1786 goto out;
1787
1788 nlink += ret;
1789
1790 ret = 0;
1791
1792 if (nlink != inode->i_nlink) {
1793 set_nlink(inode, nlink);
1794 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
1795 if (ret)
1796 goto out;
1797 }
1798 BTRFS_I(inode)->index_cnt = (u64)-1;
1799
1800 if (inode->i_nlink == 0) {
1801 if (S_ISDIR(inode->i_mode)) {
1802 ret = replay_dir_deletes(trans, root, NULL, path,
1803 ino, 1);
1804 if (ret)
1805 goto out;
1806 }
1807 ret = btrfs_insert_orphan_item(trans, root, ino);
1808 if (ret == -EEXIST)
1809 ret = 0;
1810 }
1811
1812 out:
1813 btrfs_free_path(path);
1814 return ret;
1815 }
1816
1817 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1818 struct btrfs_root *root,
1819 struct btrfs_path *path)
1820 {
1821 int ret;
1822 struct btrfs_key key;
1823 struct inode *inode;
1824
1825 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1826 key.type = BTRFS_ORPHAN_ITEM_KEY;
1827 key.offset = (u64)-1;
1828 while (1) {
1829 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1830 if (ret < 0)
1831 break;
1832
1833 if (ret == 1) {
1834 ret = 0;
1835 if (path->slots[0] == 0)
1836 break;
1837 path->slots[0]--;
1838 }
1839
1840 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1841 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1842 key.type != BTRFS_ORPHAN_ITEM_KEY)
1843 break;
1844
1845 ret = btrfs_del_item(trans, root, path);
1846 if (ret)
1847 break;
1848
1849 btrfs_release_path(path);
1850 inode = read_one_inode(root, key.offset);
1851 if (!inode) {
1852 ret = -EIO;
1853 break;
1854 }
1855
1856 ret = fixup_inode_link_count(trans, root, inode);
1857 iput(inode);
1858 if (ret)
1859 break;
1860
1861
1862
1863
1864
1865
1866 key.offset = (u64)-1;
1867 }
1868 btrfs_release_path(path);
1869 return ret;
1870 }
1871
/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
1878 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1879 struct btrfs_root *root,
1880 struct btrfs_path *path,
1881 u64 objectid)
1882 {
1883 struct btrfs_key key;
1884 int ret = 0;
1885 struct inode *inode;
1886
1887 inode = read_one_inode(root, objectid);
1888 if (!inode)
1889 return -EIO;
1890
1891 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1892 key.type = BTRFS_ORPHAN_ITEM_KEY;
1893 key.offset = objectid;
1894
1895 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1896
1897 btrfs_release_path(path);
1898 if (ret == 0) {
1899 if (!inode->i_nlink)
1900 set_nlink(inode, 1);
1901 else
1902 inc_nlink(inode);
1903 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
1904 } else if (ret == -EEXIST) {
1905 ret = 0;
1906 }
1907 iput(inode);
1908
1909 return ret;
1910 }
1911
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
1917 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1918 struct btrfs_root *root,
1919 u64 dirid, u64 index,
1920 char *name, int name_len,
1921 struct btrfs_key *location)
1922 {
1923 struct inode *inode;
1924 struct inode *dir;
1925 int ret;
1926
1927 inode = read_one_inode(root, location->objectid);
1928 if (!inode)
1929 return -ENOENT;
1930
1931 dir = read_one_inode(root, dirid);
1932 if (!dir) {
1933 iput(inode);
1934 return -EIO;
1935 }
1936
1937 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
1938 name_len, 1, index);
1939
1940
1941
1942 iput(inode);
1943 iput(dir);
1944 return ret;
1945 }
1946
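/*
 * Compare an existing directory entry (@dst_di, in the subvolume tree)
 * against the key and type that the log records for the same name.  Returns
 * 1 if they already match (nothing to do), 0 if the inode the logged entry
 * points to does not exist yet (keep the current entry for now), and
 * otherwise drops the conflicting entry through drop_one_dir_item().
 */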
1947 static int delete_conflicting_dir_entry(struct btrfs_trans_handle *trans,
1948 struct btrfs_inode *dir,
1949 struct btrfs_path *path,
1950 struct btrfs_dir_item *dst_di,
1951 const struct btrfs_key *log_key,
1952 u8 log_type,
1953 bool exists)
1954 {
1955 struct btrfs_key found_key;
1956
1957 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1958
1959 if (found_key.objectid == log_key->objectid &&
1960 found_key.type == log_key->type &&
1961 found_key.offset == log_key->offset &&
1962 btrfs_dir_type(path->nodes[0], dst_di) == log_type)
1963 return 1;
1964
1965
1966
1967
1968
1969 if (!exists)
1970 return 0;
1971
1972 return drop_one_dir_item(trans, path, dir, dst_di);
1973 }
1974
/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside parent directories.
 *
 * Returns 1 if a new name was added to the directory, 0 if nothing needed
 * to be done, or a negative errno on failure.
 */
1991 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1992 struct btrfs_root *root,
1993 struct btrfs_path *path,
1994 struct extent_buffer *eb,
1995 struct btrfs_dir_item *di,
1996 struct btrfs_key *key)
1997 {
1998 char *name;
1999 int name_len;
2000 struct btrfs_dir_item *dir_dst_di;
2001 struct btrfs_dir_item *index_dst_di;
2002 bool dir_dst_matches = false;
2003 bool index_dst_matches = false;
2004 struct btrfs_key log_key;
2005 struct btrfs_key search_key;
2006 struct inode *dir;
2007 u8 log_type;
2008 bool exists;
2009 int ret;
2010 bool update_size = true;
2011 bool name_added = false;
2012
2013 dir = read_one_inode(root, key->objectid);
2014 if (!dir)
2015 return -EIO;
2016
2017 name_len = btrfs_dir_name_len(eb, di);
2018 name = kmalloc(name_len, GFP_NOFS);
2019 if (!name) {
2020 ret = -ENOMEM;
2021 goto out;
2022 }
2023
2024 log_type = btrfs_dir_type(eb, di);
2025 read_extent_buffer(eb, name, (unsigned long)(di + 1),
2026 name_len);
2027
2028 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
2029 ret = btrfs_lookup_inode(trans, root, path, &log_key, 0);
2030 btrfs_release_path(path);
2031 if (ret < 0)
2032 goto out;
2033 exists = (ret == 0);
2034 ret = 0;
2035
2036 dir_dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
2037 name, name_len, 1);
2038 if (IS_ERR(dir_dst_di)) {
2039 ret = PTR_ERR(dir_dst_di);
2040 goto out;
2041 } else if (dir_dst_di) {
2042 ret = delete_conflicting_dir_entry(trans, BTRFS_I(dir), path,
2043 dir_dst_di, &log_key, log_type,
2044 exists);
2045 if (ret < 0)
2046 goto out;
2047 dir_dst_matches = (ret == 1);
2048 }
2049
2050 btrfs_release_path(path);
2051
2052 index_dst_di = btrfs_lookup_dir_index_item(trans, root, path,
2053 key->objectid, key->offset,
2054 name, name_len, 1);
2055 if (IS_ERR(index_dst_di)) {
2056 ret = PTR_ERR(index_dst_di);
2057 goto out;
2058 } else if (index_dst_di) {
2059 ret = delete_conflicting_dir_entry(trans, BTRFS_I(dir), path,
2060 index_dst_di, &log_key,
2061 log_type, exists);
2062 if (ret < 0)
2063 goto out;
2064 index_dst_matches = (ret == 1);
2065 }
2066
2067 btrfs_release_path(path);
2068
2069 if (dir_dst_matches && index_dst_matches) {
2070 ret = 0;
2071 update_size = false;
2072 goto out;
2073 }
2074
2075
2076
2077
2078
2079 search_key.objectid = log_key.objectid;
2080 search_key.type = BTRFS_INODE_REF_KEY;
2081 search_key.offset = key->objectid;
2082 ret = backref_in_log(root->log_root, &search_key, 0, name, name_len);
2083 if (ret < 0) {
2084 goto out;
2085 } else if (ret) {
2086
2087 ret = 0;
2088 update_size = false;
2089 goto out;
2090 }
2091
2092 search_key.objectid = log_key.objectid;
2093 search_key.type = BTRFS_INODE_EXTREF_KEY;
2094 search_key.offset = key->objectid;
2095 ret = backref_in_log(root->log_root, &search_key, key->objectid, name,
2096 name_len);
2097 if (ret < 0) {
2098 goto out;
2099 } else if (ret) {
2100
2101 ret = 0;
2102 update_size = false;
2103 goto out;
2104 }
2105 btrfs_release_path(path);
2106 ret = insert_one_name(trans, root, key->objectid, key->offset,
2107 name, name_len, &log_key);
2108 if (ret && ret != -ENOENT && ret != -EEXIST)
2109 goto out;
2110 if (!ret)
2111 name_added = true;
2112 update_size = false;
2113 ret = 0;
2114
2115 out:
2116 if (!ret && update_size) {
2117 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
2118 ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
2119 }
2120 kfree(name);
2121 iput(dir);
2122 if (!ret && name_added)
2123 ret = 1;
2124 return ret;
2125 }
2126
2127
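/*
 * Replay one BTRFS_DIR_INDEX_KEY item from the log.  Directory entries are
 * only replayed from index keys (the matching DIR_ITEM keys are recreated
 * from them).  If a new name was added for a non-directory inode, the inode
 * is also recorded in the fixup tree so that its link count is recomputed at
 * the end of replay.
 */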
2128 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
2129 struct btrfs_root *root,
2130 struct btrfs_path *path,
2131 struct extent_buffer *eb, int slot,
2132 struct btrfs_key *key)
2133 {
2134 int ret;
2135 struct btrfs_dir_item *di;
2136
2137
2138 ASSERT(key->type == BTRFS_DIR_INDEX_KEY);
2139
2140 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2141 ret = replay_one_name(trans, root, path, eb, di, key);
2142 if (ret < 0)
2143 return ret;
2144
	/*
	 * If this entry refers to a non-directory (directories can not have
	 * hardlinks) and it was added to the subvolume tree (ret == 1), the
	 * inode's link count may no longer be accurate: the inode item copied
	 * from the log may predate this new name.  Record the inode in the
	 * fixup tree so that fixup_inode_link_counts() recomputes the link
	 * count from the back references once the whole log has been
	 * replayed.
	 */
2171 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
2172 struct btrfs_path *fixup_path;
2173 struct btrfs_key di_key;
2174
2175 fixup_path = btrfs_alloc_path();
2176 if (!fixup_path)
2177 return -ENOMEM;
2178
2179 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2180 ret = link_to_fixup_dir(trans, root, fixup_path, di_key.objectid);
2181 btrfs_free_path(fixup_path);
2182 }
2183
2184 return ret;
2185 }

/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the directory was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
2198 static noinline int find_dir_range(struct btrfs_root *root,
2199 struct btrfs_path *path,
2200 u64 dirid,
2201 u64 *start_ret, u64 *end_ret)
2202 {
2203 struct btrfs_key key;
2204 u64 found_end;
2205 struct btrfs_dir_log_item *item;
2206 int ret;
2207 int nritems;
2208
2209 if (*start_ret == (u64)-1)
2210 return 1;
2211
2212 key.objectid = dirid;
2213 key.type = BTRFS_DIR_LOG_INDEX_KEY;
2214 key.offset = *start_ret;
2215
2216 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2217 if (ret < 0)
2218 goto out;
2219 if (ret > 0) {
2220 if (path->slots[0] == 0)
2221 goto out;
2222 path->slots[0]--;
2223 }
2224 if (ret != 0)
2225 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2226
2227 if (key.type != BTRFS_DIR_LOG_INDEX_KEY || key.objectid != dirid) {
2228 ret = 1;
2229 goto next;
2230 }
2231 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2232 struct btrfs_dir_log_item);
2233 found_end = btrfs_dir_log_end(path->nodes[0], item);
2234
2235 if (*start_ret >= key.offset && *start_ret <= found_end) {
2236 ret = 0;
2237 *start_ret = key.offset;
2238 *end_ret = found_end;
2239 goto out;
2240 }
2241 ret = 1;
2242 next:
2243
2244 nritems = btrfs_header_nritems(path->nodes[0]);
2245 path->slots[0]++;
2246 if (path->slots[0] >= nritems) {
2247 ret = btrfs_next_leaf(root, path);
2248 if (ret)
2249 goto out;
2250 }
2251
2252 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2253
2254 if (key.type != BTRFS_DIR_LOG_INDEX_KEY || key.objectid != dirid) {
2255 ret = 1;
2256 goto out;
2257 }
2258 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2259 struct btrfs_dir_log_item);
2260 found_end = btrfs_dir_log_end(path->nodes[0], item);
2261 *start_ret = key.offset;
2262 *end_ret = found_end;
2263 ret = 0;
2264 out:
2265 btrfs_release_path(path);
2266 return ret;
2267 }
2268
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
2274 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
2275 struct btrfs_root *log,
2276 struct btrfs_path *path,
2277 struct btrfs_path *log_path,
2278 struct inode *dir,
2279 struct btrfs_key *dir_key)
2280 {
2281 struct btrfs_root *root = BTRFS_I(dir)->root;
2282 int ret;
2283 struct extent_buffer *eb;
2284 int slot;
2285 struct btrfs_dir_item *di;
2286 int name_len;
2287 char *name;
2288 struct inode *inode = NULL;
2289 struct btrfs_key location;
2290
2291
2292
2293
2294
2295
2296
2297 ASSERT(dir_key->type == BTRFS_DIR_INDEX_KEY);
2298
2299 eb = path->nodes[0];
2300 slot = path->slots[0];
2301 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2302 name_len = btrfs_dir_name_len(eb, di);
2303 name = kmalloc(name_len, GFP_NOFS);
2304 if (!name) {
2305 ret = -ENOMEM;
2306 goto out;
2307 }
2308
2309 read_extent_buffer(eb, name, (unsigned long)(di + 1), name_len);
2310
2311 if (log) {
2312 struct btrfs_dir_item *log_di;
2313
2314 log_di = btrfs_lookup_dir_index_item(trans, log, log_path,
2315 dir_key->objectid,
2316 dir_key->offset,
2317 name, name_len, 0);
2318 if (IS_ERR(log_di)) {
2319 ret = PTR_ERR(log_di);
2320 goto out;
2321 } else if (log_di) {
2322
2323 ret = 0;
2324 goto out;
2325 }
2326 }
2327
2328 btrfs_dir_item_key_to_cpu(eb, di, &location);
2329 btrfs_release_path(path);
2330 btrfs_release_path(log_path);
2331 inode = read_one_inode(root, location.objectid);
2332 if (!inode) {
2333 ret = -EIO;
2334 goto out;
2335 }
2336
2337 ret = link_to_fixup_dir(trans, root, path, location.objectid);
2338 if (ret)
2339 goto out;
2340
2341 inc_nlink(inode);
2342 ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir), BTRFS_I(inode),
2343 name, name_len);
2344
2345
2346
2347
2348
2349 out:
2350 btrfs_release_path(path);
2351 btrfs_release_path(log_path);
2352 kfree(name);
2353 iput(inode);
2354 return ret;
2355 }
2356
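/*
 * Delete, from the subvolume tree, every xattr of inode @ino that is not
 * present in the log tree.  The log is authoritative for the inode's xattrs,
 * so any xattr missing from it was removed before the fsync and must not
 * survive replay.
 */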
2357 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2358 struct btrfs_root *root,
2359 struct btrfs_root *log,
2360 struct btrfs_path *path,
2361 const u64 ino)
2362 {
2363 struct btrfs_key search_key;
2364 struct btrfs_path *log_path;
2365 int i;
2366 int nritems;
2367 int ret;
2368
2369 log_path = btrfs_alloc_path();
2370 if (!log_path)
2371 return -ENOMEM;
2372
2373 search_key.objectid = ino;
2374 search_key.type = BTRFS_XATTR_ITEM_KEY;
2375 search_key.offset = 0;
2376 again:
2377 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2378 if (ret < 0)
2379 goto out;
2380 process_leaf:
2381 nritems = btrfs_header_nritems(path->nodes[0]);
2382 for (i = path->slots[0]; i < nritems; i++) {
2383 struct btrfs_key key;
2384 struct btrfs_dir_item *di;
2385 struct btrfs_dir_item *log_di;
2386 u32 total_size;
2387 u32 cur;
2388
2389 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2390 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2391 ret = 0;
2392 goto out;
2393 }
2394
2395 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2396 total_size = btrfs_item_size(path->nodes[0], i);
2397 cur = 0;
2398 while (cur < total_size) {
2399 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2400 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2401 u32 this_len = sizeof(*di) + name_len + data_len;
2402 char *name;
2403
2404 name = kmalloc(name_len, GFP_NOFS);
2405 if (!name) {
2406 ret = -ENOMEM;
2407 goto out;
2408 }
2409 read_extent_buffer(path->nodes[0], name,
2410 (unsigned long)(di + 1), name_len);
2411
2412 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2413 name, name_len, 0);
2414 btrfs_release_path(log_path);
2415 if (!log_di) {
2416
2417 btrfs_release_path(path);
2418 di = btrfs_lookup_xattr(trans, root, path, ino,
2419 name, name_len, -1);
2420 kfree(name);
2421 if (IS_ERR(di)) {
2422 ret = PTR_ERR(di);
2423 goto out;
2424 }
2425 ASSERT(di);
2426 ret = btrfs_delete_one_dir_name(trans, root,
2427 path, di);
2428 if (ret)
2429 goto out;
2430 btrfs_release_path(path);
2431 search_key = key;
2432 goto again;
2433 }
2434 kfree(name);
2435 if (IS_ERR(log_di)) {
2436 ret = PTR_ERR(log_di);
2437 goto out;
2438 }
2439 cur += this_len;
2440 di = (struct btrfs_dir_item *)((char *)di + this_len);
2441 }
2442 }
2443 ret = btrfs_next_leaf(root, path);
2444 if (ret > 0)
2445 ret = 0;
2446 else if (ret == 0)
2447 goto process_leaf;
2448 out:
2449 btrfs_free_path(log_path);
2450 btrfs_release_path(path);
2451 return ret;
2452 }
2453
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
2465 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2466 struct btrfs_root *root,
2467 struct btrfs_root *log,
2468 struct btrfs_path *path,
2469 u64 dirid, int del_all)
2470 {
2471 u64 range_start;
2472 u64 range_end;
2473 int ret = 0;
2474 struct btrfs_key dir_key;
2475 struct btrfs_key found_key;
2476 struct btrfs_path *log_path;
2477 struct inode *dir;
2478
2479 dir_key.objectid = dirid;
2480 dir_key.type = BTRFS_DIR_INDEX_KEY;
2481 log_path = btrfs_alloc_path();
2482 if (!log_path)
2483 return -ENOMEM;
2484
2485 dir = read_one_inode(root, dirid);
2486
2487
2488
2489
2490 if (!dir) {
2491 btrfs_free_path(log_path);
2492 return 0;
2493 }
2494
2495 range_start = 0;
2496 range_end = 0;
2497 while (1) {
2498 if (del_all)
2499 range_end = (u64)-1;
2500 else {
2501 ret = find_dir_range(log, path, dirid,
2502 &range_start, &range_end);
2503 if (ret < 0)
2504 goto out;
2505 else if (ret > 0)
2506 break;
2507 }
2508
2509 dir_key.offset = range_start;
2510 while (1) {
2511 int nritems;
2512 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2513 0, 0);
2514 if (ret < 0)
2515 goto out;
2516
2517 nritems = btrfs_header_nritems(path->nodes[0]);
2518 if (path->slots[0] >= nritems) {
2519 ret = btrfs_next_leaf(root, path);
2520 if (ret == 1)
2521 break;
2522 else if (ret < 0)
2523 goto out;
2524 }
2525 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2526 path->slots[0]);
2527 if (found_key.objectid != dirid ||
2528 found_key.type != dir_key.type) {
2529 ret = 0;
2530 goto out;
2531 }
2532
2533 if (found_key.offset > range_end)
2534 break;
2535
2536 ret = check_item_in_log(trans, log, path,
2537 log_path, dir,
2538 &found_key);
2539 if (ret)
2540 goto out;
2541 if (found_key.offset == (u64)-1)
2542 break;
2543 dir_key.offset = found_key.offset + 1;
2544 }
2545 btrfs_release_path(path);
2546 if (range_end == (u64)-1)
2547 break;
2548 range_start = range_end + 1;
2549 }
2550 ret = 0;
2551 out:
2552 btrfs_release_path(path);
2553 btrfs_free_path(log_path);
2554 iput(dir);
2555 return ret;
2556 }
2557
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
2569 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2570 struct walk_control *wc, u64 gen, int level)
2571 {
2572 int nritems;
2573 struct btrfs_path *path;
2574 struct btrfs_root *root = wc->replay_dest;
2575 struct btrfs_key key;
2576 int i;
2577 int ret;
2578
2579 ret = btrfs_read_extent_buffer(eb, gen, level, NULL);
2580 if (ret)
2581 return ret;
2582
2583 level = btrfs_header_level(eb);
2584
2585 if (level != 0)
2586 return 0;
2587
2588 path = btrfs_alloc_path();
2589 if (!path)
2590 return -ENOMEM;
2591
2592 nritems = btrfs_header_nritems(eb);
2593 for (i = 0; i < nritems; i++) {
2594 btrfs_item_key_to_cpu(eb, &key, i);
2595
2596
2597 if (key.type == BTRFS_INODE_ITEM_KEY &&
2598 wc->stage == LOG_WALK_REPLAY_INODES) {
2599 struct btrfs_inode_item *inode_item;
2600 u32 mode;
2601
2602 inode_item = btrfs_item_ptr(eb, i,
2603 struct btrfs_inode_item);
2604
2605
2606
2607
2608
2609
2610
2611
2612 if (btrfs_inode_nlink(eb, inode_item) == 0) {
2613 wc->ignore_cur_inode = true;
2614 continue;
2615 } else {
2616 wc->ignore_cur_inode = false;
2617 }
2618 ret = replay_xattr_deletes(wc->trans, root, log,
2619 path, key.objectid);
2620 if (ret)
2621 break;
2622 mode = btrfs_inode_mode(eb, inode_item);
2623 if (S_ISDIR(mode)) {
2624 ret = replay_dir_deletes(wc->trans,
2625 root, log, path, key.objectid, 0);
2626 if (ret)
2627 break;
2628 }
2629 ret = overwrite_item(wc->trans, root, path,
2630 eb, i, &key);
2631 if (ret)
2632 break;
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642 if (S_ISREG(mode)) {
2643 struct btrfs_drop_extents_args drop_args = { 0 };
2644 struct inode *inode;
2645 u64 from;
2646
2647 inode = read_one_inode(root, key.objectid);
2648 if (!inode) {
2649 ret = -EIO;
2650 break;
2651 }
2652 from = ALIGN(i_size_read(inode),
2653 root->fs_info->sectorsize);
2654 drop_args.start = from;
2655 drop_args.end = (u64)-1;
2656 drop_args.drop_cache = true;
2657 ret = btrfs_drop_extents(wc->trans, root,
2658 BTRFS_I(inode),
2659 &drop_args);
2660 if (!ret) {
2661 inode_sub_bytes(inode,
2662 drop_args.bytes_found);
2663
2664 ret = btrfs_update_inode(wc->trans,
2665 root, BTRFS_I(inode));
2666 }
2667 iput(inode);
2668 if (ret)
2669 break;
2670 }
2671
2672 ret = link_to_fixup_dir(wc->trans, root,
2673 path, key.objectid);
2674 if (ret)
2675 break;
2676 }
2677
2678 if (wc->ignore_cur_inode)
2679 continue;
2680
2681 if (key.type == BTRFS_DIR_INDEX_KEY &&
2682 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2683 ret = replay_one_dir_item(wc->trans, root, path,
2684 eb, i, &key);
2685 if (ret)
2686 break;
2687 }
2688
2689 if (wc->stage < LOG_WALK_REPLAY_ALL)
2690 continue;
2691
2692
2693 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2694 ret = overwrite_item(wc->trans, root, path,
2695 eb, i, &key);
2696 if (ret)
2697 break;
2698 } else if (key.type == BTRFS_INODE_REF_KEY ||
2699 key.type == BTRFS_INODE_EXTREF_KEY) {
2700 ret = add_inode_ref(wc->trans, root, log, path,
2701 eb, i, &key);
2702 if (ret && ret != -ENOENT)
2703 break;
2704 ret = 0;
2705 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2706 ret = replay_one_extent(wc->trans, root, path,
2707 eb, i, &key);
2708 if (ret)
2709 break;
2710 }
2711
2712
2713
2714
2715
2716
2717 }
2718 btrfs_free_path(path);
2719 return ret;
2720 }
2721
2722
2723
2724
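/*
 * Drop the space reservation held by a log tree block that is freed while
 * not inside a transaction (e.g. when an old log tree is thrown away at
 * mount time): subtract one nodesize from the block group's and the
 * space_info's reserved counters.
 */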
2725 static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start)
2726 {
2727 struct btrfs_block_group *cache;
2728
2729 cache = btrfs_lookup_block_group(fs_info, start);
2730 if (!cache) {
2731 btrfs_err(fs_info, "unable to find block group for %llu", start);
2732 return;
2733 }
2734
2735 spin_lock(&cache->space_info->lock);
2736 spin_lock(&cache->lock);
2737 cache->reserved -= fs_info->nodesize;
2738 cache->space_info->bytes_reserved -= fs_info->nodesize;
2739 spin_unlock(&cache->lock);
2740 spin_unlock(&cache->space_info->lock);
2741
2742 btrfs_put_block_group(cache);
2743 }
2744
2745 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2746 struct btrfs_root *root,
2747 struct btrfs_path *path, int *level,
2748 struct walk_control *wc)
2749 {
2750 struct btrfs_fs_info *fs_info = root->fs_info;
2751 u64 bytenr;
2752 u64 ptr_gen;
2753 struct extent_buffer *next;
2754 struct extent_buffer *cur;
2755 u32 blocksize;
2756 int ret = 0;
2757
2758 while (*level > 0) {
2759 struct btrfs_key first_key;
2760
2761 cur = path->nodes[*level];
2762
2763 WARN_ON(btrfs_header_level(cur) != *level);
2764
2765 if (path->slots[*level] >=
2766 btrfs_header_nritems(cur))
2767 break;
2768
2769 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2770 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2771 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
2772 blocksize = fs_info->nodesize;
2773
2774 next = btrfs_find_create_tree_block(fs_info, bytenr,
2775 btrfs_header_owner(cur),
2776 *level - 1);
2777 if (IS_ERR(next))
2778 return PTR_ERR(next);
2779
2780 if (*level == 1) {
2781 ret = wc->process_func(root, next, wc, ptr_gen,
2782 *level - 1);
2783 if (ret) {
2784 free_extent_buffer(next);
2785 return ret;
2786 }
2787
2788 path->slots[*level]++;
2789 if (wc->free) {
2790 ret = btrfs_read_extent_buffer(next, ptr_gen,
2791 *level - 1, &first_key);
2792 if (ret) {
2793 free_extent_buffer(next);
2794 return ret;
2795 }
2796
2797 if (trans) {
2798 btrfs_tree_lock(next);
2799 btrfs_clean_tree_block(next);
2800 btrfs_wait_tree_block_writeback(next);
2801 btrfs_tree_unlock(next);
2802 ret = btrfs_pin_reserved_extent(trans,
2803 bytenr, blocksize);
2804 if (ret) {
2805 free_extent_buffer(next);
2806 return ret;
2807 }
2808 btrfs_redirty_list_add(
2809 trans->transaction, next);
2810 } else {
2811 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2812 clear_extent_buffer_dirty(next);
2813 unaccount_log_buffer(fs_info, bytenr);
2814 }
2815 }
2816 free_extent_buffer(next);
2817 continue;
2818 }
2819 ret = btrfs_read_extent_buffer(next, ptr_gen, *level - 1, &first_key);
2820 if (ret) {
2821 free_extent_buffer(next);
2822 return ret;
2823 }
2824
2825 if (path->nodes[*level-1])
2826 free_extent_buffer(path->nodes[*level-1]);
2827 path->nodes[*level-1] = next;
2828 *level = btrfs_header_level(next);
2829 path->slots[*level] = 0;
2830 cond_resched();
2831 }
2832 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2833
2834 cond_resched();
2835 return 0;
2836 }
2837
2838 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2839 struct btrfs_root *root,
2840 struct btrfs_path *path, int *level,
2841 struct walk_control *wc)
2842 {
2843 struct btrfs_fs_info *fs_info = root->fs_info;
2844 int i;
2845 int slot;
2846 int ret;
2847
2848 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2849 slot = path->slots[i];
2850 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2851 path->slots[i]++;
2852 *level = i;
2853 WARN_ON(*level == 0);
2854 return 0;
2855 } else {
2856 ret = wc->process_func(root, path->nodes[*level], wc,
2857 btrfs_header_generation(path->nodes[*level]),
2858 *level);
2859 if (ret)
2860 return ret;
2861
2862 if (wc->free) {
2863 struct extent_buffer *next;
2864
2865 next = path->nodes[*level];
2866
2867 if (trans) {
2868 btrfs_tree_lock(next);
2869 btrfs_clean_tree_block(next);
2870 btrfs_wait_tree_block_writeback(next);
2871 btrfs_tree_unlock(next);
2872 ret = btrfs_pin_reserved_extent(trans,
2873 path->nodes[*level]->start,
2874 path->nodes[*level]->len);
2875 if (ret)
2876 return ret;
2877 btrfs_redirty_list_add(trans->transaction,
2878 next);
2879 } else {
2880 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2881 clear_extent_buffer_dirty(next);
2882
2883 unaccount_log_buffer(fs_info,
2884 path->nodes[*level]->start);
2885 }
2886 }
2887 free_extent_buffer(path->nodes[*level]);
2888 path->nodes[*level] = NULL;
2889 *level = i + 1;
2890 }
2891 }
2892 return 1;
2893 }
2894
2895
2896
2897
2898
2899
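/*
 * Walk an entire log tree, calling wc->process_func on every block.  When
 * wc->free is set each block is also released after being processed: with
 * a transaction handle the block is cleaned and its extent pinned again,
 * otherwise its reservation is dropped via unaccount_log_buffer().
 */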
2900 static int walk_log_tree(struct btrfs_trans_handle *trans,
2901 struct btrfs_root *log, struct walk_control *wc)
2902 {
2903 struct btrfs_fs_info *fs_info = log->fs_info;
2904 int ret = 0;
2905 int wret;
2906 int level;
2907 struct btrfs_path *path;
2908 int orig_level;
2909
2910 path = btrfs_alloc_path();
2911 if (!path)
2912 return -ENOMEM;
2913
2914 level = btrfs_header_level(log->node);
2915 orig_level = level;
2916 path->nodes[level] = log->node;
2917 atomic_inc(&log->node->refs);
2918 path->slots[level] = 0;
2919
2920 while (1) {
2921 wret = walk_down_log_tree(trans, log, path, &level, wc);
2922 if (wret > 0)
2923 break;
2924 if (wret < 0) {
2925 ret = wret;
2926 goto out;
2927 }
2928
2929 wret = walk_up_log_tree(trans, log, path, &level, wc);
2930 if (wret > 0)
2931 break;
2932 if (wret < 0) {
2933 ret = wret;
2934 goto out;
2935 }
2936 }
2937
2938
2939 if (path->nodes[orig_level]) {
2940 ret = wc->process_func(log, path->nodes[orig_level], wc,
2941 btrfs_header_generation(path->nodes[orig_level]),
2942 orig_level);
2943 if (ret)
2944 goto out;
2945 if (wc->free) {
2946 struct extent_buffer *next;
2947
2948 next = path->nodes[orig_level];
2949
2950 if (trans) {
2951 btrfs_tree_lock(next);
2952 btrfs_clean_tree_block(next);
2953 btrfs_wait_tree_block_writeback(next);
2954 btrfs_tree_unlock(next);
2955 ret = btrfs_pin_reserved_extent(trans,
2956 next->start, next->len);
2957 if (ret)
2958 goto out;
2959 btrfs_redirty_list_add(trans->transaction, next);
2960 } else {
2961 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2962 clear_extent_buffer_dirty(next);
2963 unaccount_log_buffer(fs_info, next->start);
2964 }
2965 }
2966 }
2967
2968 out:
2969 btrfs_free_path(path);
2970 return ret;
2971 }
2972
2973
2974
2975
2976
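/*
 * Write the root item of this log tree into the log root tree.  On the
 * first log transaction the item does not exist yet, so it is inserted;
 * afterwards it is updated in place.
 */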
2977 static int update_log_root(struct btrfs_trans_handle *trans,
2978 struct btrfs_root *log,
2979 struct btrfs_root_item *root_item)
2980 {
2981 struct btrfs_fs_info *fs_info = log->fs_info;
2982 int ret;
2983
2984 if (log->log_transid == 1) {
2985
2986 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2987 &log->root_key, root_item);
2988 } else {
2989 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2990 &log->root_key, root_item);
2991 }
2992 return ret;
2993 }
2994
2995 static void wait_log_commit(struct btrfs_root *root, int transid)
2996 {
2997 DEFINE_WAIT(wait);
2998 int index = transid % 2;
2999
3000
3001
3002
3003
3004
3005 for (;;) {
3006 prepare_to_wait(&root->log_commit_wait[index],
3007 &wait, TASK_UNINTERRUPTIBLE);
3008
3009 if (!(root->log_transid_committed < transid &&
3010 atomic_read(&root->log_commit[index])))
3011 break;
3012
3013 mutex_unlock(&root->log_mutex);
3014 schedule();
3015 mutex_lock(&root->log_mutex);
3016 }
3017 finish_wait(&root->log_commit_wait[index], &wait);
3018 }
3019
3020 static void wait_for_writer(struct btrfs_root *root)
3021 {
3022 DEFINE_WAIT(wait);
3023
3024 for (;;) {
3025 prepare_to_wait(&root->log_writer_wait, &wait,
3026 TASK_UNINTERRUPTIBLE);
3027 if (!atomic_read(&root->log_writers))
3028 break;
3029
3030 mutex_unlock(&root->log_mutex);
3031 schedule();
3032 mutex_lock(&root->log_mutex);
3033 }
3034 finish_wait(&root->log_writer_wait, &wait);
3035 }
3036
3037 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
3038 struct btrfs_log_ctx *ctx)
3039 {
3040 mutex_lock(&root->log_mutex);
3041 list_del_init(&ctx->list);
3042 mutex_unlock(&root->log_mutex);
3043 }
3044
3045
3046
3047
3048
3049 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
3050 int index, int error)
3051 {
3052 struct btrfs_log_ctx *ctx;
3053 struct btrfs_log_ctx *safe;
3054
3055 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
3056 list_del_init(&ctx->list);
3057 ctx->log_ret = error;
3058 }
3059 }
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
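/*
 * Commit the current log transaction for @root: write the dirty blocks of
 * the inode's log tree, update its root item in the log root tree, write
 * that tree's blocks as well, and finally write the super blocks so they
 * point at the new log root.  Concurrent waiters for the same log transid
 * get the result through their btrfs_log_ctx.  A return value of
 * BTRFS_LOG_FORCE_COMMIT tells the caller to fall back to a full
 * transaction commit.
 */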
3073 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3074 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3075 {
3076 int index1;
3077 int index2;
3078 int mark;
3079 int ret;
3080 struct btrfs_fs_info *fs_info = root->fs_info;
3081 struct btrfs_root *log = root->log_root;
3082 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3083 struct btrfs_root_item new_root_item;
3084 int log_transid = 0;
3085 struct btrfs_log_ctx root_log_ctx;
3086 struct blk_plug plug;
3087 u64 log_root_start;
3088 u64 log_root_level;
3089
3090 mutex_lock(&root->log_mutex);
3091 log_transid = ctx->log_transid;
3092 if (root->log_transid_committed >= log_transid) {
3093 mutex_unlock(&root->log_mutex);
3094 return ctx->log_ret;
3095 }
3096
3097 index1 = log_transid % 2;
3098 if (atomic_read(&root->log_commit[index1])) {
3099 wait_log_commit(root, log_transid);
3100 mutex_unlock(&root->log_mutex);
3101 return ctx->log_ret;
3102 }
3103 ASSERT(log_transid == root->log_transid);
3104 atomic_set(&root->log_commit[index1], 1);
3105
3106
3107 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3108 wait_log_commit(root, log_transid - 1);
3109
3110 while (1) {
3111 int batch = atomic_read(&root->log_batch);
3112
3113 if (!btrfs_test_opt(fs_info, SSD) &&
3114 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3115 mutex_unlock(&root->log_mutex);
3116 schedule_timeout_uninterruptible(1);
3117 mutex_lock(&root->log_mutex);
3118 }
3119 wait_for_writer(root);
3120 if (batch == atomic_read(&root->log_batch))
3121 break;
3122 }
3123
3124
3125 if (btrfs_need_log_full_commit(trans)) {
3126 ret = BTRFS_LOG_FORCE_COMMIT;
3127 mutex_unlock(&root->log_mutex);
3128 goto out;
3129 }
3130
3131 if (log_transid % 2 == 0)
3132 mark = EXTENT_DIRTY;
3133 else
3134 mark = EXTENT_NEW;
3135
3136
3137
3138
3139 blk_start_plug(&plug);
3140 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150 if (ret == -EAGAIN && btrfs_is_zoned(fs_info))
3151 ret = 0;
3152 if (ret) {
3153 blk_finish_plug(&plug);
3154 btrfs_abort_transaction(trans, ret);
3155 btrfs_set_log_full_commit(trans);
3156 mutex_unlock(&root->log_mutex);
3157 goto out;
3158 }
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173 btrfs_set_root_node(&log->root_item, log->node);
3174 memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
3175
3176 root->log_transid++;
3177 log->log_transid = root->log_transid;
3178 root->log_start_pid = 0;
3179
3180
3181
3182
3183
3184 mutex_unlock(&root->log_mutex);
3185
3186 if (btrfs_is_zoned(fs_info)) {
3187 mutex_lock(&fs_info->tree_root->log_mutex);
3188 if (!log_root_tree->node) {
3189 ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
3190 if (ret) {
3191 mutex_unlock(&fs_info->tree_root->log_mutex);
3192 blk_finish_plug(&plug);
3193 goto out;
3194 }
3195 }
3196 mutex_unlock(&fs_info->tree_root->log_mutex);
3197 }
3198
3199 btrfs_init_log_ctx(&root_log_ctx, NULL);
3200
3201 mutex_lock(&log_root_tree->log_mutex);
3202
3203 index2 = log_root_tree->log_transid % 2;
3204 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3205 root_log_ctx.log_transid = log_root_tree->log_transid;
3206
3207
3208
3209
3210
3211
3212 ret = update_log_root(trans, log, &new_root_item);
3213 if (ret) {
3214 if (!list_empty(&root_log_ctx.list))
3215 list_del_init(&root_log_ctx.list);
3216
3217 blk_finish_plug(&plug);
3218 btrfs_set_log_full_commit(trans);
3219
3220 if (ret != -ENOSPC) {
3221 btrfs_abort_transaction(trans, ret);
3222 mutex_unlock(&log_root_tree->log_mutex);
3223 goto out;
3224 }
3225 btrfs_wait_tree_log_extents(log, mark);
3226 mutex_unlock(&log_root_tree->log_mutex);
3227 ret = BTRFS_LOG_FORCE_COMMIT;
3228 goto out;
3229 }
3230
3231 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3232 blk_finish_plug(&plug);
3233 list_del_init(&root_log_ctx.list);
3234 mutex_unlock(&log_root_tree->log_mutex);
3235 ret = root_log_ctx.log_ret;
3236 goto out;
3237 }
3238
3239 index2 = root_log_ctx.log_transid % 2;
3240 if (atomic_read(&log_root_tree->log_commit[index2])) {
3241 blk_finish_plug(&plug);
3242 ret = btrfs_wait_tree_log_extents(log, mark);
3243 wait_log_commit(log_root_tree,
3244 root_log_ctx.log_transid);
3245 mutex_unlock(&log_root_tree->log_mutex);
3246 if (!ret)
3247 ret = root_log_ctx.log_ret;
3248 goto out;
3249 }
3250 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3251 atomic_set(&log_root_tree->log_commit[index2], 1);
3252
3253 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3254 wait_log_commit(log_root_tree,
3255 root_log_ctx.log_transid - 1);
3256 }
3257
3258
3259
3260
3261
3262 if (btrfs_need_log_full_commit(trans)) {
3263 blk_finish_plug(&plug);
3264 btrfs_wait_tree_log_extents(log, mark);
3265 mutex_unlock(&log_root_tree->log_mutex);
3266 ret = BTRFS_LOG_FORCE_COMMIT;
3267 goto out_wake_log_root;
3268 }
3269
3270 ret = btrfs_write_marked_extents(fs_info,
3271 &log_root_tree->dirty_log_pages,
3272 EXTENT_DIRTY | EXTENT_NEW);
3273 blk_finish_plug(&plug);
3274
3275
3276
3277
3278
3279 if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) {
3280 btrfs_set_log_full_commit(trans);
3281 btrfs_wait_tree_log_extents(log, mark);
3282 mutex_unlock(&log_root_tree->log_mutex);
3283 goto out_wake_log_root;
3284 } else if (ret) {
3285 btrfs_set_log_full_commit(trans);
3286 btrfs_abort_transaction(trans, ret);
3287 mutex_unlock(&log_root_tree->log_mutex);
3288 goto out_wake_log_root;
3289 }
3290 ret = btrfs_wait_tree_log_extents(log, mark);
3291 if (!ret)
3292 ret = btrfs_wait_tree_log_extents(log_root_tree,
3293 EXTENT_NEW | EXTENT_DIRTY);
3294 if (ret) {
3295 btrfs_set_log_full_commit(trans);
3296 mutex_unlock(&log_root_tree->log_mutex);
3297 goto out_wake_log_root;
3298 }
3299
3300 log_root_start = log_root_tree->node->start;
3301 log_root_level = btrfs_header_level(log_root_tree->node);
3302 log_root_tree->log_transid++;
3303 mutex_unlock(&log_root_tree->log_mutex);
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320 mutex_lock(&fs_info->tree_log_mutex);
3321
3322
3323
3324
3325
3326
3327
3328
3329 if (BTRFS_FS_ERROR(fs_info)) {
3330 ret = -EIO;
3331 btrfs_set_log_full_commit(trans);
3332 btrfs_abort_transaction(trans, ret);
3333 mutex_unlock(&fs_info->tree_log_mutex);
3334 goto out_wake_log_root;
3335 }
3336
3337 btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start);
3338 btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level);
3339 ret = write_all_supers(fs_info, 1);
3340 mutex_unlock(&fs_info->tree_log_mutex);
3341 if (ret) {
3342 btrfs_set_log_full_commit(trans);
3343 btrfs_abort_transaction(trans, ret);
3344 goto out_wake_log_root;
3345 }
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355 ASSERT(root->last_log_commit <= log_transid);
3356 root->last_log_commit = log_transid;
3357
3358 out_wake_log_root:
3359 mutex_lock(&log_root_tree->log_mutex);
3360 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3361
3362 log_root_tree->log_transid_committed++;
3363 atomic_set(&log_root_tree->log_commit[index2], 0);
3364 mutex_unlock(&log_root_tree->log_mutex);
3365
3366
3367
3368
3369
3370
3371 cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3372 out:
3373 mutex_lock(&root->log_mutex);
3374 btrfs_remove_all_log_ctxs(root, index1, ret);
3375 root->log_transid_committed++;
3376 atomic_set(&root->log_commit[index1], 0);
3377 mutex_unlock(&root->log_mutex);
3378
3379
3380
3381
3382
3383
3384 cond_wake_up(&root->log_commit_wait[index1]);
3385 return ret;
3386 }
3387
3388 static void free_log_tree(struct btrfs_trans_handle *trans,
3389 struct btrfs_root *log)
3390 {
3391 int ret;
3392 struct walk_control wc = {
3393 .free = 1,
3394 .process_func = process_one_buffer
3395 };
3396
3397 if (log->node) {
3398 ret = walk_log_tree(trans, log, &wc);
3399 if (ret) {
3400
3401
3402
3403
3404
3405
3406 set_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
3407 &log->fs_info->fs_state);
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417 btrfs_write_marked_extents(log->fs_info,
3418 &log->dirty_log_pages,
3419 EXTENT_DIRTY | EXTENT_NEW);
3420 btrfs_wait_tree_log_extents(log,
3421 EXTENT_DIRTY | EXTENT_NEW);
3422
3423 if (trans)
3424 btrfs_abort_transaction(trans, ret);
3425 else
3426 btrfs_handle_fs_error(log->fs_info, ret, NULL);
3427 }
3428 }
3429
3430 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
3431 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3432 extent_io_tree_release(&log->log_csum_range);
3433
3434 btrfs_put_root(log);
3435 }
3436
3437
3438
3439
3440
3441 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3442 {
3443 if (root->log_root) {
3444 free_log_tree(trans, root->log_root);
3445 root->log_root = NULL;
3446 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
3447 }
3448 return 0;
3449 }
3450
3451 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3452 struct btrfs_fs_info *fs_info)
3453 {
3454 if (fs_info->log_root_tree) {
3455 free_log_tree(trans, fs_info->log_root_tree);
3456 fs_info->log_root_tree = NULL;
3457 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &fs_info->tree_root->state);
3458 }
3459 return 0;
3460 }
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
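/*
 * Check whether an inode was logged in the current transaction.  Returns 1
 * if it was, 0 if it was not and a negative errno on failure.  When
 * logged_trans is 0 (the in-memory inode may have been evicted and loaded
 * again since it was logged) the log tree is searched for the inode item
 * and logged_trans, and for directories last_dir_index_offset, are updated
 * based on the result.  The optional @path_in is reused to avoid an extra
 * path allocation.
 */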
3471 static int inode_logged(struct btrfs_trans_handle *trans,
3472 struct btrfs_inode *inode,
3473 struct btrfs_path *path_in)
3474 {
3475 struct btrfs_path *path = path_in;
3476 struct btrfs_key key;
3477 int ret;
3478
3479 if (inode->logged_trans == trans->transid)
3480 return 1;
3481
3482
3483
3484
3485
3486 if (inode->logged_trans > 0)
3487 return 0;
3488
3489
3490
3491
3492
3493
3494
3495
3496 if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &inode->root->state)) {
3497 inode->logged_trans = trans->transid - 1;
3498 return 0;
3499 }
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525 key.objectid = btrfs_ino(inode);
3526 key.type = BTRFS_INODE_ITEM_KEY;
3527 key.offset = 0;
3528
3529 if (!path) {
3530 path = btrfs_alloc_path();
3531 if (!path)
3532 return -ENOMEM;
3533 }
3534
3535 ret = btrfs_search_slot(NULL, inode->root->log_root, &key, path, 0, 0);
3536
3537 if (path_in)
3538 btrfs_release_path(path);
3539 else
3540 btrfs_free_path(path);
3541
3542
3543
3544
3545
3546 if (ret < 0) {
3547 return ret;
3548 } else if (ret > 0) {
3549
3550
3551
3552
3553 inode->logged_trans = trans->transid - 1;
3554 return 0;
3555 }
3556
3557
3558
3559
3560
3561
3562 inode->logged_trans = trans->transid;
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574 if (S_ISDIR(inode->vfs_inode.i_mode))
3575 inode->last_dir_index_offset = (u64)-1;
3576
3577 return 1;
3578 }
3579
3580
3581
3582
3583
3584
3585
3586
3587 static int del_logged_dentry(struct btrfs_trans_handle *trans,
3588 struct btrfs_root *log,
3589 struct btrfs_path *path,
3590 u64 dir_ino,
3591 const char *name, int name_len,
3592 u64 index)
3593 {
3594 struct btrfs_dir_item *di;
3595
3596
3597
3598
3599
3600 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3601 index, name, name_len, -1);
3602 if (IS_ERR(di))
3603 return PTR_ERR(di);
3604 else if (!di)
3605 return 1;
3606
3607
3608
3609
3610
3611
3612 return btrfs_delete_one_dir_name(trans, log, path, di);
3613 }
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
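/*
 * Called when a name is unlinked: if the directory was logged in the
 * current transaction, remove the (name, index) entry from its log so that
 * replaying the log cannot resurrect the deleted name.  Does nothing when
 * the directory was never logged; any error forces a full transaction
 * commit for safety.
 */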
3636 void btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3637 struct btrfs_root *root,
3638 const char *name, int name_len,
3639 struct btrfs_inode *dir, u64 index)
3640 {
3641 struct btrfs_path *path;
3642 int ret;
3643
3644 ret = inode_logged(trans, dir, NULL);
3645 if (ret == 0)
3646 return;
3647 else if (ret < 0) {
3648 btrfs_set_log_full_commit(trans);
3649 return;
3650 }
3651
3652 ret = join_running_log_trans(root);
3653 if (ret)
3654 return;
3655
3656 mutex_lock(&dir->log_mutex);
3657
3658 path = btrfs_alloc_path();
3659 if (!path) {
3660 ret = -ENOMEM;
3661 goto out_unlock;
3662 }
3663
3664 ret = del_logged_dentry(trans, root->log_root, path, btrfs_ino(dir),
3665 name, name_len, index);
3666 btrfs_free_path(path);
3667 out_unlock:
3668 mutex_unlock(&dir->log_mutex);
3669 if (ret < 0)
3670 btrfs_set_log_full_commit(trans);
3671 btrfs_end_log_trans(root);
3672 }
3673
3674
3675 void btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3676 struct btrfs_root *root,
3677 const char *name, int name_len,
3678 struct btrfs_inode *inode, u64 dirid)
3679 {
3680 struct btrfs_root *log;
3681 u64 index;
3682 int ret;
3683
3684 ret = inode_logged(trans, inode, NULL);
3685 if (ret == 0)
3686 return;
3687 else if (ret < 0) {
3688 btrfs_set_log_full_commit(trans);
3689 return;
3690 }
3691
3692 ret = join_running_log_trans(root);
3693 if (ret)
3694 return;
3695 log = root->log_root;
3696 mutex_lock(&inode->log_mutex);
3697
3698 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3699 dirid, &index);
3700 mutex_unlock(&inode->log_mutex);
3701 if (ret < 0 && ret != -ENOENT)
3702 btrfs_set_log_full_commit(trans);
3703 btrfs_end_log_trans(root);
3704 }
3705
3706
3707
3708
3709
3710
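/*
 * Record in the log that the directory index key range
 * [first_offset, last_offset] of @dirid was fully logged, so that replay
 * can delete any index item in that range which is not present in the log.
 * If an item keyed at first_offset already exists, its end offset is only
 * ever extended, never shrunk.
 */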
3711 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3712 struct btrfs_root *log,
3713 struct btrfs_path *path,
3714 u64 dirid,
3715 u64 first_offset, u64 last_offset)
3716 {
3717 int ret;
3718 struct btrfs_key key;
3719 struct btrfs_dir_log_item *item;
3720
3721 key.objectid = dirid;
3722 key.offset = first_offset;
3723 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3724 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3725
3726
3727
3728
3729
3730
3731
3732 if (ret && ret != -EEXIST)
3733 return ret;
3734
3735 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3736 struct btrfs_dir_log_item);
3737 if (ret == -EEXIST) {
3738 const u64 curr_end = btrfs_dir_log_end(path->nodes[0], item);
3739
3740
3741
3742
3743
3744
3745
3746 last_offset = max(last_offset, curr_end);
3747 }
3748 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3749 btrfs_mark_buffer_dirty(path->nodes[0]);
3750 btrfs_release_path(path);
3751 return 0;
3752 }
3753
3754 static int flush_dir_items_batch(struct btrfs_trans_handle *trans,
3755 struct btrfs_root *log,
3756 struct extent_buffer *src,
3757 struct btrfs_path *dst_path,
3758 int start_slot,
3759 int count)
3760 {
3761 char *ins_data = NULL;
3762 struct btrfs_item_batch batch;
3763 struct extent_buffer *dst;
3764 unsigned long src_offset;
3765 unsigned long dst_offset;
3766 struct btrfs_key key;
3767 u32 item_size;
3768 int ret;
3769 int i;
3770
3771 ASSERT(count > 0);
3772 batch.nr = count;
3773
3774 if (count == 1) {
3775 btrfs_item_key_to_cpu(src, &key, start_slot);
3776 item_size = btrfs_item_size(src, start_slot);
3777 batch.keys = &key;
3778 batch.data_sizes = &item_size;
3779 batch.total_data_size = item_size;
3780 } else {
3781 struct btrfs_key *ins_keys;
3782 u32 *ins_sizes;
3783
3784 ins_data = kmalloc(count * sizeof(u32) +
3785 count * sizeof(struct btrfs_key), GFP_NOFS);
3786 if (!ins_data)
3787 return -ENOMEM;
3788
3789 ins_sizes = (u32 *)ins_data;
3790 ins_keys = (struct btrfs_key *)(ins_data + count * sizeof(u32));
3791 batch.keys = ins_keys;
3792 batch.data_sizes = ins_sizes;
3793 batch.total_data_size = 0;
3794
3795 for (i = 0; i < count; i++) {
3796 const int slot = start_slot + i;
3797
3798 btrfs_item_key_to_cpu(src, &ins_keys[i], slot);
3799 ins_sizes[i] = btrfs_item_size(src, slot);
3800 batch.total_data_size += ins_sizes[i];
3801 }
3802 }
3803
3804 ret = btrfs_insert_empty_items(trans, log, dst_path, &batch);
3805 if (ret)
3806 goto out;
3807
3808 dst = dst_path->nodes[0];
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819 dst_offset = btrfs_item_ptr_offset(dst, dst_path->slots[0] + count - 1);
3820 src_offset = btrfs_item_ptr_offset(src, start_slot + count - 1);
3821 copy_extent_buffer(dst, src, dst_offset, src_offset, batch.total_data_size);
3822 btrfs_release_path(dst_path);
3823 out:
3824 kfree(ins_data);
3825
3826 return ret;
3827 }
3828
3829 static int process_dir_items_leaf(struct btrfs_trans_handle *trans,
3830 struct btrfs_inode *inode,
3831 struct btrfs_path *path,
3832 struct btrfs_path *dst_path,
3833 struct btrfs_log_ctx *ctx,
3834 u64 *last_old_dentry_offset)
3835 {
3836 struct btrfs_root *log = inode->root->log_root;
3837 struct extent_buffer *src = path->nodes[0];
3838 const int nritems = btrfs_header_nritems(src);
3839 const u64 ino = btrfs_ino(inode);
3840 bool last_found = false;
3841 int batch_start = 0;
3842 int batch_size = 0;
3843 int i;
3844
3845 for (i = path->slots[0]; i < nritems; i++) {
3846 struct btrfs_dir_item *di;
3847 struct btrfs_key key;
3848 int ret;
3849
3850 btrfs_item_key_to_cpu(src, &key, i);
3851
3852 if (key.objectid != ino || key.type != BTRFS_DIR_INDEX_KEY) {
3853 last_found = true;
3854 break;
3855 }
3856
3857 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3858 ctx->last_dir_item_offset = key.offset;
3859
3860
3861
3862
3863
3864
3865
3866 if (btrfs_dir_transid(src, di) < trans->transid) {
3867 if (key.offset > *last_old_dentry_offset + 1) {
3868 ret = insert_dir_log_key(trans, log, dst_path,
3869 ino, *last_old_dentry_offset + 1,
3870 key.offset - 1);
3871 if (ret < 0)
3872 return ret;
3873 }
3874
3875 *last_old_dentry_offset = key.offset;
3876 continue;
3877 }
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900 if (!ctx->log_new_dentries) {
3901 struct btrfs_key di_key;
3902
3903 btrfs_dir_item_key_to_cpu(src, di, &di_key);
3904 if (di_key.type != BTRFS_ROOT_ITEM_KEY)
3905 ctx->log_new_dentries = true;
3906 }
3907
3908 if (!ctx->logged_before)
3909 goto add_to_batch;
3910
3911
3912
3913
3914
3915
3916
3917
3918
3919 if (key.offset > inode->last_dir_index_offset)
3920 goto add_to_batch;
3921
3922
3923
3924
3925 ret = btrfs_search_slot(NULL, log, &key, dst_path, 0, 0);
3926 if (ret < 0) {
3927 return ret;
3928 } else if (ret > 0) {
3929 btrfs_release_path(dst_path);
3930 goto add_to_batch;
3931 }
3932
3933
3934
3935
3936
3937
3938
3939
3940 ret = do_overwrite_item(trans, log, dst_path, src, i, &key);
3941 if (ret < 0)
3942 return ret;
3943
3944 if (batch_size > 0) {
3945 ret = flush_dir_items_batch(trans, log, src, dst_path,
3946 batch_start, batch_size);
3947 if (ret < 0)
3948 return ret;
3949 batch_size = 0;
3950 }
3951 continue;
3952 add_to_batch:
3953 if (batch_size == 0)
3954 batch_start = i;
3955 batch_size++;
3956 }
3957
3958 if (batch_size > 0) {
3959 int ret;
3960
3961 ret = flush_dir_items_batch(trans, log, src, dst_path,
3962 batch_start, batch_size);
3963 if (ret < 0)
3964 return ret;
3965 }
3966
3967 return last_found ? 1 : 0;
3968 }
3969
3970
3971
3972
3973
3974
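/*
 * Log directory index items of @inode with index keys >= min_offset.
 * Entries created in the current transaction are copied into the log in
 * batches, while ranges containing only older, already persisted entries
 * are covered with dir log items so replay can detect deletions.
 * *last_offset_ret is set to the last index offset covered, or (u64)-1
 * when the end of the directory was reached.
 */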
3975 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3976 struct btrfs_inode *inode,
3977 struct btrfs_path *path,
3978 struct btrfs_path *dst_path,
3979 struct btrfs_log_ctx *ctx,
3980 u64 min_offset, u64 *last_offset_ret)
3981 {
3982 struct btrfs_key min_key;
3983 struct btrfs_root *root = inode->root;
3984 struct btrfs_root *log = root->log_root;
3985 int err = 0;
3986 int ret;
3987 u64 last_old_dentry_offset = min_offset - 1;
3988 u64 last_offset = (u64)-1;
3989 u64 ino = btrfs_ino(inode);
3990
3991 min_key.objectid = ino;
3992 min_key.type = BTRFS_DIR_INDEX_KEY;
3993 min_key.offset = min_offset;
3994
3995 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3996
3997
3998
3999
4000
4001 if (ret != 0 || min_key.objectid != ino ||
4002 min_key.type != BTRFS_DIR_INDEX_KEY) {
4003 min_key.objectid = ino;
4004 min_key.type = BTRFS_DIR_INDEX_KEY;
4005 min_key.offset = (u64)-1;
4006 btrfs_release_path(path);
4007 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
4008 if (ret < 0) {
4009 btrfs_release_path(path);
4010 return ret;
4011 }
4012 ret = btrfs_previous_item(root, path, ino, BTRFS_DIR_INDEX_KEY);
4013
4014
4015
4016
4017
4018
4019 if (ret == 0) {
4020 struct btrfs_key tmp;
4021
4022 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
4023 path->slots[0]);
4024 if (tmp.type == BTRFS_DIR_INDEX_KEY)
4025 last_old_dentry_offset = tmp.offset;
4026 }
4027 goto done;
4028 }
4029
4030
4031 ret = btrfs_previous_item(root, path, ino, BTRFS_DIR_INDEX_KEY);
4032 if (ret == 0) {
4033 struct btrfs_key tmp;
4034
4035 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
4036
4037
4038
4039
4040
4041
4042
4043
4044 if (tmp.type == BTRFS_DIR_INDEX_KEY)
4045 last_old_dentry_offset = tmp.offset;
4046 }
4047 btrfs_release_path(path);
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057 search:
4058 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
4059 if (ret != 0)
4060 goto done;
4061
4062
4063
4064
4065
4066 while (1) {
4067 ret = process_dir_items_leaf(trans, inode, path, dst_path, ctx,
4068 &last_old_dentry_offset);
4069 if (ret != 0) {
4070 if (ret < 0)
4071 err = ret;
4072 goto done;
4073 }
4074 path->slots[0] = btrfs_header_nritems(path->nodes[0]);
4075
4076
4077
4078
4079
4080 ret = btrfs_next_leaf(root, path);
4081 if (ret) {
4082 if (ret == 1)
4083 last_offset = (u64)-1;
4084 else
4085 err = ret;
4086 goto done;
4087 }
4088 btrfs_item_key_to_cpu(path->nodes[0], &min_key, path->slots[0]);
4089 if (min_key.objectid != ino || min_key.type != BTRFS_DIR_INDEX_KEY) {
4090 last_offset = (u64)-1;
4091 goto done;
4092 }
4093 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
4094
4095
4096
4097
4098
4099
4100
4101
4102
4103 last_offset = min_key.offset - 1;
4104 goto done;
4105 }
4106 if (need_resched()) {
4107 btrfs_release_path(path);
4108 cond_resched();
4109 goto search;
4110 }
4111 }
4112 done:
4113 btrfs_release_path(path);
4114 btrfs_release_path(dst_path);
4115
4116 if (err == 0) {
4117 *last_offset_ret = last_offset;
4118
4119
4120
4121
4122
4123
4124
4125
4126 ASSERT(last_old_dentry_offset <= last_offset);
4127 if (last_old_dentry_offset < last_offset) {
4128 ret = insert_dir_log_key(trans, log, path, ino,
4129 last_old_dentry_offset + 1,
4130 last_offset);
4131 if (ret)
4132 err = ret;
4133 }
4134 }
4135 return err;
4136 }
4137
4138
4139
4140
4141
4142
4143
4144
4145
4146
4147
4148
4149
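/*
 * Log all new directory index items of @inode, starting at
 * BTRFS_DIR_START_INDEX, together with the DIR_LOG_INDEX items covering
 * the gaps between them.  The last logged index offset is remembered in
 * the inode so that later fsyncs in the same transaction only need to log
 * newer entries.
 */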
4150 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
4151 struct btrfs_inode *inode,
4152 struct btrfs_path *path,
4153 struct btrfs_path *dst_path,
4154 struct btrfs_log_ctx *ctx)
4155 {
4156 u64 min_key;
4157 u64 max_key;
4158 int ret;
4159
4160 min_key = BTRFS_DIR_START_INDEX;
4161 max_key = 0;
4162 ctx->last_dir_item_offset = inode->last_dir_index_offset;
4163
4164 while (1) {
4165 ret = log_dir_items(trans, inode, path, dst_path,
4166 ctx, min_key, &max_key);
4167 if (ret)
4168 return ret;
4169 if (max_key == (u64)-1)
4170 break;
4171 min_key = max_key + 1;
4172 }
4173
4174 inode->last_dir_index_offset = ctx->last_dir_item_offset;
4175
4176 return 0;
4177 }
4178
4179
4180
4181
4182
4183
4184
4185 static int drop_inode_items(struct btrfs_trans_handle *trans,
4186 struct btrfs_root *log,
4187 struct btrfs_path *path,
4188 struct btrfs_inode *inode,
4189 int max_key_type)
4190 {
4191 int ret;
4192 struct btrfs_key key;
4193 struct btrfs_key found_key;
4194 int start_slot;
4195
4196 key.objectid = btrfs_ino(inode);
4197 key.type = max_key_type;
4198 key.offset = (u64)-1;
4199
4200 while (1) {
4201 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
4202 BUG_ON(ret == 0);
4203 if (ret < 0)
4204 break;
4205
4206 if (path->slots[0] == 0)
4207 break;
4208
4209 path->slots[0]--;
4210 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
4211 path->slots[0]);
4212
4213 if (found_key.objectid != key.objectid)
4214 break;
4215
4216 found_key.offset = 0;
4217 found_key.type = 0;
4218 ret = btrfs_bin_search(path->nodes[0], &found_key, &start_slot);
4219 if (ret < 0)
4220 break;
4221
4222 ret = btrfs_del_items(trans, log, path, start_slot,
4223 path->slots[0] - start_slot + 1);
4224
4225
4226
4227
4228 if (ret || start_slot != 0)
4229 break;
4230 btrfs_release_path(path);
4231 }
4232 btrfs_release_path(path);
4233 if (ret > 0)
4234 ret = 0;
4235 return ret;
4236 }
4237
4238 static int truncate_inode_items(struct btrfs_trans_handle *trans,
4239 struct btrfs_root *log_root,
4240 struct btrfs_inode *inode,
4241 u64 new_size, u32 min_type)
4242 {
4243 struct btrfs_truncate_control control = {
4244 .new_size = new_size,
4245 .ino = btrfs_ino(inode),
4246 .min_type = min_type,
4247 .skip_ref_updates = true,
4248 };
4249
4250 return btrfs_truncate_inode_items(trans, log_root, &control);
4251 }
4252
4253 static void fill_inode_item(struct btrfs_trans_handle *trans,
4254 struct extent_buffer *leaf,
4255 struct btrfs_inode_item *item,
4256 struct inode *inode, int log_inode_only,
4257 u64 logged_isize)
4258 {
4259 struct btrfs_map_token token;
4260 u64 flags;
4261
4262 btrfs_init_map_token(&token, leaf);
4263
4264 if (log_inode_only) {
4265
4266
4267
4268
4269
4270 btrfs_set_token_inode_generation(&token, item, 0);
4271 btrfs_set_token_inode_size(&token, item, logged_isize);
4272 } else {
4273 btrfs_set_token_inode_generation(&token, item,
4274 BTRFS_I(inode)->generation);
4275 btrfs_set_token_inode_size(&token, item, inode->i_size);
4276 }
4277
4278 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
4279 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
4280 btrfs_set_token_inode_mode(&token, item, inode->i_mode);
4281 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
4282
4283 btrfs_set_token_timespec_sec(&token, &item->atime,
4284 inode->i_atime.tv_sec);
4285 btrfs_set_token_timespec_nsec(&token, &item->atime,
4286 inode->i_atime.tv_nsec);
4287
4288 btrfs_set_token_timespec_sec(&token, &item->mtime,
4289 inode->i_mtime.tv_sec);
4290 btrfs_set_token_timespec_nsec(&token, &item->mtime,
4291 inode->i_mtime.tv_nsec);
4292
4293 btrfs_set_token_timespec_sec(&token, &item->ctime,
4294 inode->i_ctime.tv_sec);
4295 btrfs_set_token_timespec_nsec(&token, &item->ctime,
4296 inode->i_ctime.tv_nsec);
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306
4307 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
4308 btrfs_set_token_inode_transid(&token, item, trans->transid);
4309 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
4310 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
4311 BTRFS_I(inode)->ro_flags);
4312 btrfs_set_token_inode_flags(&token, item, flags);
4313 btrfs_set_token_inode_block_group(&token, item, 0);
4314 }
4315
4316 static int log_inode_item(struct btrfs_trans_handle *trans,
4317 struct btrfs_root *log, struct btrfs_path *path,
4318 struct btrfs_inode *inode, bool inode_item_dropped)
4319 {
4320 struct btrfs_inode_item *inode_item;
4321 int ret;
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333 if (!inode_item_dropped && inode->logged_trans == trans->transid) {
4334 ret = btrfs_search_slot(trans, log, &inode->location, path, 0, 1);
4335 ASSERT(ret <= 0);
4336 if (ret > 0)
4337 ret = -ENOENT;
4338 } else {
4339
4340
4341
4342
4343
4344
4345
4346
4347
4348 ret = btrfs_insert_empty_item(trans, log, path, &inode->location,
4349 sizeof(*inode_item));
4350 ASSERT(ret != -EEXIST);
4351 }
4352 if (ret)
4353 return ret;
4354 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4355 struct btrfs_inode_item);
4356 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
4357 0, 0);
4358 btrfs_release_path(path);
4359 return 0;
4360 }
4361
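/*
 * Add a set of checksums to the log tree.  If the inode has been a reflink
 * (clone) target in the current transaction, the same checksum range may
 * be logged more than once, so the range is locked in log_csum_range and
 * any existing csums in it are deleted first to avoid overlapping checksum
 * items in the log.
 */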
4362 static int log_csums(struct btrfs_trans_handle *trans,
4363 struct btrfs_inode *inode,
4364 struct btrfs_root *log_root,
4365 struct btrfs_ordered_sum *sums)
4366 {
4367 const u64 lock_end = sums->bytenr + sums->len - 1;
4368 struct extent_state *cached_state = NULL;
4369 int ret;
4370
4371
4372
4373
4374
4375
4376 if (inode->last_reflink_trans < trans->transid)
4377 return btrfs_csum_file_blocks(trans, log_root, sums);
4378
4379
4380
4381
4382
4383
4384
4385 ret = lock_extent_bits(&log_root->log_csum_range, sums->bytenr,
4386 lock_end, &cached_state);
4387 if (ret)
4388 return ret;
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398 ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
4399 if (!ret)
4400 ret = btrfs_csum_file_blocks(trans, log_root, sums);
4401
4402 unlock_extent_cached(&log_root->log_csum_range, sums->bytenr, lock_end,
4403 &cached_state);
4404
4405 return ret;
4406 }
4407
4408 static noinline int copy_items(struct btrfs_trans_handle *trans,
4409 struct btrfs_inode *inode,
4410 struct btrfs_path *dst_path,
4411 struct btrfs_path *src_path,
4412 int start_slot, int nr, int inode_only,
4413 u64 logged_isize)
4414 {
4415 struct btrfs_root *log = inode->root->log_root;
4416 struct btrfs_file_extent_item *extent;
4417 struct extent_buffer *src = src_path->nodes[0];
4418 int ret = 0;
4419 struct btrfs_key *ins_keys;
4420 u32 *ins_sizes;
4421 struct btrfs_item_batch batch;
4422 char *ins_data;
4423 int i;
4424 int dst_index;
4425 const bool skip_csum = (inode->flags & BTRFS_INODE_NODATASUM);
4426 const u64 i_size = i_size_read(&inode->vfs_inode);
4427
4428 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
4429 nr * sizeof(u32), GFP_NOFS);
4430 if (!ins_data)
4431 return -ENOMEM;
4432
4433 ins_sizes = (u32 *)ins_data;
4434 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
4435 batch.keys = ins_keys;
4436 batch.data_sizes = ins_sizes;
4437 batch.total_data_size = 0;
4438 batch.nr = 0;
4439
4440 dst_index = 0;
4441 for (i = 0; i < nr; i++) {
4442 const int src_slot = start_slot + i;
4443 struct btrfs_root *csum_root;
4444 struct btrfs_ordered_sum *sums;
4445 struct btrfs_ordered_sum *sums_next;
4446 LIST_HEAD(ordered_sums);
4447 u64 disk_bytenr;
4448 u64 disk_num_bytes;
4449 u64 extent_offset;
4450 u64 extent_num_bytes;
4451 bool is_old_extent;
4452
4453 btrfs_item_key_to_cpu(src, &ins_keys[dst_index], src_slot);
4454
4455 if (ins_keys[dst_index].type != BTRFS_EXTENT_DATA_KEY)
4456 goto add_to_batch;
4457
4458 extent = btrfs_item_ptr(src, src_slot,
4459 struct btrfs_file_extent_item);
4460
4461 is_old_extent = (btrfs_file_extent_generation(src, extent) <
4462 trans->transid);
4463
4464
4465
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477 if (is_old_extent &&
4478 ins_keys[dst_index].offset < i_size &&
4479 inode->last_reflink_trans < trans->transid)
4480 continue;
4481
4482 if (skip_csum)
4483 goto add_to_batch;
4484
4485
4486 if (btrfs_file_extent_type(src, extent) != BTRFS_FILE_EXTENT_REG)
4487 goto add_to_batch;
4488
4489
4490
4491
4492
4493
4494 if (is_old_extent)
4495 goto add_to_batch;
4496
4497 disk_bytenr = btrfs_file_extent_disk_bytenr(src, extent);
4498
4499 if (disk_bytenr == 0)
4500 goto add_to_batch;
4501
4502 disk_num_bytes = btrfs_file_extent_disk_num_bytes(src, extent);
4503
4504 if (btrfs_file_extent_compression(src, extent)) {
4505 extent_offset = 0;
4506 extent_num_bytes = disk_num_bytes;
4507 } else {
4508 extent_offset = btrfs_file_extent_offset(src, extent);
4509 extent_num_bytes = btrfs_file_extent_num_bytes(src, extent);
4510 }
4511
4512 csum_root = btrfs_csum_root(trans->fs_info, disk_bytenr);
4513 disk_bytenr += extent_offset;
4514 ret = btrfs_lookup_csums_range(csum_root, disk_bytenr,
4515 disk_bytenr + extent_num_bytes - 1,
4516 &ordered_sums, 0);
4517 if (ret)
4518 goto out;
4519
4520 list_for_each_entry_safe(sums, sums_next, &ordered_sums, list) {
4521 if (!ret)
4522 ret = log_csums(trans, inode, log, sums);
4523 list_del(&sums->list);
4524 kfree(sums);
4525 }
4526 if (ret)
4527 goto out;
4528
4529 add_to_batch:
4530 ins_sizes[dst_index] = btrfs_item_size(src, src_slot);
4531 batch.total_data_size += ins_sizes[dst_index];
4532 batch.nr++;
4533 dst_index++;
4534 }
4535
4536
4537
4538
4539
4540 if (batch.nr == 0)
4541 goto out;
4542
4543 ret = btrfs_insert_empty_items(trans, log, dst_path, &batch);
4544 if (ret)
4545 goto out;
4546
4547 dst_index = 0;
4548 for (i = 0; i < nr; i++) {
4549 const int src_slot = start_slot + i;
4550 const int dst_slot = dst_path->slots[0] + dst_index;
4551 struct btrfs_key key;
4552 unsigned long src_offset;
4553 unsigned long dst_offset;
4554
4555
4556
4557
4558
4559 if (dst_index >= batch.nr)
4560 break;
4561
4562 btrfs_item_key_to_cpu(src, &key, src_slot);
4563
4564 if (key.type != BTRFS_EXTENT_DATA_KEY)
4565 goto copy_item;
4566
4567 extent = btrfs_item_ptr(src, src_slot,
4568 struct btrfs_file_extent_item);
4569
4570
4571 if (btrfs_file_extent_generation(src, extent) < trans->transid &&
4572 key.offset < i_size &&
4573 inode->last_reflink_trans < trans->transid)
4574 continue;
4575
4576 copy_item:
4577 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], dst_slot);
4578 src_offset = btrfs_item_ptr_offset(src, src_slot);
4579
4580 if (key.type == BTRFS_INODE_ITEM_KEY) {
4581 struct btrfs_inode_item *inode_item;
4582
4583 inode_item = btrfs_item_ptr(dst_path->nodes[0], dst_slot,
4584 struct btrfs_inode_item);
4585 fill_inode_item(trans, dst_path->nodes[0], inode_item,
4586 &inode->vfs_inode,
4587 inode_only == LOG_INODE_EXISTS,
4588 logged_isize);
4589 } else {
4590 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
4591 src_offset, ins_sizes[dst_index]);
4592 }
4593
4594 dst_index++;
4595 }
4596
4597 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
4598 btrfs_release_path(dst_path);
4599 out:
4600 kfree(ins_data);
4601
4602 return ret;
4603 }
4604
4605 static int extent_cmp(void *priv, const struct list_head *a,
4606 const struct list_head *b)
4607 {
4608 const struct extent_map *em1, *em2;
4609
4610 em1 = list_entry(a, struct extent_map, list);
4611 em2 = list_entry(b, struct extent_map, list);
4612
4613 if (em1->start < em2->start)
4614 return -1;
4615 else if (em1->start > em2->start)
4616 return 1;
4617 return 0;
4618 }
4619
4620 static int log_extent_csums(struct btrfs_trans_handle *trans,
4621 struct btrfs_inode *inode,
4622 struct btrfs_root *log_root,
4623 const struct extent_map *em,
4624 struct btrfs_log_ctx *ctx)
4625 {
4626 struct btrfs_ordered_extent *ordered;
4627 struct btrfs_root *csum_root;
4628 u64 csum_offset;
4629 u64 csum_len;
4630 u64 mod_start = em->mod_start;
4631 u64 mod_len = em->mod_len;
4632 LIST_HEAD(ordered_sums);
4633 int ret = 0;
4634
4635 if (inode->flags & BTRFS_INODE_NODATASUM ||
4636 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4637 em->block_start == EXTENT_MAP_HOLE)
4638 return 0;
4639
4640 list_for_each_entry(ordered, &ctx->ordered_extents, log_list) {
4641 const u64 ordered_end = ordered->file_offset + ordered->num_bytes;
4642 const u64 mod_end = mod_start + mod_len;
4643 struct btrfs_ordered_sum *sums;
4644
4645 if (mod_len == 0)
4646 break;
4647
4648 if (ordered_end <= mod_start)
4649 continue;
4650 if (mod_end <= ordered->file_offset)
4651 break;
4652
4653
4654
4655
4656
4657
4658 if (ordered->file_offset > mod_start) {
4659 if (ordered_end >= mod_end)
4660 mod_len = ordered->file_offset - mod_start;
4661
4662
4663
4664
4665
4666
4667
4668
4669
4670
4671 } else {
4672 if (ordered_end < mod_end) {
4673 mod_len = mod_end - ordered_end;
4674 mod_start = ordered_end;
4675 } else {
4676 mod_len = 0;
4677 }
4678 }
4679
4680
4681
4682
4683
4684 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, &ordered->flags))
4685 continue;
4686
4687 list_for_each_entry(sums, &ordered->list, list) {
4688 ret = log_csums(trans, inode, log_root, sums);
4689 if (ret)
4690 return ret;
4691 }
4692 }
4693
4694
4695 if (mod_len == 0)
4696 return 0;
4697
4698
4699 if (em->compress_type) {
4700 csum_offset = 0;
4701 csum_len = max(em->block_len, em->orig_block_len);
4702 } else {
4703 csum_offset = mod_start - em->start;
4704 csum_len = mod_len;
4705 }
4706
4707
4708 csum_root = btrfs_csum_root(trans->fs_info, em->block_start);
4709 ret = btrfs_lookup_csums_range(csum_root,
4710 em->block_start + csum_offset,
4711 em->block_start + csum_offset +
4712 csum_len - 1, &ordered_sums, 0);
4713 if (ret)
4714 return ret;
4715
4716 while (!list_empty(&ordered_sums)) {
4717 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4718 struct btrfs_ordered_sum,
4719 list);
4720 if (!ret)
4721 ret = log_csums(trans, inode, log_root, sums);
4722 list_del(&sums->list);
4723 kfree(sums);
4724 }
4725
4726 return ret;
4727 }
4728
4729 static int log_one_extent(struct btrfs_trans_handle *trans,
4730 struct btrfs_inode *inode,
4731 const struct extent_map *em,
4732 struct btrfs_path *path,
4733 struct btrfs_log_ctx *ctx)
4734 {
4735 struct btrfs_drop_extents_args drop_args = { 0 };
4736 struct btrfs_root *log = inode->root->log_root;
4737 struct btrfs_file_extent_item fi = { 0 };
4738 struct extent_buffer *leaf;
4739 struct btrfs_key key;
4740 u64 extent_offset = em->start - em->orig_start;
4741 u64 block_len;
4742 int ret;
4743
4744 btrfs_set_stack_file_extent_generation(&fi, trans->transid);
4745 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4746 btrfs_set_stack_file_extent_type(&fi, BTRFS_FILE_EXTENT_PREALLOC);
4747 else
4748 btrfs_set_stack_file_extent_type(&fi, BTRFS_FILE_EXTENT_REG);
4749
4750 block_len = max(em->block_len, em->orig_block_len);
4751 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4752 btrfs_set_stack_file_extent_disk_bytenr(&fi, em->block_start);
4753 btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len);
4754 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4755 btrfs_set_stack_file_extent_disk_bytenr(&fi, em->block_start -
4756 extent_offset);
4757 btrfs_set_stack_file_extent_disk_num_bytes(&fi, block_len);
4758 }
4759
4760 btrfs_set_stack_file_extent_offset(&fi, extent_offset);
4761 btrfs_set_stack_file_extent_num_bytes(&fi, em->len);
4762 btrfs_set_stack_file_extent_ram_bytes(&fi, em->ram_bytes);
4763 btrfs_set_stack_file_extent_compression(&fi, em->compress_type);
4764
4765 ret = log_extent_csums(trans, inode, log, em, ctx);
4766 if (ret)
4767 return ret;
4768
4769
4770
4771
4772
4773
4774
4775
4776
4777
4778 if (ctx->logged_before) {
4779 drop_args.path = path;
4780 drop_args.start = em->start;
4781 drop_args.end = em->start + em->len;
4782 drop_args.replace_extent = true;
4783 drop_args.extent_item_size = sizeof(fi);
4784 ret = btrfs_drop_extents(trans, log, inode, &drop_args);
4785 if (ret)
4786 return ret;
4787 }
4788
4789 if (!drop_args.extent_inserted) {
4790 key.objectid = btrfs_ino(inode);
4791 key.type = BTRFS_EXTENT_DATA_KEY;
4792 key.offset = em->start;
4793
4794 ret = btrfs_insert_empty_item(trans, log, path, &key,
4795 sizeof(fi));
4796 if (ret)
4797 return ret;
4798 }
4799 leaf = path->nodes[0];
4800 write_extent_buffer(leaf, &fi,
4801 btrfs_item_ptr_offset(leaf, path->slots[0]),
4802 sizeof(fi));
4803 btrfs_mark_buffer_dirty(leaf);
4804
4805 btrfs_release_path(path);
4806
4807 return ret;
4808 }
4809
4810
4811
4812
4813
4814
4815
4816
4817
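/*
 * Log all prealloc extents that lie beyond the inode's i_size.  They are
 * skipped by the regular extent logging (nothing has written to them yet),
 * but they must be present in the log so the preallocated space is not
 * lost after a power failure followed by log replay.
 */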
4818 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4819 struct btrfs_inode *inode,
4820 struct btrfs_path *path)
4821 {
4822 struct btrfs_root *root = inode->root;
4823 struct btrfs_key key;
4824 const u64 i_size = i_size_read(&inode->vfs_inode);
4825 const u64 ino = btrfs_ino(inode);
4826 struct btrfs_path *dst_path = NULL;
4827 bool dropped_extents = false;
4828 u64 truncate_offset = i_size;
4829 struct extent_buffer *leaf;
4830 int slot;
4831 int ins_nr = 0;
4832 int start_slot;
4833 int ret;
4834
4835 if (!(inode->flags & BTRFS_INODE_PREALLOC))
4836 return 0;
4837
4838 key.objectid = ino;
4839 key.type = BTRFS_EXTENT_DATA_KEY;
4840 key.offset = i_size;
4841 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4842 if (ret < 0)
4843 goto out;
4844
4845
4846
4847
4848
4849
4850
4851
4852
4853 ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
4854 if (ret < 0)
4855 goto out;
4856
4857 if (ret == 0) {
4858 struct btrfs_file_extent_item *ei;
4859
4860 leaf = path->nodes[0];
4861 slot = path->slots[0];
4862 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4863
4864 if (btrfs_file_extent_type(leaf, ei) ==
4865 BTRFS_FILE_EXTENT_PREALLOC) {
4866 u64 extent_end;
4867
4868 btrfs_item_key_to_cpu(leaf, &key, slot);
4869 extent_end = key.offset +
4870 btrfs_file_extent_num_bytes(leaf, ei);
4871
4872 if (extent_end > i_size)
4873 truncate_offset = extent_end;
4874 }
4875 } else {
4876 ret = 0;
4877 }
4878
4879 while (true) {
4880 leaf = path->nodes[0];
4881 slot = path->slots[0];
4882
4883 if (slot >= btrfs_header_nritems(leaf)) {
4884 if (ins_nr > 0) {
4885 ret = copy_items(trans, inode, dst_path, path,
4886 start_slot, ins_nr, 1, 0);
4887 if (ret < 0)
4888 goto out;
4889 ins_nr = 0;
4890 }
4891 ret = btrfs_next_leaf(root, path);
4892 if (ret < 0)
4893 goto out;
4894 if (ret > 0) {
4895 ret = 0;
4896 break;
4897 }
4898 continue;
4899 }
4900
4901 btrfs_item_key_to_cpu(leaf, &key, slot);
4902 if (key.objectid > ino)
4903 break;
4904 if (WARN_ON_ONCE(key.objectid < ino) ||
4905 key.type < BTRFS_EXTENT_DATA_KEY ||
4906 key.offset < i_size) {
4907 path->slots[0]++;
4908 continue;
4909 }
4910 if (!dropped_extents) {
4911
4912
4913
4914
4915 ret = truncate_inode_items(trans, root->log_root, inode,
4916 truncate_offset,
4917 BTRFS_EXTENT_DATA_KEY);
4918 if (ret)
4919 goto out;
4920 dropped_extents = true;
4921 }
4922 if (ins_nr == 0)
4923 start_slot = slot;
4924 ins_nr++;
4925 path->slots[0]++;
4926 if (!dst_path) {
4927 dst_path = btrfs_alloc_path();
4928 if (!dst_path) {
4929 ret = -ENOMEM;
4930 goto out;
4931 }
4932 }
4933 }
4934 if (ins_nr > 0)
4935 ret = copy_items(trans, inode, dst_path, path,
4936 start_slot, ins_nr, 1, 0);
4937 out:
4938 btrfs_release_path(path);
4939 btrfs_free_path(dst_path);
4940 return ret;
4941 }
4942
4943 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4944 struct btrfs_inode *inode,
4945 struct btrfs_path *path,
4946 struct btrfs_log_ctx *ctx)
4947 {
4948 struct btrfs_ordered_extent *ordered;
4949 struct btrfs_ordered_extent *tmp;
4950 struct extent_map *em, *n;
4951 struct list_head extents;
4952 struct extent_map_tree *tree = &inode->extent_tree;
4953 int ret = 0;
4954 int num = 0;
4955
4956 INIT_LIST_HEAD(&extents);
4957
4958 write_lock(&tree->lock);
4959
4960 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4961 list_del_init(&em->list);
4962
4963
4964
4965
4966
4967
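/*
 * Cap on how many modified extents we are willing to log in one go; past
 * this point a full transaction commit is presumably cheaper, so give up
 * with -EFBIG and let the fsync fall back to committing the transaction.
 */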
4968 if (++num > 32768) {
4969 list_del_init(&tree->modified_extents);
4970 ret = -EFBIG;
4971 goto process;
4972 }
4973
4974 if (em->generation < trans->transid)
4975 continue;
4976
4977
4978 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4979 em->start >= i_size_read(&inode->vfs_inode))
4980 continue;
4981
4982
4983 refcount_inc(&em->refs);
4984 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4985 list_add_tail(&em->list, &extents);
4986 num++;
4987 }
4988
4989 list_sort(NULL, &extents, extent_cmp);
4990 process:
4991 while (!list_empty(&extents)) {
4992 em = list_entry(extents.next, struct extent_map, list);
4993
4994 list_del_init(&em->list);
4995
4996
4997
4998
4999
5000 if (ret) {
5001 clear_em_logging(tree, em);
5002 free_extent_map(em);
5003 continue;
5004 }
5005
5006 write_unlock(&tree->lock);
5007
5008 ret = log_one_extent(trans, inode, em, path, ctx);
5009 write_lock(&tree->lock);
5010 clear_em_logging(tree, em);
5011 free_extent_map(em);
5012 }
5013 WARN_ON(!list_empty(&extents));
5014 write_unlock(&tree->lock);
5015
5016 if (!ret)
5017 ret = btrfs_log_prealloc_extents(trans, inode, path);
5018 if (ret)
5019 return ret;
5020
5021
5022
5023
5024
5025
5026
5027
5028 list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
5029 list_del_init(&ordered->log_list);
5030 set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags);
5031
5032 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
5033 spin_lock_irq(&inode->ordered_tree.lock);
5034 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
5035 set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
5036 atomic_inc(&trans->transaction->pending_ordered);
5037 }
5038 spin_unlock_irq(&inode->ordered_tree.lock);
5039 }
5040 btrfs_put_ordered_extent(ordered);
5041 }
5042
5043 return 0;
5044 }
5045
5046 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
5047 struct btrfs_path *path, u64 *size_ret)
5048 {
5049 struct btrfs_key key;
5050 int ret;
5051
5052 key.objectid = btrfs_ino(inode);
5053 key.type = BTRFS_INODE_ITEM_KEY;
5054 key.offset = 0;
5055
5056 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
5057 if (ret < 0) {
5058 return ret;
5059 } else if (ret > 0) {
5060 *size_ret = 0;
5061 } else {
5062 struct btrfs_inode_item *item;
5063
5064 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
5065 struct btrfs_inode_item);
5066 *size_ret = btrfs_inode_size(path->nodes[0], item);
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076
5077
5078 if (*size_ret > inode->vfs_inode.i_size)
5079 *size_ret = inode->vfs_inode.i_size;
5080 }
5081
5082 btrfs_release_path(path);
5083 return 0;
5084 }
5085
5086
5087
5088
5089
5090
5091
5092
5093
5094
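/*
 * Copy all of the inode's xattr items into the log.  If none are found,
 * remember that with BTRFS_INODE_NO_XATTRS so later fsyncs in the same
 * session can skip the search entirely.
 */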
5095 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
5096 struct btrfs_inode *inode,
5097 struct btrfs_path *path,
5098 struct btrfs_path *dst_path)
5099 {
5100 struct btrfs_root *root = inode->root;
5101 int ret;
5102 struct btrfs_key key;
5103 const u64 ino = btrfs_ino(inode);
5104 int ins_nr = 0;
5105 int start_slot = 0;
5106 bool found_xattrs = false;
5107
5108 if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags))
5109 return 0;
5110
5111 key.objectid = ino;
5112 key.type = BTRFS_XATTR_ITEM_KEY;
5113 key.offset = 0;
5114
5115 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5116 if (ret < 0)
5117 return ret;
5118
5119 while (true) {
5120 int slot = path->slots[0];
5121 struct extent_buffer *leaf = path->nodes[0];
5122 int nritems = btrfs_header_nritems(leaf);
5123
5124 if (slot >= nritems) {
5125 if (ins_nr > 0) {
5126 ret = copy_items(trans, inode, dst_path, path,
5127 start_slot, ins_nr, 1, 0);
5128 if (ret < 0)
5129 return ret;
5130 ins_nr = 0;
5131 }
5132 ret = btrfs_next_leaf(root, path);
5133 if (ret < 0)
5134 return ret;
5135 else if (ret > 0)
5136 break;
5137 continue;
5138 }
5139
5140 btrfs_item_key_to_cpu(leaf, &key, slot);
5141 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
5142 break;
5143
5144 if (ins_nr == 0)
5145 start_slot = slot;
5146 ins_nr++;
5147 path->slots[0]++;
5148 found_xattrs = true;
5149 cond_resched();
5150 }
5151 if (ins_nr > 0) {
5152 ret = copy_items(trans, inode, dst_path, path,
5153 start_slot, ins_nr, 1, 0);
5154 if (ret < 0)
5155 return ret;
5156 }
5157
5158 if (!found_xattrs)
5159 set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags);
5160
5161 return 0;
5162 }
5163
5164
5165
5166
5167
5168
5169
5170
5171
5172
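/*
 * With the NO_HOLES feature there are no explicit hole file extent items
 * in the subvolume tree, so walk the inode's extent items and insert
 * explicit hole extent items into the log for every gap, including the
 * gap between the last extent and i_size.  This ensures log replay
 * restores the holes instead of leaving stale extents behind.
 */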
5173 static int btrfs_log_holes(struct btrfs_trans_handle *trans,
5174 struct btrfs_inode *inode,
5175 struct btrfs_path *path)
5176 {
5177 struct btrfs_root *root = inode->root;
5178 struct btrfs_fs_info *fs_info = root->fs_info;
5179 struct btrfs_key key;
5180 const u64 ino = btrfs_ino(inode);
5181 const u64 i_size = i_size_read(&inode->vfs_inode);
5182 u64 prev_extent_end = 0;
5183 int ret;
5184
5185 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0)
5186 return 0;
5187
5188 key.objectid = ino;
5189 key.type = BTRFS_EXTENT_DATA_KEY;
5190 key.offset = 0;
5191
5192 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5193 if (ret < 0)
5194 return ret;
5195
5196 while (true) {
5197 struct extent_buffer *leaf = path->nodes[0];
5198
5199 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
5200 ret = btrfs_next_leaf(root, path);
5201 if (ret < 0)
5202 return ret;
5203 if (ret > 0) {
5204 ret = 0;
5205 break;
5206 }
5207 leaf = path->nodes[0];
5208 }
5209
5210 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5211 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
5212 break;
5213
5214 /* We have a hole between the previous extent and this one, log it. */
5215 if (prev_extent_end < key.offset) {
5216 const u64 hole_len = key.offset - prev_extent_end;
5217
5218 /*
5219  * Release the path before inserting the hole item in the
5220  * log tree, so that we don't hold locks on a subvolume
5221  * tree leaf while modifying the log tree.
5222  */
5223 btrfs_release_path(path);
5224 ret = btrfs_insert_file_extent(trans, root->log_root,
5225 ino, prev_extent_end, 0,
5226 0, hole_len, 0, hole_len,
5227 0, 0, 0);
5228 if (ret < 0)
5229 return ret;
5230
5231
5232 /*
5233  * Search again for the key we were processing, since the
5234  * path was released above. The extent item must still
5235  * exist, otherwise something is seriously wrong (hence
5236  * the WARN_ON below).
5237  */
5238 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5239 if (ret < 0)
5240 return ret;
5241 if (WARN_ON(ret > 0))
5242 return -ENOENT;
5243 leaf = path->nodes[0];
5244 }
5245
5246 prev_extent_end = btrfs_file_extent_end(path);
5247 path->slots[0]++;
5248 cond_resched();
5249 }
5250
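	/*
	 * Finally, if i_size extends past the end of the last extent, log one
	 * more hole covering the range from the last extent to i_size.
	 */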
5251 if (prev_extent_end < i_size) {
5252 u64 hole_len;
5253
5254 btrfs_release_path(path);
5255 hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
5256 ret = btrfs_insert_file_extent(trans, root->log_root,
5257 ino, prev_extent_end, 0, 0,
5258 hole_len, 0, hole_len,
5259 0, 0, 0);
5260 if (ret < 0)
5261 return ret;
5262 }
5263
5264 return 0;
5265 }
5266
5267
5268
5269
5270
5271
5272
5273
5274
5275
5276
5277
5278
5279
5280
5281
5282
5283
5284
5285
5286
5287
5288
5289
5290
5291
5292
5293
5294
5295
5296
5297
5298
5299 /*
5300  * Check all the names in an inode reference item (INODE_REF or INODE_EXTREF)
5301  * against the directory entries found in the commit root. If any of those
5302  * names currently points to a different inode in its parent directory, return
5303  * 1 and set @other_ino and @other_parent to that inode and directory, so the
5304  * caller can log the conflicting inode too. Returns 0 when there is no
5305  * conflict, -EAGAIN if a directory entry does not point to an inode item, and
5306  * a negative errno on failure. This is used to avoid log replay ending up with
5307  * two different inodes having the same name in the same directory.
5308  */
5309 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
5310 const int slot,
5311 const struct btrfs_key *key,
5312 struct btrfs_inode *inode,
5313 u64 *other_ino, u64 *other_parent)
5314 {
5315 int ret;
5316 struct btrfs_path *search_path;
5317 char *name = NULL;
5318 u32 name_len = 0;
5319 u32 item_size = btrfs_item_size(eb, slot);
5320 u32 cur_offset = 0;
5321 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
5322
5323 search_path = btrfs_alloc_path();
5324 if (!search_path)
5325 return -ENOMEM;
5326 search_path->search_commit_root = 1;
5327 search_path->skip_locking = 1;
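	/*
	 * Walk every name stored in this reference item and look it up in its
	 * parent directory, using the commit roots (no locking needed).
	 */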
5328
5329 while (cur_offset < item_size) {
5330 u64 parent;
5331 u32 this_name_len;
5332 u32 this_len;
5333 unsigned long name_ptr;
5334 struct btrfs_dir_item *di;
5335
5336 if (key->type == BTRFS_INODE_REF_KEY) {
5337 struct btrfs_inode_ref *iref;
5338
5339 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
5340 parent = key->offset;
5341 this_name_len = btrfs_inode_ref_name_len(eb, iref);
5342 name_ptr = (unsigned long)(iref + 1);
5343 this_len = sizeof(*iref) + this_name_len;
5344 } else {
5345 struct btrfs_inode_extref *extref;
5346
5347 extref = (struct btrfs_inode_extref *)(ptr +
5348 cur_offset);
5349 parent = btrfs_inode_extref_parent(eb, extref);
5350 this_name_len = btrfs_inode_extref_name_len(eb, extref);
5351 name_ptr = (unsigned long)&extref->name;
5352 this_len = sizeof(*extref) + this_name_len;
5353 }
5354
5355 if (this_name_len > name_len) {
5356 char *new_name;
5357
5358 new_name = krealloc(name, this_name_len, GFP_NOFS);
5359 if (!new_name) {
5360 ret = -ENOMEM;
5361 goto out;
5362 }
5363 name_len = this_name_len;
5364 name = new_name;
5365 }
5366
5367 read_extent_buffer(eb, name, name_ptr, this_name_len);
5368 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
5369 parent, name, this_name_len, 0);
5370 if (di && !IS_ERR(di)) {
5371 struct btrfs_key di_key;
5372
5373 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
5374 di, &di_key);
5375 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
5376 if (di_key.objectid != key->objectid) {
5377 ret = 1;
5378 *other_ino = di_key.objectid;
5379 *other_parent = parent;
5380 } else {
5381 ret = 0;
5382 }
5383 } else {
5384 ret = -EAGAIN;
5385 }
5386 goto out;
5387 } else if (IS_ERR(di)) {
5388 ret = PTR_ERR(di);
5389 goto out;
5390 }
5391 btrfs_release_path(search_path);
5392
5393 cur_offset += this_len;
5394 }
5395 ret = 0;
5396 out:
5397 btrfs_free_path(search_path);
5398 kfree(name);
5399 return ret;
5400 }
5401
5402 struct btrfs_ino_list {
5403 u64 ino;
5404 u64 parent;
5405 struct list_head list;
5406 };
5407
5408 static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
5409 struct btrfs_root *root,
5410 struct btrfs_path *path,
5411 struct btrfs_log_ctx *ctx,
5412 u64 ino, u64 parent)
5413 {
5414 struct btrfs_ino_list *ino_elem;
5415 LIST_HEAD(inode_list);
5416 int ret = 0;
5417
5418 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5419 if (!ino_elem)
5420 return -ENOMEM;
5421 ino_elem->ino = ino;
5422 ino_elem->parent = parent;
5423 list_add_tail(&ino_elem->list, &inode_list);
5424
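	/*
	 * Process the conflicting inodes iteratively: logging one of them may
	 * reveal further conflicting names, whose inodes get appended to the
	 * list and are handled in later iterations of this loop.
	 */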
5425 while (!list_empty(&inode_list)) {
5426 struct btrfs_fs_info *fs_info = root->fs_info;
5427 struct btrfs_key key;
5428 struct inode *inode;
5429
5430 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
5431 list);
5432 ino = ino_elem->ino;
5433 parent = ino_elem->parent;
5434 list_del(&ino_elem->list);
5435 kfree(ino_elem);
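		/*
		 * After a previous failure we keep iterating only to remove and
		 * free the remaining queued entries.
		 */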
5436 if (ret)
5437 continue;
5438
5439 btrfs_release_path(path);
5440
5441 inode = btrfs_iget(fs_info->sb, ino, root);
5442 /*
5443  * If the conflicting inode was deleted in the current transaction
5444  * (-ENOENT), log its parent directory in full mode instead, so
5445  * that log replay removes the now stale directory entry.
5446  */
5447 if (IS_ERR(inode)) {
5448 ret = PTR_ERR(inode);
5449 if (ret == -ENOENT) {
5450 inode = btrfs_iget(fs_info->sb, parent, root);
5451 if (IS_ERR(inode)) {
5452 ret = PTR_ERR(inode);
5453 } else {
5454 ret = btrfs_log_inode(trans,
5455 BTRFS_I(inode),
5456 LOG_OTHER_INODE_ALL,
5457 ctx);
5458 btrfs_add_delayed_iput(inode);
5459 }
5460 }
5461 continue;
5462 }
5463
5464
5465
5466
5467
5468
5469
5470
5471
5472
5473
5474
5475
5476
5477
5478
5479
5480
5481
5482
5483
5484
5485
5486
5487 /*
5488  * If this inode was already logged in the current transaction,
5489  * skip it. Besides being unnecessary, processing it again could
5490  * make us loop forever, since its references may point back to
5491  * inodes that were already handled and would keep getting added
5492  * to the list.
5493  */
5494 spin_lock(&BTRFS_I(inode)->lock);
5495
5496 /*
5497  * Check logged_trans directly instead of btrfs_inode_in_log(),
5498  * since the latter also depends on last_log_commit, which is not
5499  * updated when an inode is logged only in LOG_INODE_EXISTS mode.
5500  */
5501 if (BTRFS_I(inode)->logged_trans == trans->transid) {
5502 spin_unlock(&BTRFS_I(inode)->lock);
5503 btrfs_add_delayed_iput(inode);
5504 continue;
5505 }
5506 spin_unlock(&BTRFS_I(inode)->lock);
5507
5508
5509 /*
5510  * Log the conflicting inode (only that it exists, since
5511  * LOG_OTHER_INODE maps to LOG_INODE_EXISTS) and then scan all of
5512  * its inode references to find further conflicting names.
5513  */
5514 ret = btrfs_log_inode(trans, BTRFS_I(inode), LOG_OTHER_INODE, ctx);
5515 if (ret) {
5516 btrfs_add_delayed_iput(inode);
5517 continue;
5518 }
5519
5520 key.objectid = ino;
5521 key.type = BTRFS_INODE_REF_KEY;
5522 key.offset = 0;
5523 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5524 if (ret < 0) {
5525 btrfs_add_delayed_iput(inode);
5526 continue;
5527 }
5528
5529 while (true) {
5530 struct extent_buffer *leaf = path->nodes[0];
5531 int slot = path->slots[0];
5532 u64 other_ino = 0;
5533 u64 other_parent = 0;
5534
5535 if (slot >= btrfs_header_nritems(leaf)) {
5536 ret = btrfs_next_leaf(root, path);
5537 if (ret < 0) {
5538 break;
5539 } else if (ret > 0) {
5540 ret = 0;
5541 break;
5542 }
5543 continue;
5544 }
5545
5546 btrfs_item_key_to_cpu(leaf, &key, slot);
5547 if (key.objectid != ino ||
5548 (key.type != BTRFS_INODE_REF_KEY &&
5549 key.type != BTRFS_INODE_EXTREF_KEY)) {
5550 ret = 0;
5551 break;
5552 }
5553
5554 ret = btrfs_check_ref_name_override(leaf, slot, &key,
5555 BTRFS_I(inode), &other_ino,
5556 &other_parent);
5557 if (ret < 0)
5558 break;
5559 if (ret > 0) {
5560 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
5561 if (!ino_elem) {
5562 ret = -ENOMEM;
5563 break;
5564 }
5565 ino_elem->ino = other_ino;
5566 ino_elem->parent = other_parent;
5567 list_add_tail(&ino_elem->list, &inode_list);
5568 ret = 0;
5569 }
5570 path->slots[0]++;
5571 }
5572 btrfs_add_delayed_iput(inode);
5573 }
5574
5575 return ret;
5576 }
5577
5578 static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
5579 struct btrfs_inode *inode,
5580 struct btrfs_key *min_key,
5581 const struct btrfs_key *max_key,
5582 struct btrfs_path *path,
5583 struct btrfs_path *dst_path,
5584 const u64 logged_isize,
5585 const bool recursive_logging,
5586 const int inode_only,
5587 struct btrfs_log_ctx *ctx,
5588 bool *need_log_inode_item)
5589 {
5590 const u64 i_size = i_size_read(&inode->vfs_inode);
5591 struct btrfs_root *root = inode->root;
5592 int ins_start_slot = 0;
5593 int ins_nr = 0;
5594 int ret;
5595
5596 while (1) {
5597 ret = btrfs_search_forward(root, min_key, path, trans->transid);
5598 if (ret < 0)
5599 return ret;
5600 if (ret > 0) {
5601 ret = 0;
5602 break;
5603 }
5604 again:
5605
5606 if (min_key->objectid != max_key->objectid)
5607 break;
5608 if (min_key->type > max_key->type)
5609 break;
5610
5611 if (min_key->type == BTRFS_INODE_ITEM_KEY) {
5612 *need_log_inode_item = false;
5613 } else if (min_key->type == BTRFS_EXTENT_DATA_KEY &&
5614 min_key->offset >= i_size) {
5615 /*
5616  * Don't copy file extent items that start at or beyond
5617  * i_size. Prealloc extents beyond the inode's size, if
5618  * any, are logged separately at the end by
5619  * btrfs_log_prealloc_extents().
5620  */
5621 break;
5622 } else if ((min_key->type == BTRFS_INODE_REF_KEY ||
5623 min_key->type == BTRFS_INODE_EXTREF_KEY) &&
5624 inode->generation == trans->transid &&
5625 !recursive_logging) {
5626 u64 other_ino = 0;
5627 u64 other_parent = 0;
5628
5629 ret = btrfs_check_ref_name_override(path->nodes[0],
5630 path->slots[0], min_key, inode,
5631 &other_ino, &other_parent);
5632 if (ret < 0) {
5633 return ret;
5634 } else if (ret > 0 &&
5635 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
5636 if (ins_nr > 0) {
5637 ins_nr++;
5638 } else {
5639 ins_nr = 1;
5640 ins_start_slot = path->slots[0];
5641 }
5642 ret = copy_items(trans, inode, dst_path, path,
5643 ins_start_slot, ins_nr,
5644 inode_only, logged_isize);
5645 if (ret < 0)
5646 return ret;
5647 ins_nr = 0;
5648
5649 ret = log_conflicting_inodes(trans, root, path,
5650 ctx, other_ino, other_parent);
5651 if (ret)
5652 return ret;
5653 btrfs_release_path(path);
5654 goto next_key;
5655 }
5656 } else if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
5657 /* Skip xattrs, they are logged later by btrfs_log_all_xattrs(). */
5658 if (ins_nr == 0)
5659 goto next_slot;
5660 ret = copy_items(trans, inode, dst_path, path,
5661 ins_start_slot,
5662 ins_nr, inode_only, logged_isize);
5663 if (ret < 0)
5664 return ret;
5665 ins_nr = 0;
5666 goto next_slot;
5667 }
5668
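		/*
		 * Batch contiguous leaf slots and copy them to the log tree with
		 * a single copy_items() call, flushing the current batch whenever
		 * the next item is not adjacent to it.
		 */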
5669 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5670 ins_nr++;
5671 goto next_slot;
5672 } else if (!ins_nr) {
5673 ins_start_slot = path->slots[0];
5674 ins_nr = 1;
5675 goto next_slot;
5676 }
5677
5678 ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5679 ins_nr, inode_only, logged_isize);
5680 if (ret < 0)
5681 return ret;
5682 ins_nr = 1;
5683 ins_start_slot = path->slots[0];
5684 next_slot:
5685 path->slots[0]++;
5686 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
5687 btrfs_item_key_to_cpu(path->nodes[0], min_key,
5688 path->slots[0]);
5689 goto again;
5690 }
5691 if (ins_nr) {
5692 ret = copy_items(trans, inode, dst_path, path,
5693 ins_start_slot, ins_nr, inode_only,
5694 logged_isize);
5695 if (ret < 0)
5696 return ret;
5697 ins_nr = 0;
5698 }
5699 btrfs_release_path(path);
5700 next_key:
5701 if (min_key->offset < (u64)-1) {
5702 min_key->offset++;
5703 } else if (min_key->type < max_key->type) {
5704 min_key->type++;
5705 min_key->offset = 0;
5706 } else {
5707 break;
5708 }
5709
5710 /*
5711  * We may be processing many leaves full of items for our inode,
5712  * so avoid monopolizing a CPU for too long by rescheduling now
5713  * that we are not holding any locks on tree leaves.
5714  */
5715 cond_resched();
5716 }
5717 if (ins_nr) {
5718 ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5719 ins_nr, inode_only, logged_isize);
5720 if (ret)
5721 return ret;
5722 }
5723
5724 if (inode_only == LOG_INODE_ALL && S_ISREG(inode->vfs_inode.i_mode)) {
5725 /*
5726  * Release the path since btrfs_log_prealloc_extents() does its
5727  * own searches on the subvolume and log trees.
5728  */
5729 btrfs_release_path(path);
5730 ret = btrfs_log_prealloc_extents(trans, inode, dst_path);
5731 }
5732
5733 return ret;
5734 }
5735
5736
5737
5738
5739
5740 /*
5741  * Log a single inode in the tree log: copy into the log tree the items of the
5742  * inode that changed in the current transaction (inode item, references,
5743  * xattrs, file extent items and, for directories, dir index items).
5744  *
5745  * This handles both regular files and directories. It is used by the fsync
5746  * paths as well as for logging "other" inodes (conflicting names, parent
5747  * directories). Callers are responsible for making sure the relevant parent
5748  * directories also end up in the log when that is needed.
5749  */
5750 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5751 struct btrfs_inode *inode,
5752 int inode_only,
5753 struct btrfs_log_ctx *ctx)
5754 {
5755 struct btrfs_path *path;
5756 struct btrfs_path *dst_path;
5757 struct btrfs_key min_key;
5758 struct btrfs_key max_key;
5759 struct btrfs_root *log = inode->root->log_root;
5760 int ret;
5761 bool fast_search = false;
5762 u64 ino = btrfs_ino(inode);
5763 struct extent_map_tree *em_tree = &inode->extent_tree;
5764 u64 logged_isize = 0;
5765 bool need_log_inode_item = true;
5766 bool xattrs_logged = false;
5767 bool recursive_logging = false;
5768 bool inode_item_dropped = true;
5769 const bool orig_logged_before = ctx->logged_before;
5770
5771 path = btrfs_alloc_path();
5772 if (!path)
5773 return -ENOMEM;
5774 dst_path = btrfs_alloc_path();
5775 if (!dst_path) {
5776 btrfs_free_path(path);
5777 return -ENOMEM;
5778 }
5779
5780 min_key.objectid = ino;
5781 min_key.type = BTRFS_INODE_ITEM_KEY;
5782 min_key.offset = 0;
5783
5784 max_key.objectid = ino;
5785
5786 /* Today we only do partial logging of directories: items up to the
5787  * xattrs are copied here, dir index items are logged separately. */
5788 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5789 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5790 &inode->runtime_flags) &&
5791 inode_only >= LOG_INODE_EXISTS))
5792 max_key.type = BTRFS_XATTR_ITEM_KEY;
5793 else
5794 max_key.type = (u8)-1;
5795 max_key.offset = (u64)-1;
5796
5797 /*
5798  * Only run delayed items if we are a directory. We want to make sure
5799  * all directory index items hit the subvolume tree so that we can find
5800  * them and figure out which index ranges have to be logged.
5801  */
5802 if (S_ISDIR(inode->vfs_inode.i_mode)) {
5803 ret = btrfs_commit_inode_delayed_items(trans, inode);
5804 if (ret)
5805 goto out;
5806 }
5807
5808 if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
5809 recursive_logging = true;
5810 if (inode_only == LOG_OTHER_INODE)
5811 inode_only = LOG_INODE_EXISTS;
5812 else
5813 inode_only = LOG_INODE_ALL;
5814 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
5815 } else {
5816 mutex_lock(&inode->log_mutex);
5817 }
5818
5819
5820 /*
5821  * For symlinks we must always log their full content (the inline
5822  * extent with the target), otherwise after log replay we could end
5823  * up with an empty symlink, which is something symlink(2) and the
5824  * VFS do not allow.
5825  * There is no delalloc to worry about here, since the inline extent
5826  * is created at the time the symlink itself is created.
5827  */
5828 if (S_ISLNK(inode->vfs_inode.i_mode))
5829 inode_only = LOG_INODE_ALL;
5830
5831 /*
5832  * Before dropping or copying anything, find out if the inode was
5833  * previously logged in the current transaction and cache the result
5834  * in the log context, since several decisions below depend on it.
5835  */
5836 ret = inode_logged(trans, inode, path);
5837 if (ret < 0)
5838 goto out_unlock;
5839 ctx->logged_before = (ret == 1);
5840 ret = 0;
5841
5842 /*
5843  * If the directory had entries removed or renamed away from it in the
5844  * current transaction (last_unlink_trans >= trans->transid), logging
5845  * only the directory could result in losing those entries' inodes
5846  * after a log replay, because we have no way to know where they went.
5847  * Be safe and fall back to a full transaction commit instead.
5848  */
5849 if (S_ISDIR(inode->vfs_inode.i_mode) &&
5850 inode_only == LOG_INODE_ALL &&
5851 inode->last_unlink_trans >= trans->transid) {
5852 btrfs_set_log_full_commit(trans);
5853 ret = BTRFS_LOG_FORCE_COMMIT;
5854 goto out_unlock;
5855 }
5856
5857 /*
5858  * A brute force approach to making sure we get the most uptodate
5859  * copies of everything: drop what was previously logged and copy again.
5860  */
5861 if (S_ISDIR(inode->vfs_inode.i_mode)) {
5862 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
5863
5864 clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags);
5865 if (inode_only == LOG_INODE_EXISTS)
5866 max_key_type = BTRFS_XATTR_ITEM_KEY;
5867 if (ctx->logged_before)
5868 ret = drop_inode_items(trans, log, path, inode,
5869 max_key_type);
5870 } else {
5871 if (inode_only == LOG_INODE_EXISTS && ctx->logged_before) {
5872
5873
5874
5875 /*
5876  * Make sure the inode item we write to the log has the
5877  * same size as the one previously logged, instead of the
5878  * current (possibly larger) in-memory i_size. We are only
5879  * logging that the inode exists, without its new extents,
5880  * so using the current i_size could make log replay set a
5881  * size covering data that is not in the log, resulting in
5882  * a wrong file size (an implicit expanding truncate) after
5883  * a power failure.
5884  */
5885 ret = logged_inode_size(log, inode, path, &logged_isize);
5886 if (ret)
5887 goto out_unlock;
5888 }
5889 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5890 &inode->runtime_flags)) {
5891 if (inode_only == LOG_INODE_EXISTS) {
5892 max_key.type = BTRFS_XATTR_ITEM_KEY;
5893 if (ctx->logged_before)
5894 ret = drop_inode_items(trans, log, path,
5895 inode, max_key.type);
5896 } else {
5897 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5898 &inode->runtime_flags);
5899 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5900 &inode->runtime_flags);
5901 if (ctx->logged_before)
5902 ret = truncate_inode_items(trans, log,
5903 inode, 0, 0);
5904 }
5905 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5906 &inode->runtime_flags) ||
5907 inode_only == LOG_INODE_EXISTS) {
5908 if (inode_only == LOG_INODE_ALL)
5909 fast_search = true;
5910 max_key.type = BTRFS_XATTR_ITEM_KEY;
5911 if (ctx->logged_before)
5912 ret = drop_inode_items(trans, log, path, inode,
5913 max_key.type);
5914 } else {
5915 if (inode_only == LOG_INODE_ALL)
5916 fast_search = true;
5917 inode_item_dropped = false;
5918 goto log_extents;
5919 }
5920
5921 }
5922 if (ret)
5923 goto out_unlock;
5924
5925 ret = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
5926 path, dst_path, logged_isize,
5927 recursive_logging, inode_only, ctx,
5928 &need_log_inode_item);
5929 if (ret)
5930 goto out_unlock;
5931
5932 btrfs_release_path(path);
5933 btrfs_release_path(dst_path);
5934 ret = btrfs_log_all_xattrs(trans, inode, path, dst_path);
5935 if (ret)
5936 goto out_unlock;
5937 xattrs_logged = true;
5938 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
5939 btrfs_release_path(path);
5940 btrfs_release_path(dst_path);
5941 ret = btrfs_log_holes(trans, inode, path);
5942 if (ret)
5943 goto out_unlock;
5944 }
5945 log_extents:
5946 btrfs_release_path(path);
5947 btrfs_release_path(dst_path);
5948 if (need_log_inode_item) {
5949 ret = log_inode_item(trans, log, dst_path, inode, inode_item_dropped);
5950 if (ret)
5951 goto out_unlock;
5952
5953 /*
5954  * If we jumped here through the fast path (goto log_extents),
5955  * the xattrs were not copied in this call. That is fine if the
5956  * inode was already logged in this transaction, since its
5957  * xattrs are then already in the log tree, but if it was never
5958  * logged before we must copy them now.
5959  */
5960 if (!xattrs_logged && inode->logged_trans < trans->transid) {
5961 ret = btrfs_log_all_xattrs(trans, inode, path, dst_path);
5962 if (ret)
5963 goto out_unlock;
5964 btrfs_release_path(path);
5965 }
5966 }
5967 if (fast_search) {
5968 ret = btrfs_log_changed_extents(trans, inode, dst_path, ctx);
5969 if (ret)
5970 goto out_unlock;
5971 } else if (inode_only == LOG_INODE_ALL) {
5972 struct extent_map *em, *n;
5973
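		/*
		 * Not using the fast path: the file extent items were already
		 * copied from the subvolume tree above, so just drop the entries
		 * from the modified extents list.
		 */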
5974 write_lock(&em_tree->lock);
5975 list_for_each_entry_safe(em, n, &em_tree->modified_extents, list)
5976 list_del_init(&em->list);
5977 write_unlock(&em_tree->lock);
5978 }
5979
5980 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
5981 ret = log_directory_changes(trans, inode, path, dst_path, ctx);
5982 if (ret)
5983 goto out_unlock;
5984 }
5985
5986 spin_lock(&inode->lock);
5987 inode->logged_trans = trans->transid;
5988
5989
5990
5991
5992
5993
5994
5995
5996
5997
5998
5999
6000
6001
6002
6003
6004
6005
6006
6007
6008
6009
6010
6011
6012 /*
6013  * Don't update last_log_commit when we only logged that the inode
6014  * exists (LOG_INODE_EXISTS). In that mode we did not log new extents,
6015  * xattrs or references, so a future fsync of this inode in the same
6016  * transaction must not be skipped as a no-op, otherwise those changes
6017  * would never make it to the log tree.
6018  */
6019 if (inode_only != LOG_INODE_EXISTS)
6020 inode->last_log_commit = inode->last_sub_trans;
6021 spin_unlock(&inode->lock);
6022
6023 /*
6024  * After a full logging (LOG_INODE_ALL), all extents, including shared
6025  * (reflinked) ones, are in the log tree, so reset last_reflink_trans.
6026  */
6027 if (inode_only == LOG_INODE_ALL)
6028 inode->last_reflink_trans = 0;
6029
6030 out_unlock:
6031 mutex_unlock(&inode->log_mutex);
6032 out:
6033 btrfs_free_path(path);
6034 btrfs_free_path(dst_path);
6035
6036 if (recursive_logging)
6037 ctx->logged_before = orig_logged_before;
6038
6039 return ret;
6040 }
6041
6042
6043 /*
6044  * Check if we need to log an inode. This is used in contexts where, while
6045  * logging an inode, we also need to log other inodes (conflicting names,
6046  * parent directories, inodes of new dentries), either that they exist or in
6047  * full mode. It lets us skip inodes that were not changed in the current
6048  * transaction or that were already fully logged in it, avoiding unnecessary
6049  * work.
6050  */
6051 static bool need_log_inode(struct btrfs_trans_handle *trans,
6052 struct btrfs_inode *inode)
6053 {
6054 /*
6055  * If the directory was not modified in the current transaction (no
6056  * dentries added or removed), there is nothing new to log for it.
6057  */
6058 if (S_ISDIR(inode->vfs_inode.i_mode) && inode->last_trans < trans->transid)
6059 return false;
6060
6061
6062 /*
6063  * If the inode was already logged in this transaction and does not
6064  * have the "copy everything" flag set (which is set when, for example,
6065  * xattrs are added, changed or deleted after the inode was logged),
6066  * then there is nothing left to log for it. New and deleted names are
6067  * handled by the link, rename and unlink paths, which update the log
6068  * directly or force a transaction commit when needed.
6069  */
6070 if (inode->logged_trans == trans->transid &&
6071 !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
6072 return false;
6073
6074 return true;
6075 }
6076
6077 struct btrfs_dir_list {
6078 u64 ino;
6079 struct list_head list;
6080 };
6081
6082
6083
6084
6085
6086
6087
6088
6089
6090
6091
6092
6093
6094
6095
6096
6097
6098
6099
6100
6101
6102
6103
6104
6105
6106
6107
6108
6109
6110 /*
6111  * Log the inodes referenced by the new dentries (dir index items created in
6112  * the current transaction) of a directory. This is done iteratively with a
6113  * list rather than recursion: whenever one of those inodes is itself a
6114  * directory that also got new dentries, it is appended to the list and
6115  * processed in a later iteration, so that none of the new files or
6116  * subdirectories are missing from the log after replay.
6117  */
6118 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
6119 struct btrfs_root *root,
6120 struct btrfs_inode *start_inode,
6121 struct btrfs_log_ctx *ctx)
6122 {
6123 struct btrfs_fs_info *fs_info = root->fs_info;
6124 struct btrfs_path *path;
6125 LIST_HEAD(dir_list);
6126 struct btrfs_dir_list *dir_elem;
6127 int ret = 0;
6128
6129 /*
6130  * If we are logging a new name (link or rename operation), there is no
6131  * need to log new dentries of the directories involved; we only need
6132  * to make sure the inode's new name and its parents are in the log.
6133  */
6134 if (ctx->logging_new_name)
6135 return 0;
6136
6137 path = btrfs_alloc_path();
6138 if (!path)
6139 return -ENOMEM;
6140
6141 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
6142 if (!dir_elem) {
6143 btrfs_free_path(path);
6144 return -ENOMEM;
6145 }
6146 dir_elem->ino = btrfs_ino(start_inode);
6147 list_add_tail(&dir_elem->list, &dir_list);
6148
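	/*
	 * Breadth-first processing: start with the directory we were asked to
	 * log and append any subdirectory that also needs its new dentries
	 * logged.
	 */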
6149 while (!list_empty(&dir_list)) {
6150 struct extent_buffer *leaf;
6151 struct btrfs_key min_key;
6152 int nritems;
6153 int i;
6154
6155 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
6156 list);
6157 if (ret)
6158 goto next_dir_inode;
6159
6160 min_key.objectid = dir_elem->ino;
6161 min_key.type = BTRFS_DIR_INDEX_KEY;
6162 min_key.offset = 0;
6163 again:
6164 btrfs_release_path(path);
6165 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
6166 if (ret < 0) {
6167 goto next_dir_inode;
6168 } else if (ret > 0) {
6169 ret = 0;
6170 goto next_dir_inode;
6171 }
6172
6173 leaf = path->nodes[0];
6174 nritems = btrfs_header_nritems(leaf);
6175 for (i = path->slots[0]; i < nritems; i++) {
6176 struct btrfs_dir_item *di;
6177 struct btrfs_key di_key;
6178 struct inode *di_inode;
6179 struct btrfs_dir_list *new_dir_elem;
6180 int log_mode = LOG_INODE_EXISTS;
6181 int type;
6182
6183 btrfs_item_key_to_cpu(leaf, &min_key, i);
6184 if (min_key.objectid != dir_elem->ino ||
6185 min_key.type != BTRFS_DIR_INDEX_KEY)
6186 goto next_dir_inode;
6187
6188 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
6189 type = btrfs_dir_type(leaf, di);
6190 if (btrfs_dir_transid(leaf, di) < trans->transid)
6191 continue;
6192 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
6193 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
6194 continue;
6195
6196 btrfs_release_path(path);
6197 di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
6198 if (IS_ERR(di_inode)) {
6199 ret = PTR_ERR(di_inode);
6200 goto next_dir_inode;
6201 }
6202
6203 if (!need_log_inode(trans, BTRFS_I(di_inode))) {
6204 btrfs_add_delayed_iput(di_inode);
6205 break;
6206 }
6207
6208 ctx->log_new_dentries = false;
6209 if (type == BTRFS_FT_DIR)
6210 log_mode = LOG_INODE_ALL;
6211 ret = btrfs_log_inode(trans, BTRFS_I(di_inode),
6212 log_mode, ctx);
6213 btrfs_add_delayed_iput(di_inode);
6214 if (ret)
6215 goto next_dir_inode;
6216 if (ctx->log_new_dentries) {
6217 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
6218 GFP_NOFS);
6219 if (!new_dir_elem) {
6220 ret = -ENOMEM;
6221 goto next_dir_inode;
6222 }
6223 new_dir_elem->ino = di_key.objectid;
6224 list_add_tail(&new_dir_elem->list, &dir_list);
6225 }
6226 break;
6227 }
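		/*
		 * Continue from the next directory index; the "again" label
		 * releases the path and redoes the forward search.
		 */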
6228 if (min_key.offset < (u64)-1) {
6229 min_key.offset++;
6230 goto again;
6231 }
6232 next_dir_inode:
6233 list_del(&dir_elem->list);
6234 kfree(dir_elem);
6235 }
6236
6237 btrfs_free_path(path);
6238 return ret;
6239 }
6240
6241 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
6242 struct btrfs_inode *inode,
6243 struct btrfs_log_ctx *ctx)
6244 {
6245 struct btrfs_fs_info *fs_info = trans->fs_info;
6246 int ret;
6247 struct btrfs_path *path;
6248 struct btrfs_key key;
6249 struct btrfs_root *root = inode->root;
6250 const u64 ino = btrfs_ino(inode);
6251
6252 path = btrfs_alloc_path();
6253 if (!path)
6254 return -ENOMEM;
6255 path->skip_locking = 1;
6256 path->search_commit_root = 1;
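	/*
	 * Find all directories that reference this inode by scanning its
	 * INODE_REF and INODE_EXTREF items in the commit root, and log each of
	 * those parent directories.
	 */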
6257
6258 key.objectid = ino;
6259 key.type = BTRFS_INODE_REF_KEY;
6260 key.offset = 0;
6261 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6262 if (ret < 0)
6263 goto out;
6264
6265 while (true) {
6266 struct extent_buffer *leaf = path->nodes[0];
6267 int slot = path->slots[0];
6268 u32 cur_offset = 0;
6269 u32 item_size;
6270 unsigned long ptr;
6271
6272 if (slot >= btrfs_header_nritems(leaf)) {
6273 ret = btrfs_next_leaf(root, path);
6274 if (ret < 0)
6275 goto out;
6276 else if (ret > 0)
6277 break;
6278 continue;
6279 }
6280
6281 btrfs_item_key_to_cpu(leaf, &key, slot);
6282
6283 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
6284 break;
6285
6286 item_size = btrfs_item_size(leaf, slot);
6287 ptr = btrfs_item_ptr_offset(leaf, slot);
6288 while (cur_offset < item_size) {
6289 struct btrfs_key inode_key;
6290 struct inode *dir_inode;
6291
6292 inode_key.type = BTRFS_INODE_ITEM_KEY;
6293 inode_key.offset = 0;
6294
6295 if (key.type == BTRFS_INODE_EXTREF_KEY) {
6296 struct btrfs_inode_extref *extref;
6297
6298 extref = (struct btrfs_inode_extref *)
6299 (ptr + cur_offset);
6300 inode_key.objectid = btrfs_inode_extref_parent(
6301 leaf, extref);
6302 cur_offset += sizeof(*extref);
6303 cur_offset += btrfs_inode_extref_name_len(leaf,
6304 extref);
6305 } else {
6306 inode_key.objectid = key.offset;
6307 cur_offset = item_size;
6308 }
6309
6310 dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
6311 root);
6312
6313
6314
6315
6316
6317
6318
6319
6320
6321
6322
6323
6324
6325
6326
6327 /*
6328  * If the parent directory was deleted in the current
6329  * transaction, looking it up fails with -ENOENT and we
6330  * return the error so that fsync falls back to a full
6331  * transaction commit. Otherwise, after a log replay, the
6332  * inode could end up linked both under its new parent
6333  * and under the old (deleted) one.
6334  */
6335 if (IS_ERR(dir_inode)) {
6336 ret = PTR_ERR(dir_inode);
6337 goto out;
6338 }
6339
6340 if (!need_log_inode(trans, BTRFS_I(dir_inode))) {
6341 btrfs_add_delayed_iput(dir_inode);
6342 continue;
6343 }
6344
6345 ctx->log_new_dentries = false;
6346 ret = btrfs_log_inode(trans, BTRFS_I(dir_inode),
6347 LOG_INODE_ALL, ctx);
6348 if (!ret && ctx->log_new_dentries)
6349 ret = log_new_dir_dentries(trans, root,
6350 BTRFS_I(dir_inode), ctx);
6351 btrfs_add_delayed_iput(dir_inode);
6352 if (ret)
6353 goto out;
6354 }
6355 path->slots[0]++;
6356 }
6357 ret = 0;
6358 out:
6359 btrfs_free_path(path);
6360 return ret;
6361 }
6362
6363 static int log_new_ancestors(struct btrfs_trans_handle *trans,
6364 struct btrfs_root *root,
6365 struct btrfs_path *path,
6366 struct btrfs_log_ctx *ctx)
6367 {
6368 struct btrfs_key found_key;
6369
6370 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
6371
6372 while (true) {
6373 struct btrfs_fs_info *fs_info = root->fs_info;
6374 struct extent_buffer *leaf = path->nodes[0];
6375 int slot = path->slots[0];
6376 struct btrfs_key search_key;
6377 struct inode *inode;
6378 u64 ino;
6379 int ret = 0;
6380
6381 btrfs_release_path(path);
6382
6383 ino = found_key.offset;
6384
6385 search_key.objectid = found_key.offset;
6386 search_key.type = BTRFS_INODE_ITEM_KEY;
6387 search_key.offset = 0;
6388 inode = btrfs_iget(fs_info->sb, ino, root);
6389 if (IS_ERR(inode))
6390 return PTR_ERR(inode);
6391
6392 if (BTRFS_I(inode)->generation >= trans->transid &&
6393 need_log_inode(trans, BTRFS_I(inode)))
6394 ret = btrfs_log_inode(trans, BTRFS_I(inode),
6395 LOG_INODE_EXISTS, ctx);
6396 btrfs_add_delayed_iput(inode);
6397 if (ret)
6398 return ret;
6399
6400 if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID)
6401 break;
6402
6403 search_key.type = BTRFS_INODE_REF_KEY;
6404 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
6405 if (ret < 0)
6406 return ret;
6407
6408 leaf = path->nodes[0];
6409 slot = path->slots[0];
6410 if (slot >= btrfs_header_nritems(leaf)) {
6411 ret = btrfs_next_leaf(root, path);
6412 if (ret < 0)
6413 return ret;
6414 else if (ret > 0)
6415 return -ENOENT;
6416 leaf = path->nodes[0];
6417 slot = path->slots[0];
6418 }
6419
6420 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6421 if (found_key.objectid != search_key.objectid ||
6422 found_key.type != BTRFS_INODE_REF_KEY)
6423 return -ENOENT;
6424 }
6425 return 0;
6426 }
6427
6428 static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
6429 struct btrfs_inode *inode,
6430 struct dentry *parent,
6431 struct btrfs_log_ctx *ctx)
6432 {
6433 struct btrfs_root *root = inode->root;
6434 struct dentry *old_parent = NULL;
6435 struct super_block *sb = inode->vfs_inode.i_sb;
6436 int ret = 0;
6437
6438 while (true) {
6439 if (!parent || d_really_is_negative(parent) ||
6440 sb != parent->d_sb)
6441 break;
6442
6443 inode = BTRFS_I(d_inode(parent));
6444 if (root != inode->root)
6445 break;
6446
6447 if (inode->generation >= trans->transid &&
6448 need_log_inode(trans, inode)) {
6449 ret = btrfs_log_inode(trans, inode,
6450 LOG_INODE_EXISTS, ctx);
6451 if (ret)
6452 break;
6453 }
6454 if (IS_ROOT(parent))
6455 break;
6456
6457 parent = dget_parent(parent);
6458 dput(old_parent);
6459 old_parent = parent;
6460 }
6461 dput(old_parent);
6462
6463 return ret;
6464 }
6465
6466 static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
6467 struct btrfs_inode *inode,
6468 struct dentry *parent,
6469 struct btrfs_log_ctx *ctx)
6470 {
6471 struct btrfs_root *root = inode->root;
6472 const u64 ino = btrfs_ino(inode);
6473 struct btrfs_path *path;
6474 struct btrfs_key search_key;
6475 int ret;
6476
6477 /*
6478  * With a single hard link we can simply walk up the dentry chain; with
6479  * multiple links we must find every parent via the inode's ref items.
6480  */
6481 if (inode->vfs_inode.i_nlink < 2)
6482 return log_new_ancestors_fast(trans, inode, parent, ctx);
6483
6484 path = btrfs_alloc_path();
6485 if (!path)
6486 return -ENOMEM;
6487
6488 search_key.objectid = ino;
6489 search_key.type = BTRFS_INODE_REF_KEY;
6490 search_key.offset = 0;
6491 again:
6492 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
6493 if (ret < 0)
6494 goto out;
6495 if (ret == 0)
6496 path->slots[0]++;
6497
6498 while (true) {
6499 struct extent_buffer *leaf = path->nodes[0];
6500 int slot = path->slots[0];
6501 struct btrfs_key found_key;
6502
6503 if (slot >= btrfs_header_nritems(leaf)) {
6504 ret = btrfs_next_leaf(root, path);
6505 if (ret < 0)
6506 goto out;
6507 else if (ret > 0)
6508 break;
6509 continue;
6510 }
6511
6512 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6513 if (found_key.objectid != ino ||
6514 found_key.type > BTRFS_INODE_EXTREF_KEY)
6515 break;
6516
6517 /*
6518  * Don't deal with extended references (which only show up when
6519  * an inode has many hard links), as that would require tracking
6520  * which name inside the item we are processing across path
6521  * releases. These are rare cases, so simply return an error to
6522  * make the fsync fall back to a transaction commit.
6523  */
6524 if (found_key.type == BTRFS_INODE_EXTREF_KEY) {
6525 ret = -EMLINK;
6526 goto out;
6527 }
6528
6529 /*
6530  * Logging the new ancestors for this reference does its own
6531  * searches and releases our path, so remember the current ref
6532  * key in @search_key. Once the ancestors are logged we redo the
6533  * search from this key and continue with the next reference.
6534  */
6535 memcpy(&search_key, &found_key, sizeof(search_key));
6536
6537 ret = log_new_ancestors(trans, root, path, ctx);
6538 if (ret)
6539 goto out;
6540 btrfs_release_path(path);
6541 goto again;
6542 }
6543 ret = 0;
6544 out:
6545 btrfs_free_path(path);
6546 return ret;
6547 }
6548
6549 /*
6550  * Helper around btrfs_log_inode() that also makes sure the inode's new parent
6551  * directories (created or changed in the current transaction) end up in the
6552  * log with at least an "inode exists" entry, and that all parents are logged
6553  * when the inode was involved in an unlink or rename in this transaction.
6554  */
6555 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
6556 struct btrfs_inode *inode,
6557 struct dentry *parent,
6558 int inode_only,
6559 struct btrfs_log_ctx *ctx)
6560 {
6561 struct btrfs_root *root = inode->root;
6562 struct btrfs_fs_info *fs_info = root->fs_info;
6563 int ret = 0;
6564 bool log_dentries = false;
6565
6566 if (btrfs_test_opt(fs_info, NOTREELOG)) {
6567 ret = BTRFS_LOG_FORCE_COMMIT;
6568 goto end_no_trans;
6569 }
6570
6571 if (btrfs_root_refs(&root->root_item) == 0) {
6572 ret = BTRFS_LOG_FORCE_COMMIT;
6573 goto end_no_trans;
6574 }
6575
6576 /*
6577  * Skip inodes that are already fully present in the log for this
6578  * transaction (and have no pending ordered extents), as well as inodes
6579  * with a link count of zero, which will never be accessible again.
6580  */
6581 if ((btrfs_inode_in_log(inode, trans->transid) &&
6582 list_empty(&ctx->ordered_extents)) ||
6583 inode->vfs_inode.i_nlink == 0) {
6584 ret = BTRFS_NO_LOG_SYNC;
6585 goto end_no_trans;
6586 }
6587
6588 ret = start_log_trans(trans, root, ctx);
6589 if (ret)
6590 goto end_no_trans;
6591
6592 ret = btrfs_log_inode(trans, inode, inode_only, ctx);
6593 if (ret)
6594 goto end_trans;
6595
6596 /*
6597  * For regular files, if the inode already existed on disk before this
6598  * transaction and was not involved in any unlink or rename during it,
6599  * we don't have to worry about its parent directories at all, so we
6600  * are done after logging the inode itself.
6601  */
6602 if (S_ISREG(inode->vfs_inode.i_mode) &&
6603 inode->generation < trans->transid &&
6604 inode->last_unlink_trans < trans->transid) {
6605 ret = 0;
6606 goto end_trans;
6607 }
6608
6609 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx->log_new_dentries)
6610 log_dentries = true;
6611
6612
6613
6614
6615
6616
6617
6618
6619
6620
6621
6622
6623
6624
6625
6626
6627
6628
6629
6630
6631
6632
6633
6634
6635
6636
6637
6638
6639
6640
6641
6642
6643
6644
6645
6646
6647 /*
6648  * If the inode had a name removed or changed in the current transaction,
6649  * also log every directory that currently holds a reference to it, so
6650  * that after a log replay the directories' contents and the inode's
6651  * link count remain consistent.
6652  */
6653 if (inode->last_unlink_trans >= trans->transid) {
6654 ret = btrfs_log_all_parents(trans, inode, ctx);
6655 if (ret)
6656 goto end_trans;
6657 }
6658
6659 ret = log_all_new_ancestors(trans, inode, parent, ctx);
6660 if (ret)
6661 goto end_trans;
6662
6663 if (log_dentries)
6664 ret = log_new_dir_dentries(trans, root, inode, ctx);
6665 else
6666 ret = 0;
6667 end_trans:
6668 if (ret < 0) {
6669 btrfs_set_log_full_commit(trans);
6670 ret = BTRFS_LOG_FORCE_COMMIT;
6671 }
6672
6673 if (ret)
6674 btrfs_remove_log_ctx(root, ctx);
6675 btrfs_end_log_trans(root);
6676 end_no_trans:
6677 return ret;
6678 }
6679
6680
6681 /*
6682  * Log the inode of a dentry and its parents. Returns 0 if everything was
6683  * logged, and a non-zero value (e.g. BTRFS_LOG_FORCE_COMMIT) when the caller
6684  * must fall back to committing the transaction to safely persist the data.
6685  */
6686 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
6687 struct dentry *dentry,
6688 struct btrfs_log_ctx *ctx)
6689 {
6690 struct dentry *parent = dget_parent(dentry);
6691 int ret;
6692
6693 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
6694 LOG_INODE_ALL, ctx);
6695 dput(parent);
6696
6697 return ret;
6698 }
6699
6700
6701 /*
6702  * Called during mount to recover and replay any log trees left from a crash.
6703  */
6704 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
6705 {
6706 int ret;
6707 struct btrfs_path *path;
6708 struct btrfs_trans_handle *trans;
6709 struct btrfs_key key;
6710 struct btrfs_key found_key;
6711 struct btrfs_root *log;
6712 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
6713 struct walk_control wc = {
6714 .process_func = process_one_buffer,
6715 .stage = LOG_WALK_PIN_ONLY,
6716 };
6717
6718 path = btrfs_alloc_path();
6719 if (!path)
6720 return -ENOMEM;
6721
6722 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6723
6724 trans = btrfs_start_transaction(fs_info->tree_root, 0);
6725 if (IS_ERR(trans)) {
6726 ret = PTR_ERR(trans);
6727 goto error;
6728 }
6729
6730 wc.trans = trans;
6731 wc.pin = 1;
6732
6733 ret = walk_log_tree(trans, log_root_tree, &wc);
6734 if (ret) {
6735 btrfs_abort_transaction(trans, ret);
6736 goto error;
6737 }
6738
6739 again:
6740 key.objectid = BTRFS_TREE_LOG_OBJECTID;
6741 key.offset = (u64)-1;
6742 key.type = BTRFS_ROOT_ITEM_KEY;
6743
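	/*
	 * Walk all the tree log roots stored in the log root tree, from the
	 * highest key offset down to the lowest, and replay each one into its
	 * subvolume.
	 */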
6744 while (1) {
6745 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
6746
6747 if (ret < 0) {
6748 btrfs_abort_transaction(trans, ret);
6749 goto error;
6750 }
6751 if (ret > 0) {
6752 if (path->slots[0] == 0)
6753 break;
6754 path->slots[0]--;
6755 }
6756 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
6757 path->slots[0]);
6758 btrfs_release_path(path);
6759 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
6760 break;
6761
6762 log = btrfs_read_tree_root(log_root_tree, &found_key);
6763 if (IS_ERR(log)) {
6764 ret = PTR_ERR(log);
6765 btrfs_abort_transaction(trans, ret);
6766 goto error;
6767 }
6768
6769 wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset,
6770 true);
6771 if (IS_ERR(wc.replay_dest)) {
6772 ret = PTR_ERR(wc.replay_dest);
6773
6774
6775
6776
6777 /*
6778  * We didn't find the subvolume, most likely because it
6779  * was deleted. That is fine, just skip this log tree and
6780  * move on to the next one. We still pin the log tree's
6781  * root block, since it will be read again in the next
6782  * replay stages and must not be overwritten by new
6783  * allocations in the meantime.
6784  */
6785 if (ret == -ENOENT)
6786 ret = btrfs_pin_extent_for_log_replay(trans,
6787 log->node->start,
6788 log->node->len);
6789 btrfs_put_root(log);
6790
6791 if (!ret)
6792 goto next;
6793 btrfs_abort_transaction(trans, ret);
6794 goto error;
6795 }
6796
6797 wc.replay_dest->log_root = log;
6798 ret = btrfs_record_root_in_trans(trans, wc.replay_dest);
6799 if (ret)
6800 /* Don't bail out yet, the roots acquired above must be released first. */
6801 btrfs_abort_transaction(trans, ret);
6802 else
6803 ret = walk_log_tree(trans, log, &wc);
6804
6805 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6806 ret = fixup_inode_link_counts(trans, wc.replay_dest,
6807 path);
6808 if (ret)
6809 btrfs_abort_transaction(trans, ret);
6810 }
6811
6812 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6813 struct btrfs_root *root = wc.replay_dest;
6814
6815 btrfs_release_path(path);
6816
6817 /*
6818  * We just replayed everything, so the highest free
6819  * objectid of this root may have changed if inode items
6820  * were replayed. Recompute it so new inodes created in
6821  * this subvolume don't reuse an objectid taken by a
6822  * replayed inode. No locking is needed since log replay
6823  * only happens during mount.
6824  */
6825 ret = btrfs_init_root_free_objectid(root);
6826 if (ret)
6827 btrfs_abort_transaction(trans, ret);
6828 }
6829
6830 wc.replay_dest->log_root = NULL;
6831 btrfs_put_root(wc.replay_dest);
6832 btrfs_put_root(log);
6833
6834 if (ret)
6835 goto error;
6836 next:
6837 if (found_key.offset == 0)
6838 break;
6839 key.offset = found_key.offset - 1;
6840 }
6841 btrfs_release_path(path);
6842
6843 /* Step one is to pin it all, step two is to replay just inodes. */
6844 if (wc.pin) {
6845 wc.pin = 0;
6846 wc.process_func = replay_one_buffer;
6847 wc.stage = LOG_WALK_REPLAY_INODES;
6848 goto again;
6849 }
6850 /* Then replay the dir index items and finally everything else. */
6851 if (wc.stage < LOG_WALK_REPLAY_ALL) {
6852 wc.stage++;
6853 goto again;
6854 }
6855
6856 btrfs_free_path(path);
6857
6858 /* Finally commit the transaction, which also unpins the extents pinned above. */
6859 ret = btrfs_commit_transaction(trans);
6860 if (ret)
6861 return ret;
6862
6863 log_root_tree->log_root = NULL;
6864 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6865 btrfs_put_root(log_root_tree);
6866
6867 return 0;
6868 error:
6869 if (wc.trans)
6870 btrfs_end_transaction(wc.trans);
6871 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6872 btrfs_free_path(path);
6873 return ret;
6874 }
6875
6876
6877 /*
6878  * There are some corner cases where we want to force a full transaction commit
6879  * instead of allowing a directory to be logged. They revolve around files that
6880  * were unlinked from a directory: record the unlink in the parent directory
6881  * and in the inode, so that a later fsync of either of them in the same
6882  * transaction does the right thing (logging all parents or falling back to a
6883  * full commit).
6884  *
6885  * Must be called before the unlink updates to the subvolume tree are done.
6886  */
6887 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
6888 struct btrfs_inode *dir, struct btrfs_inode *inode,
6889 int for_rename)
6890 {
6891
6892
6893
6894
6895
6896 /*
6897  * Record on the inode that it lost a name in this transaction, so that
6898  * a later fsync of it also logs all of its parent directories (or, for
6899  * a directory, falls back to a full transaction commit).
6900  */
6901 mutex_lock(&inode->log_mutex);
6902 inode->last_unlink_trans = trans->transid;
6903 mutex_unlock(&inode->log_mutex);
6904
6905 /*
6906  * If this directory was already logged in the current transaction, any
6907  * new names for this file/dir will get recorded when they are logged.
6908  */
6909 if (dir->logged_trans == trans->transid)
6910 return;
6911
6912 /*
6913  * If the inode we're about to unlink was already logged, the log will
6914  * be properly updated with any new name added for it.
6915  */
6916 if (inode->logged_trans == trans->transid)
6917 return;
6918
6919 /*
6920  * When renaming a file across directories, if the source directory is
6921  * fsynced later on there is no way to find the destination directory
6922  * and fsync it as well. So be conservative and mark the source
6923  * directory, forcing a full transaction commit if it gets fsynced, so
6924  * that the new name is not lost.
6925  */
6926 if (for_rename)
6927 goto record;
6928
6929 /* A plain unlink needs no special recording for the directory. */
6930 return;
6931
6932 record:
6933 mutex_lock(&dir->log_mutex);
6934 dir->last_unlink_trans = trans->transid;
6935 mutex_unlock(&dir->log_mutex);
6936 }
6937
6938
6939
6940 /*
6941  * Make sure that if someone attempts to fsync the parent directory of a
6942  * deleted snapshot, it ends up triggering a full transaction commit. This is
6943  * to guarantee that after a log replay we don't end up with a directory entry
6944  * that points to a snapshot or subvolume which was deleted, since log replay
6945  * does not delete snapshots/subvolumes and we would otherwise leave a dangling
6946  * entry behind.
6947  *
6948  * Must be called before the actual snapshot destroy operation starts.
6949  */
6950 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
6951 struct btrfs_inode *dir)
6952 {
6953 mutex_lock(&dir->log_mutex);
6954 dir->last_unlink_trans = trans->transid;
6955 mutex_unlock(&dir->log_mutex);
6956 }
6957
6958
6959
6960 /*
6961  * Update the log after adding a new name for an inode, as a result of a link
6962  * or rename operation.
6963  *
6964  * @trans:         Transaction handle.
6965  * @old_dentry:    The dentry associated with the old name and the old parent
6966  *                 directory.
6967  * @old_dir:       The inode of the previous parent directory for the case of a
6968  *                 rename. For a link operation it must be NULL.
6969  * @old_dir_index: The index number associated with the old name, meaningful
6970  *                 only for rename operations (when @old_dir is not NULL).
6971  *                 Ignored for link operations.
6972  * @parent:        The dentry associated with the directory under which the new
6973  *                 name is located.
6974  */
6975 void btrfs_log_new_name(struct btrfs_trans_handle *trans,
6976 struct dentry *old_dentry, struct btrfs_inode *old_dir,
6977 u64 old_dir_index, struct dentry *parent)
6978 {
6979 struct btrfs_inode *inode = BTRFS_I(d_inode(old_dentry));
6980 struct btrfs_root *root = inode->root;
6981 struct btrfs_log_ctx ctx;
6982 bool log_pinned = false;
6983 int ret;
6984
6985 /*
6986  * For non-directories, record that the inode got a new name, which
6987  * makes a later fsync of it in this transaction also log its parents.
6988  */
6989 if (!S_ISDIR(inode->vfs_inode.i_mode))
6990 inode->last_unlink_trans = trans->transid;
6991
6992 /*
6993  * If this inode hasn't been logged and, for a rename, the directory it
6994  * is being moved from hasn't been logged either, we don't need to log it.
6995  */
6996 ret = inode_logged(trans, inode, NULL);
6997 if (ret < 0) {
6998 goto out;
6999 } else if (ret == 0) {
7000 if (!old_dir)
7001 return;
7002
7003 /*
7004  * For a rename, also check whether the old parent directory was
7005  * logged; if it wasn't either, there is nothing to update.
7006  */
7007 ret = inode_logged(trans, old_dir, NULL);
7008 if (ret < 0)
7009 goto out;
7010 else if (ret == 0)
7011 return;
7012 }
7013 ret = 0;
7014
7015
7016 /*
7017  * If we are renaming an inode out of a directory that was logged in
7018  * the current transaction, make sure the log also reflects the removal
7019  * of the old name, otherwise after a log replay the inode could show
7020  * up both with its new name and with the old one in the old directory.
7021  */
7022 if (old_dir && old_dir->logged_trans == trans->transid) {
7023 struct btrfs_root *log = old_dir->root->log_root;
7024 struct btrfs_path *path;
7025
7026 ASSERT(old_dir_index >= BTRFS_DIR_START_INDEX);
7027
7028 /*
7029  * We have two inodes to update in the log, the old directory
7030  * and the inode that got the new name, so we must pin the log
7031  * to prevent anyone from syncing it until both updates are
7032  * done.
7033  */
7034 ret = join_running_log_trans(root);
7035 /*
7036  * At least one of the inodes was logged before, so this should
7037  * not fail; if it somehow does, just bail out, the log will be
7038  * marked for a full transaction commit below.
7039  */
7040 if (WARN_ON_ONCE(ret < 0))
7041 goto out;
7042 log_pinned = true;
7043
7044 path = btrfs_alloc_path();
7045 if (!path) {
7046 ret = -ENOMEM;
7047 goto out;
7048 }
7049
7050
7051
7052
7053 /*
7054  * Another task may be logging the old directory concurrently
7055  * (for example while logging some other inode that has, or had,
7056  * a dentry in it), so take the old directory's log_mutex while
7057  * we delete the old name from the log and, if it was not there,
7058  * insert a dir log key covering the old index number.
7059  */
7060 mutex_lock(&old_dir->log_mutex);
7061 ret = del_logged_dentry(trans, log, path, btrfs_ino(old_dir),
7062 old_dentry->d_name.name,
7063 old_dentry->d_name.len, old_dir_index);
7064 if (ret > 0) {
7065 /*
7066  * The dentry for the old name is not in the log, so
7067  * record its deletion with a dir log key for its index.
7068  */
7069 btrfs_release_path(path);
7070 ret = insert_dir_log_key(trans, log, path,
7071 btrfs_ino(old_dir),
7072 old_dir_index, old_dir_index);
7073 }
7074 mutex_unlock(&old_dir->log_mutex);
7075
7076 btrfs_free_path(path);
7077 if (ret < 0)
7078 goto out;
7079 }
7080
7081 btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
7082 ctx.logging_new_name = true;
7083 /*
7084  * We don't care about the return value here. If logging the new name
7085  * fails, the next attempt to sync the log falls back to a full
7086  * transaction commit (because btrfs_set_log_full_commit() is called on
7087  * error), so we never end up committing a log with an inconsistent
7088  * state after a rename.
7089  */
7090 btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx);
7091 out:
7092 /*
7093  * If anything failed, mark the log for a full transaction commit: it is
7094  * not consistent, or we could not determine whether the inodes were
7095  * logged before. Do this before unpinning the log, to avoid races with
7096  * someone else trying to sync it.
7097  */
7098 if (ret < 0)
7099 btrfs_set_log_full_commit(trans);
7100 if (log_pinned)
7101 btrfs_end_log_trans(root);
7102 }
7103