0001
0002
0003
0004
0005
0006 #include <linux/fs.h>
0007 #include <linux/slab.h>
0008 #include <linux/sched.h>
0009 #include <linux/writeback.h>
0010 #include <linux/pagemap.h>
0011 #include <linux/blkdev.h>
0012 #include <linux/uuid.h>
0013 #include <linux/timekeeping.h>
0014 #include "misc.h"
0015 #include "ctree.h"
0016 #include "disk-io.h"
0017 #include "transaction.h"
0018 #include "locking.h"
0019 #include "tree-log.h"
0020 #include "volumes.h"
0021 #include "dev-replace.h"
0022 #include "qgroup.h"
0023 #include "block-group.h"
0024 #include "space-info.h"
0025 #include "zoned.h"
0026
0027 #define BTRFS_ROOT_TRANS_TAG 0
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
/*
 * For each transaction state, the set of join types (__TRANS_* flags) that
 * are NOT allowed to join a transaction in that state.  Consulted by
 * join_transaction(), which returns -EBUSY when the caller's type is in
 * the blocked mask for the running transaction's current state.
 */
static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
	[TRANS_STATE_RUNNING]		= 0U,
	[TRANS_STATE_COMMIT_START]	= (__TRANS_START | __TRANS_ATTACH),
	[TRANS_STATE_COMMIT_DOING]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_UNBLOCKED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_SUPER_COMMITTED]	= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
	[TRANS_STATE_COMPLETED]		= (__TRANS_START |
					   __TRANS_ATTACH |
					   __TRANS_JOIN |
					   __TRANS_JOIN_NOLOCK |
					   __TRANS_JOIN_NOSTART),
};
0123
/*
 * Drop a reference on @transaction and free it when the last reference
 * goes away.  On the final put the transaction must already be unlinked
 * from the fs-wide transaction list and must have no delayed refs left;
 * leftover pending csums are reported but not fixed up.
 */
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(refcount_read(&transaction->use_count) == 0);
	if (refcount_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.href_root.rb_root));
		WARN_ON(!RB_EMPTY_ROOT(
				&transaction->delayed_refs.dirty_extent_root));
		if (transaction->delayed_refs.pending_csums)
			btrfs_err(transaction->fs_info,
				  "pending csums is %llu",
				  transaction->delayed_refs.pending_csums);
		/*
		 * Release block groups that were deleted in this
		 * transaction: unfreeze each one and drop the reference
		 * the deleted_bgs list held on it.
		 */
		while (!list_empty(&transaction->deleted_bgs)) {
			struct btrfs_block_group *cache;

			cache = list_first_entry(&transaction->deleted_bgs,
						 struct btrfs_block_group,
						 bg_list);
			list_del_init(&cache->bg_list);
			btrfs_unfreeze_block_group(cache);
			btrfs_put_block_group(cache);
		}
		WARN_ON(!list_empty(&transaction->dev_update_list));
		kfree(transaction);
	}
}
0158
/*
 * Make the current tree nodes of all roots touched by this transaction
 * become the new commit roots, free dropped roots, and update the
 * unpinning cursors of block groups still being cached.  Runs under
 * fs_info->commit_root_sem held for write.
 */
static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root, *tmp;
	struct btrfs_caching_control *caching_ctl, *next;

	/*
	 * Only valid while the transaction is in the COMMIT_DOING state.
	 * NOTE(review): presumably this guarantees no concurrent tree
	 * modifications -- confirm against btrfs_commit_transaction().
	 */
	ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING);

	down_write(&fs_info->commit_root_sem);

	if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
		fs_info->last_reloc_trans = trans->transid;

	/* Swap each queued root's commit root for its current node. */
	list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
				 dirty_list) {
		list_del_init(&root->dirty_list);
		free_extent_buffer(root->commit_root);
		root->commit_root = btrfs_root_node(root);
		extent_io_tree_release(&root->dirty_log_pages);
		btrfs_qgroup_clean_swapped_blocks(root);
	}

	/* Clean up roots that were dropped during this transaction. */
	spin_lock(&cur_trans->dropped_roots_lock);
	while (!list_empty(&cur_trans->dropped_roots)) {
		root = list_first_entry(&cur_trans->dropped_roots,
					struct btrfs_root, root_list);
		list_del_init(&root->root_list);
		/* The free paths are called with the spinlock dropped. */
		spin_unlock(&cur_trans->dropped_roots_lock);
		btrfs_free_log(trans, root);
		btrfs_drop_and_free_fs_root(fs_info, root);
		spin_lock(&cur_trans->dropped_roots_lock);
	}
	spin_unlock(&cur_trans->dropped_roots_lock);

	/*
	 * With the commit roots switched, update every block group still
	 * being cached: finished ones may be unpinned anywhere (cursor set
	 * to -1) and are removed from the caching list; unfinished ones
	 * record the caching progress as the unpin limit.
	 */
	write_lock(&fs_info->block_group_cache_lock);
	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		struct btrfs_block_group *cache = caching_ctl->block_group;

		if (btrfs_block_group_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			btrfs_put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}
	write_unlock(&fs_info->block_group_cache_lock);
	up_write(&fs_info->commit_root_sem);
}
0241
0242 static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
0243 unsigned int type)
0244 {
0245 if (type & TRANS_EXTWRITERS)
0246 atomic_inc(&trans->num_extwriters);
0247 }
0248
0249 static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
0250 unsigned int type)
0251 {
0252 if (type & TRANS_EXTWRITERS)
0253 atomic_dec(&trans->num_extwriters);
0254 }
0255
0256 static inline void extwriter_counter_init(struct btrfs_transaction *trans,
0257 unsigned int type)
0258 {
0259 atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
0260 }
0261
0262 static inline int extwriter_counter_read(struct btrfs_transaction *trans)
0263 {
0264 return atomic_read(&trans->num_extwriters);
0265 }
0266
0267
0268
0269
0270
0271
0272
0273
0274 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
0275 {
0276 struct btrfs_fs_info *fs_info = trans->fs_info;
0277
0278 if (!trans->chunk_bytes_reserved)
0279 return;
0280
0281 btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
0282 trans->chunk_bytes_reserved, NULL);
0283 trans->chunk_bytes_reserved = 0;
0284 }
0285
0286
0287
0288
/*
 * Join the currently running transaction, or create and install a fresh
 * one when none is running.  On success the caller holds a reference on
 * the transaction and is counted as a writer (and extwriter if @type is
 * an extwriter type).  Returns 0, or a negative errno:
 *   -EROFS   fs is in an error state
 *   -EBUSY   the running transaction's state blocks this join type
 *   -ENOENT  TRANS_ATTACH with no running transaction
 *   -ENOMEM  allocation of a new transaction failed
 * May also return the running transaction's abort error.
 */
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
				     unsigned int type)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The abort path may flip the fs error state at any time. */
	if (BTRFS_FS_ERROR(fs_info)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (TRANS_ABORTED(cur_trans)) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		/* Our join type is blocked in the current commit state. */
		if (btrfs_blocked_trans_types[cur_trans->state] & type) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		refcount_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		extwriter_counter_inc(cur_trans, type);
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * ATTACH only ever joins an already running transaction, it never
	 * starts a new one.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	/*
	 * JOIN_NOLOCK must always join a running transaction; reaching
	 * this point with it would be a logic error in the caller.
	 */
	BUG_ON(type == TRANS_JOIN_NOLOCK);

	cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * Someone else started a transaction while we dropped the
		 * lock: throw away our allocation and join theirs (the
		 * lock is still held, as "loop" expects).
		 */
		kfree(cur_trans);
		goto loop;
	} else if (BTRFS_FS_ERROR(fs_info)) {
		spin_unlock(&fs_info->trans_lock);
		kfree(cur_trans);
		return -EROFS;
	}

	cur_trans->fs_info = fs_info;
	atomic_set(&cur_trans->pending_ordered, 0);
	init_waitqueue_head(&cur_trans->pending_wait);
	atomic_set(&cur_trans->num_writers, 1);
	extwriter_counter_init(cur_trans, type);
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->state = TRANS_STATE_RUNNING;
	/*
	 * Two references: one for the caller, one kept until the
	 * transaction commit drops it.
	 */
	refcount_set(&cur_trans->use_count, 2);
	cur_trans->flags = 0;
	cur_trans->start_time = ktime_get_seconds();

	memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs));

	cur_trans->delayed_refs.href_root = RB_ROOT_CACHED;
	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
	atomic_set(&cur_trans->delayed_refs.num_entries, 0);

	/*
	 * A fresh transaction should start with an empty tree mod log;
	 * warn loudly if it does not.  NOTE(review): the barrier
	 * presumably pairs with the tree mod log update paths -- confirm
	 * against tree-mod-log handling before changing.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n");
	atomic64_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->dev_update_list);
	INIT_LIST_HEAD(&cur_trans->switch_commits);
	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
	INIT_LIST_HEAD(&cur_trans->io_bgs);
	INIT_LIST_HEAD(&cur_trans->dropped_roots);
	mutex_init(&cur_trans->cache_write_mutex);
	spin_lock_init(&cur_trans->dirty_bgs_lock);
	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
	spin_lock_init(&cur_trans->dropped_roots_lock);
	INIT_LIST_HEAD(&cur_trans->releasing_ebs);
	spin_lock_init(&cur_trans->releasing_ebs_lock);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
			IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
	extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
			IO_TREE_FS_PINNED_EXTENTS, NULL);
	/* Publish the new transaction under the trans_lock. */
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}
0411
0412
0413
0414
0415
0416
0417
/*
 * Record that @root is modified in the current transaction: tag it in
 * the fs_roots radix tree (so commit_fs_roots() processes it), bump its
 * last_trans and set up its reloc root.  With @force the root is
 * recorded even if it was already seen in this transaction.
 *
 * Returns 0 or a negative errno from btrfs_init_reloc_root().
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       int force)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    root->last_trans < trans->transid) || force) {
		WARN_ON(!force && root->commit_root != root->node);

		/*
		 * Mark setup as in progress before publishing last_trans,
		 * so lockless readers in btrfs_record_root_in_trans()
		 * don't take their fast path while we are still
		 * initializing.
		 */
		set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

		/*
		 * Make the IN_TRANS_SETUP bit visible before last_trans;
		 * pairs with the smp_rmb() in btrfs_record_root_in_trans().
		 */
		smp_wmb();

		spin_lock(&fs_info->fs_roots_radix_lock);
		/* Someone may have recorded the root while we raced in. */
		if (root->last_trans == trans->transid && !force) {
			spin_unlock(&fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/*
		 * NOTE(review): the reloc root init happens after
		 * tagging/last_trans update and before clearing
		 * IN_TRANS_SETUP -- confirm the ordering requirement
		 * against relocation code before reordering anything here.
		 */
		ret = btrfs_init_reloc_root(trans, root);
		smp_mb__before_atomic();
		clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
	}
	return ret;
}
0477
0478
/*
 * Queue @root on the transaction's dropped_roots list (processed at
 * commit time by switch_commit_roots()) and clear its TRANS_TAG in the
 * fs_roots radix tree so commit_fs_roots() no longer treats it as a
 * root modified in this transaction.
 */
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* Add to the dropped list first, then untag. */
	spin_lock(&cur_trans->dropped_roots_lock);
	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
	spin_unlock(&cur_trans->dropped_roots_lock);

	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_tag_clear(&fs_info->fs_roots_radix,
			     (unsigned long)root->root_key.objectid,
			     BTRFS_ROOT_TRANS_TAG);
	spin_unlock(&fs_info->fs_roots_radix_lock);
}
0497
/*
 * Public entry to record @root in the current transaction.  Fast paths:
 * non-shareable roots never need recording, and a root already fully
 * recorded in this transaction is skipped without taking reloc_mutex.
 */
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return 0;

	/*
	 * Pairs with the smp_wmb() in record_root_in_trans(): observe the
	 * IN_TRANS_SETUP bit before trusting last_trans, so we don't skip
	 * a root whose setup is still in flight.
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
		return 0;

	mutex_lock(&fs_info->reloc_mutex);
	ret = record_root_in_trans(trans, root, 0);
	mutex_unlock(&fs_info->reloc_mutex);

	return ret;
}
0522
0523 static inline int is_transaction_blocked(struct btrfs_transaction *trans)
0524 {
0525 return (trans->state >= TRANS_STATE_COMMIT_START &&
0526 trans->state < TRANS_STATE_UNBLOCKED &&
0527 !TRANS_ABORTED(trans));
0528 }
0529
0530
0531
0532
0533
/*
 * If the currently running transaction is in its blocked commit phase,
 * wait until it becomes unblocked or aborts.  A temporary reference is
 * taken so the transaction cannot be freed while we sleep on it.
 */
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&fs_info->trans_lock);
	cur_trans = fs_info->running_transaction;
	if (cur_trans && is_transaction_blocked(cur_trans)) {
		refcount_inc(&cur_trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		wait_event(fs_info->transaction_wait,
			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
			   TRANS_ABORTED(cur_trans));
		btrfs_put_transaction(cur_trans);
	} else {
		spin_unlock(&fs_info->trans_lock);
	}
}
0552
0553 static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
0554 {
0555 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
0556 return 0;
0557
0558 if (type == TRANS_START)
0559 return 1;
0560
0561 return 0;
0562 }
0563
0564 static inline bool need_reserve_reloc_root(struct btrfs_root *root)
0565 {
0566 struct btrfs_fs_info *fs_info = root->fs_info;
0567
0568 if (!fs_info->reloc_ctl ||
0569 !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
0570 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
0571 root->reloc_root)
0572 return false;
0573
0574 return true;
0575 }
0576
/*
 * Common implementation behind all transaction start/join/attach
 * variants.
 *
 * @root:            root the returned handle is started against
 * @num_items:       number of tree items the caller intends to modify;
 *                   sizes the metadata reservation (0 for pure joins)
 * @type:            TRANS_START / TRANS_JOIN / TRANS_ATTACH / ... flags
 * @flush:           how hard to flush space to satisfy the reservation
 * @enforce_qgroups: whether qgroup limits apply to the pertrans reserve
 *
 * Returns a transaction handle or an ERR_PTR().
 */
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
		  unsigned int type, enum btrfs_reserve_flush_enum flush,
		  bool enforce_qgroups)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	u64 qgroup_reserved = 0;
	bool reloc_reserved = false;
	bool do_chunk_alloc = false;
	int ret;

	if (BTRFS_FS_ERROR(fs_info))
		return ERR_PTR(-EROFS);

	/*
	 * Nested start: this task already holds a handle (stashed in
	 * journal_info); reuse it and just bump its use count.
	 */
	if (current->journal_info) {
		WARN_ON(type & TRANS_EXTWRITERS);
		h = current->journal_info;
		refcount_inc(&h->use_count);
		WARN_ON(refcount_read(&h->use_count) > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Reserve qgroup and metadata space BEFORE joining, so any
	 * flushing the reservation needs happens outside the transaction.
	 */
	if (num_items && root != fs_info->chunk_root) {
		struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
		u64 delayed_refs_bytes = 0;

		qgroup_reserved = num_items * fs_info->nodesize;
		ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
				enforce_qgroups);
		if (ret)
			return ERR_PTR(ret);

		/*
		 * If the delayed refs reserve is not full and we may flush,
		 * reserve double the space and migrate the extra half into
		 * the delayed refs reserve below.
		 */
		num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
		if (flush == BTRFS_RESERVE_FLUSH_ALL &&
		    delayed_refs_rsv->full == 0) {
			delayed_refs_bytes = num_bytes;
			num_bytes <<= 1;
		}

		/* One extra node of space when a reloc root may be needed. */
		if (need_reserve_reloc_root(root)) {
			num_bytes += fs_info->nodesize;
			reloc_reserved = true;
		}

		ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes, flush);
		if (ret)
			goto reserve_fail;
		if (delayed_refs_bytes) {
			btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
							  delayed_refs_bytes);
			num_bytes -= delayed_refs_bytes;
		}

		if (rsv->space_info->force_alloc)
			do_chunk_alloc = true;
	} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
		   !delayed_refs_rsv->full) {
		/*
		 * No item reservation requested, but the delayed refs
		 * reserve is low: top it up while flushing is allowed.
		 */
		ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/* Block filesystem freezing for freezable transaction types. */
	if (type & __TRANS_FREEZABLE)
		sb_start_intwrite(fs_info->sb);

	if (may_wait_transaction(fs_info, type))
		wait_current_trans(fs_info);

	/*
	 * join_transaction() returns -EBUSY while the current transaction
	 * is in a state that blocks our join type; wait and retry.
	 * ATTACH/JOIN_NOSTART must not retry into a new transaction.
	 */
	do {
		ret = join_transaction(fs_info, type);
		if (ret == -EBUSY) {
			wait_current_trans(fs_info);
			if (unlikely(type == TRANS_ATTACH ||
				     type == TRANS_JOIN_NOSTART))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0)
		goto join_fail;

	cur_trans = fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	refcount_set(&h->use_count, 1);
	h->fs_info = root->fs_info;

	h->type = type;
	INIT_LIST_HEAD(&h->new_bgs);

	/*
	 * If a commit started after we joined, help it finish and then
	 * start over with a fresh handle.
	 */
	smp_mb();
	if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
	    may_wait_transaction(fs_info, type)) {
		current->journal_info = h;
		btrfs_commit_transaction(h);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
		h->reloc_reserved = reloc_reserved;
	}

got_it:
	if (!current->journal_info)
		current->journal_info = h;

	/*
	 * Force-allocate a chunk if the space info asked for it.  Done
	 * here, after the handle exists, so the allocation is accounted
	 * to this transaction.
	 */
	if (do_chunk_alloc && num_bytes) {
		u64 flags = h->block_rsv->space_info->flags;

		btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
				  CHUNK_ALLOC_NO_FORCE);
	}

	/*
	 * Record the root in the transaction; on failure end the handle,
	 * which releases everything acquired above.
	 */
	ret = btrfs_record_root_in_trans(h, root);
	if (ret) {
		btrfs_end_transaction(h);
		return ERR_PTR(ret);
	}

	return h;

join_fail:
	if (type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
					num_bytes, NULL);
reserve_fail:
	btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
	return ERR_PTR(ret);
}
0777
0778 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
0779 unsigned int num_items)
0780 {
0781 return start_transaction(root, num_items, TRANS_START,
0782 BTRFS_RESERVE_FLUSH_ALL, true);
0783 }
0784
0785 struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
0786 struct btrfs_root *root,
0787 unsigned int num_items)
0788 {
0789 return start_transaction(root, num_items, TRANS_START,
0790 BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
0791 }
0792
0793 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
0794 {
0795 return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH,
0796 true);
0797 }
0798
0799 struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root)
0800 {
0801 return start_transaction(root, 0, TRANS_JOIN_NOLOCK,
0802 BTRFS_RESERVE_NO_FLUSH, true);
0803 }
0804
0805
0806
0807
0808
0809 struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
0810 {
0811 return start_transaction(root, 0, TRANS_JOIN_NOSTART,
0812 BTRFS_RESERVE_NO_FLUSH, true);
0813 }
0814
0815
0816
0817
0818
0819
0820
0821
0822
0823
0824
0825
0826
0827
0828 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
0829 {
0830 return start_transaction(root, 0, TRANS_ATTACH,
0831 BTRFS_RESERVE_NO_FLUSH, true);
0832 }
0833
0834
0835
0836
0837
0838
0839
0840
0841 struct btrfs_trans_handle *
0842 btrfs_attach_transaction_barrier(struct btrfs_root *root)
0843 {
0844 struct btrfs_trans_handle *trans;
0845
0846 trans = start_transaction(root, 0, TRANS_ATTACH,
0847 BTRFS_RESERVE_NO_FLUSH, true);
0848 if (trans == ERR_PTR(-ENOENT))
0849 btrfs_wait_for_commit(root->fs_info, 0);
0850
0851 return trans;
0852 }
0853
0854
/*
 * Wait until @commit reaches at least @min_state.  When waiting for full
 * completion (TRANS_STATE_COMPLETED), also wait for every earlier
 * transaction still on fs_info->trans_list, since older transactions
 * must complete before this one is truly done.
 */
static noinline void wait_for_commit(struct btrfs_transaction *commit,
				     const enum btrfs_trans_state min_state)
{
	struct btrfs_fs_info *fs_info = commit->fs_info;
	u64 transid = commit->transid;
	bool put = false;

	while (1) {
		wait_event(commit->commit_wait, commit->state >= min_state);
		/* Drop the reference we took on a previous iteration. */
		if (put)
			btrfs_put_transaction(commit);

		if (min_state < TRANS_STATE_COMPLETED)
			break;

		/*
		 * Re-check the transaction list: if an older transaction
		 * (transid <= ours) is still present, wait on it too.
		 * Take a reference under trans_lock so it can't be freed
		 * while we sleep.
		 */
		spin_lock(&fs_info->trans_lock);
		commit = list_first_entry_or_null(&fs_info->trans_list,
						  struct btrfs_transaction,
						  list);
		if (!commit || commit->transid > transid) {
			spin_unlock(&fs_info->trans_lock);
			break;
		}
		refcount_inc(&commit->use_count);
		put = true;
		spin_unlock(&fs_info->trans_lock);
	}
}
0890
0891 int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
0892 {
0893 struct btrfs_transaction *cur_trans = NULL, *t;
0894 int ret = 0;
0895
0896 if (transid) {
0897 if (transid <= fs_info->last_trans_committed)
0898 goto out;
0899
0900
0901 spin_lock(&fs_info->trans_lock);
0902 list_for_each_entry(t, &fs_info->trans_list, list) {
0903 if (t->transid == transid) {
0904 cur_trans = t;
0905 refcount_inc(&cur_trans->use_count);
0906 ret = 0;
0907 break;
0908 }
0909 if (t->transid > transid) {
0910 ret = 0;
0911 break;
0912 }
0913 }
0914 spin_unlock(&fs_info->trans_lock);
0915
0916
0917
0918
0919
0920 if (!cur_trans) {
0921 if (transid > fs_info->last_trans_committed)
0922 ret = -EINVAL;
0923 goto out;
0924 }
0925 } else {
0926
0927 spin_lock(&fs_info->trans_lock);
0928 list_for_each_entry_reverse(t, &fs_info->trans_list,
0929 list) {
0930 if (t->state >= TRANS_STATE_COMMIT_START) {
0931 if (t->state == TRANS_STATE_COMPLETED)
0932 break;
0933 cur_trans = t;
0934 refcount_inc(&cur_trans->use_count);
0935 break;
0936 }
0937 }
0938 spin_unlock(&fs_info->trans_lock);
0939 if (!cur_trans)
0940 goto out;
0941 }
0942
0943 wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
0944 btrfs_put_transaction(cur_trans);
0945 out:
0946 return ret;
0947 }
0948
/* Throttle callers by waiting out a blocked running transaction, if any. */
void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
	wait_current_trans(fs_info);
}
0953
0954 static bool should_end_transaction(struct btrfs_trans_handle *trans)
0955 {
0956 struct btrfs_fs_info *fs_info = trans->fs_info;
0957
0958 if (btrfs_check_space_for_delayed_refs(fs_info))
0959 return true;
0960
0961 return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
0962 }
0963
0964 bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
0965 {
0966 struct btrfs_transaction *cur_trans = trans->transaction;
0967
0968 if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
0969 test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags))
0970 return true;
0971
0972 return should_end_transaction(trans);
0973 }
0974
0975 static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
0976
0977 {
0978 struct btrfs_fs_info *fs_info = trans->fs_info;
0979
0980 if (!trans->block_rsv) {
0981 ASSERT(!trans->bytes_reserved);
0982 return;
0983 }
0984
0985 if (!trans->bytes_reserved)
0986 return;
0987
0988 ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
0989 trace_btrfs_space_reservation(fs_info, "transaction",
0990 trans->transid, trans->bytes_reserved, 0);
0991 btrfs_block_rsv_release(fs_info, trans->block_rsv,
0992 trans->bytes_reserved, NULL);
0993 trans->bytes_reserved = 0;
0994 }
0995
/*
 * Drop the caller's use of @trans.  A nested handle only decrements the
 * use count and restores the saved block reserve.  Otherwise: release
 * reservations, create any pending block groups, detach from the
 * transaction (waking a waiting committer if needed) and free the
 * handle.  @throttle additionally runs delayed iputs.
 *
 * Returns 0, the transaction's abort error, or -EROFS if the fs is in
 * an error state.
 */
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   int throttle)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int err = 0;

	/* Nested use: just unwind one level. */
	if (refcount_read(&trans->use_count) > 1) {
		refcount_dec(&trans->use_count);
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	btrfs_create_pending_block_groups(trans);

	btrfs_trans_release_chunk_metadata(trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);
	extwriter_counter_dec(cur_trans, trans->type);

	/* A committer may be waiting for the writer count to drop. */
	cond_wake_up(&cur_trans->writer_wait);
	btrfs_put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(info);

	if (TRANS_ABORTED(trans) || BTRFS_FS_ERROR(info)) {
		wake_up_process(info->transaction_kthread);
		if (TRANS_ABORTED(trans))
			err = trans->aborted;
		else
			err = -EROFS;
	}

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}
1044
/* End a transaction handle without running delayed iputs. */
int btrfs_end_transaction(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 0);
}
1049
/* End a transaction handle and also run delayed iputs (throttled path). */
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
{
	return __btrfs_end_transaction(trans, 1);
}
1054
1055
1056
1057
1058
1059
/*
 * Write back all ranges tagged with @mark in @dirty_pages.  Each range
 * is converted from @mark to EXTENT_NEED_WAIT before the writeback is
 * started, so a later wait pass (__btrfs_wait_marked_extents()) can find
 * it.  Returns the first error encountered, 0 on success.
 */
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	atomic_inc(&BTRFS_I(fs_info->btree_inode)->sync_writers);
	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		bool wait_writeback = false;

		err = convert_extent_bit(dirty_pages, start, end,
					 EXTENT_NEED_WAIT,
					 mark, &cached_state);
		/*
		 * If we couldn't tag the range for waiting (-ENOMEM), we
		 * can't rely on the wait pass finding it, so wait for the
		 * writeback synchronously right here instead of failing.
		 */
		if (err == -ENOMEM) {
			err = 0;
			wait_writeback = true;
		}
		if (!err)
			err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		else if (wait_writeback)
			werr = filemap_fdatawait_range(mapping, start, end);
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	atomic_dec(&BTRFS_I(fs_info->btree_inode)->sync_writers);
	return werr;
}
1109
1110
1111
1112
1113
1114
1115
/*
 * Wait for writeback on all ranges tagged EXTENT_NEED_WAIT in
 * @dirty_pages (the tag set by btrfs_write_marked_extents()), clearing
 * the tag as we go.  Returns the first error encountered, 0 on success.
 */
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
				       struct extent_io_tree *dirty_pages)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		/*
		 * Failing to clear the tag with -ENOMEM is tolerated: we
		 * still wait for the writeback below, the range will just
		 * be visited again (harmlessly) on a subsequent pass.
		 */
		err = clear_extent_bit(dirty_pages, start, end,
				       EXTENT_NEED_WAIT, 0, 0, &cached_state);
		if (err == -ENOMEM)
			err = 0;
		if (!err)
			err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		free_extent_state(cached_state);
		cached_state = NULL;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}
1153
1154 static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
1155 struct extent_io_tree *dirty_pages)
1156 {
1157 bool errors = false;
1158 int err;
1159
1160 err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
1161 if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
1162 errors = true;
1163
1164 if (errors && !err)
1165 err = -EIO;
1166 return err;
1167 }
1168
1169 int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
1170 {
1171 struct btrfs_fs_info *fs_info = log_root->fs_info;
1172 struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
1173 bool errors = false;
1174 int err;
1175
1176 ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
1177
1178 err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
1179 if ((mark & EXTENT_DIRTY) &&
1180 test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
1181 errors = true;
1182
1183 if ((mark & EXTENT_NEW) &&
1184 test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
1185 errors = true;
1186
1187 if (errors && !err)
1188 err = -EIO;
1189 return err;
1190 }
1191
1192
1193
1194
1195
1196
1197
1198
1199 static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans)
1200 {
1201 int ret;
1202 int ret2;
1203 struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages;
1204 struct btrfs_fs_info *fs_info = trans->fs_info;
1205 struct blk_plug plug;
1206
1207 blk_start_plug(&plug);
1208 ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY);
1209 blk_finish_plug(&plug);
1210 ret2 = btrfs_wait_extents(fs_info, dirty_pages);
1211
1212 extent_io_tree_release(&trans->transaction->dirty_pages);
1213
1214 if (ret)
1215 return ret;
1216 else if (ret2)
1217 return ret2;
1218 else
1219 return 0;
1220 }
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
/*
 * Push the in-memory root item of a CoW-only root into the tree root.
 * Loops because btrfs_update_root() can itself CoW blocks of @root,
 * changing its bytenr and used bytes again; we stop once the stored
 * item matches the current node and usage.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		/* Converged: stored item already matches reality. */
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
	}

	return 0;
}
1261
1262
1263
1264
1265
1266
1267
1268
/*
 * Commit all CoW-only roots (those on fs_info->dirty_cowonly_roots) and
 * the dirty block groups, looping until no new dirt is generated, then
 * record the dev-replace cursor.  Only valid while the transaction is in
 * the COMMIT_DOING state.  Returns 0 or the first error.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
	struct list_head *io_bgs = &trans->transaction->io_bgs;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING);

	/* CoW the tree root node itself up front. */
	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb, BTRFS_NESTING_COW);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	/* Flush per-transaction bookkeeping into their trees. */
	ret = btrfs_run_dev_stats(trans);
	if (ret)
		return ret;
	ret = btrfs_run_dev_replace(trans);
	if (ret)
		return ret;
	ret = btrfs_run_qgroups(trans);
	if (ret)
		return ret;

	ret = btrfs_setup_space_cache(trans);
	if (ret)
		return ret;

again:
	/*
	 * Update each dirty CoW-only root; the updates may dirty more
	 * roots, hence the retry via "again" below.
	 */
	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		struct btrfs_root *root;
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);
		clear_bit(BTRFS_ROOT_DIRTY, &root->state);

		list_add_tail(&root->dirty_list,
			      &trans->transaction->switch_commits);
		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	/* Flush all delayed refs generated above. */
	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		return ret;

	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
		ret = btrfs_write_dirty_block_groups(trans);
		if (ret)
			return ret;

		/*
		 * Writing block groups generates more delayed refs, which
		 * in turn can dirty more block groups -- keep looping
		 * until both lists drain.
		 */
		ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
		if (ret)
			return ret;
	}

	if (!list_empty(&fs_info->dirty_cowonly_roots))
		goto again;

	/* Everything is stable now; commit the dev-replace cursor. */
	fs_info->dev_replace.committed_cursor_left =
		fs_info->dev_replace.cursor_left_last_write_of_item;

	return 0;
}
1352
1353
1354
1355
1356
/*
 * Wake waiters of unfinished root drops, but only when no dead root with
 * an unfinished drop remains at the head of the dead_roots list (see
 * btrfs_add_dead_root(): such roots are queued at the front).
 */
void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
{
	spin_lock(&fs_info->trans_lock);
	if (!list_empty(&fs_info->dead_roots)) {
		struct btrfs_root *root = list_first_entry(&fs_info->dead_roots,
							   struct btrfs_root,
							   root_list);
		if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) {
			spin_unlock(&fs_info->trans_lock);
			return;
		}
	}
	spin_unlock(&fs_info->trans_lock);

	btrfs_wake_unfinished_drop(fs_info);
}
1378
1379
1380
1381
1382
1383
1384 void btrfs_add_dead_root(struct btrfs_root *root)
1385 {
1386 struct btrfs_fs_info *fs_info = root->fs_info;
1387
1388 spin_lock(&fs_info->trans_lock);
1389 if (list_empty(&root->root_list)) {
1390 btrfs_grab_root(root);
1391
1392
1393 if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state))
1394 list_add(&root->root_list, &fs_info->dead_roots);
1395 else
1396 list_add_tail(&root->root_list, &fs_info->dead_roots);
1397 }
1398 spin_unlock(&fs_info->trans_lock);
1399 }
1400
1401
1402
1403
1404
/*
 * Write every dirty subvolume (fs tree) root into the tree of tree roots
 * as part of a transaction commit.
 *
 * Roots modified in this transaction are tagged BTRFS_ROOT_TRANS_TAG in
 * fs_info->fs_roots_radix; they are collected in batches of up to 8, the
 * tag is cleared and each root's item is updated in the tree root.
 *
 * Returns 0 on success or a negative errno from updating a reloc root or
 * a root item.
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *gang[8];
	int i;
	int ret;

	/*
	 * The commit must have reached COMMIT_DOING (past the writer wait),
	 * so nobody can be modifying trees through this transaction anymore.
	 */
	ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING);

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		/* Grab the next batch of roots tagged dirty-in-transaction. */
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			struct btrfs_root *root = gang[i];
			int ret2;

			/*
			 * No tree log writers or log commits may be running
			 * at this point of the commit.
			 */
			ASSERT(atomic_read(&root->log_writers) == 0);
			ASSERT(atomic_read(&root->log_commit[0]) == 0);
			ASSERT(atomic_read(&root->log_commit[1]) == 0);

			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			/* Drop the lock around the blocking per-root work. */
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			ret2 = btrfs_update_reloc_root(trans, root);
			if (ret2)
				return ret2;

			/*
			 * Stop forcing COW on this root.
			 * NOTE(review): presumably paired with the force-COW
			 * logic in should_cow_block() — confirm there.
			 */
			clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
			smp_mb__after_atomic();

			/*
			 * Only queue a commit-root switch if the root's top
			 * actually changed in this transaction.
			 */
			if (root->commit_root != root->node) {
				list_add_tail(&root->dirty_list,
					&trans->transaction->switch_commits);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			ret2 = btrfs_update_root(trans, fs_info->tree_root,
						 &root->root_key,
						 &root->root_item);
			if (ret2)
				return ret2;
			spin_lock(&fs_info->fs_roots_radix_lock);
			btrfs_qgroup_free_meta_all_pertrans(root);
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return 0;
}
1471
1472
1473
1474
1475
1476 int btrfs_defrag_root(struct btrfs_root *root)
1477 {
1478 struct btrfs_fs_info *info = root->fs_info;
1479 struct btrfs_trans_handle *trans;
1480 int ret;
1481
1482 if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
1483 return 0;
1484
1485 while (1) {
1486 trans = btrfs_start_transaction(root, 0);
1487 if (IS_ERR(trans)) {
1488 ret = PTR_ERR(trans);
1489 break;
1490 }
1491
1492 ret = btrfs_defrag_leaves(trans, root);
1493
1494 btrfs_end_transaction(trans);
1495 btrfs_btree_balance_dirty(info);
1496 cond_resched();
1497
1498 if (btrfs_fs_closing(info) || ret != -EAGAIN)
1499 break;
1500
1501 if (btrfs_defrag_cancelled(info)) {
1502 btrfs_debug(info, "defrag_root cancelled");
1503 ret = -EAGAIN;
1504 break;
1505 }
1506 }
1507 clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
1508 return ret;
1509 }
1510
1511
1512
1513
1514
1515
1516
1517
/*
 * Do qgroup accounting for a newly created snapshot and persist the
 * result by committing roots in the middle of the transaction commit.
 *
 * @trans:	  current transaction handle (commit in progress)
 * @src:	  root the snapshot was taken from
 * @parent:	  root of the directory the snapshot was created in
 * @inherit:	  optional qgroup inheritance info from userspace
 * @dst_objectid: objectid of the new snapshot root
 *
 * Returns 0 when quotas are disabled or on success, a negative errno
 * otherwise.  A delayed-refs failure aborts the transaction here; other
 * failures are just propagated.
 */
static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_root *src,
				   struct btrfs_root *parent,
				   struct btrfs_qgroup_inherit *inherit,
				   u64 dst_objectid)
{
	struct btrfs_fs_info *fs_info = src->fs_info;
	int ret;

	/* Nothing to account when quotas are not enabled. */
	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	/*
	 * Record the source root in the transaction (forced, third arg 1)
	 * before committing roots below.
	 * NOTE(review): presumably needed so its dirty state survives the
	 * mid-commit root commit — confirm against record_root_in_trans().
	 */
	ret = record_root_in_trans(trans, src, 1);
	if (ret)
		return ret;

	/*
	 * Run all pending delayed refs so the extent tree reflects every
	 * change before qgroup accounting reads it.  Failure here is fatal
	 * for the transaction.
	 */
	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	ret = commit_fs_roots(trans);
	if (ret)
		goto out;
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto out;

	/* Now that qgroup numbers are updated, inherit them to the new one. */
	ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
				   inherit);
	if (ret < 0)
		goto out;

	/*
	 * Flush the updates to disk: commit the cow-only roots, switch the
	 * commit roots, and wait for this transaction's tree blocks to be
	 * written.
	 */
	ret = commit_cowonly_roots(trans);
	if (ret)
		goto out;
	switch_commit_roots(trans);
	ret = btrfs_write_and_wait_transaction(trans);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret,
			"Error while writing out transaction for qgroup");

out:
	/*
	 * Re-record the parent root in the transaction (commit_fs_roots()
	 * above wrote it out).  Skipped on error so the first failure is
	 * the one reported to the caller.
	 */
	if (!ret)
		ret = record_root_in_trans(trans, parent, 1);
	return ret;
}
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
/*
 * Create one pending snapshot inside the critical section of a
 * transaction commit.
 *
 * Copies the source root's tree top via COW, inserts the new root item,
 * root refs, directory entry and uuid-tree items, and runs the qgroup
 * accounting for the new root.  Errors abort the transaction, except the
 * -EEXIST "name already exists" case which is only stored in
 * pending->error.  The result is also mirrored into pending->error for
 * the ioctl caller.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_pending_snapshot *pending)
{

	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec64 cur_time;
	int ret = 0;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	/* The path and root item were preallocated by the caller. */
	ASSERT(pending->path);
	path = pending->path;

	ASSERT(pending->root_item);
	new_root_item = pending->root_item;

	pending->error = btrfs_get_free_objectid(tree_root, &objectid);
	if (pending->error)
		goto no_free_objectid;

	/*
	 * Skip normal qgroup accounting for the new objectid; the snapshot
	 * is accounted later by qgroup_account_snapshot().
	 */
	btrfs_set_skip_qgroup(trans, objectid);

	btrfs_reloc_pre_snapshot(pending, &to_reserve);

	if (to_reserve > 0) {
		pending->error = btrfs_block_rsv_add(fs_info,
						     &pending->block_rsv,
						     to_reserve,
						     BTRFS_RESERVE_NO_FLUSH);
		if (pending->error)
			goto clear_skip_qgroup;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	/* Use the snapshot's reservation for the rest of this function. */
	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;
	trans->bytes_reserved = trans->block_rsv->reserved;
	trace_btrfs_space_reservation(fs_info, "transaction",
				      trans->transid,
				      trans->bytes_reserved, 1);
	dentry = pending->dentry;
	parent_inode = pending->dir;
	parent_root = BTRFS_I(parent_inode)->root;
	ret = record_root_in_trans(trans, parent_root, 0);
	if (ret)
		goto fail;
	cur_time = current_time(parent_inode);

	/*
	 * Insert the directory item.
	 */
	ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index);
	BUG_ON(ret);

	/* Check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(BTRFS_I(parent_inode)),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto dir_item_existed;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * Flush delayed items (dir index / inode updates) before copying
	 * the source root so the snapshot includes them.
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = record_root_in_trans(trans, root, 0);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	generate_random_guid(new_root_item->uuid);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
		/* Writable snapshots do not inherit received/send state. */
		memset(new_root_item->received_uuid, 0,
		       sizeof(new_root_item->received_uuid));
		memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
		memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
		btrfs_set_root_stransid(new_root_item, 0);
		btrfs_set_root_rtransid(new_root_item, 0);
	}
	btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
	btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);

	/* COW the source root's top node and copy it for the snapshot. */
	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old,
			      BTRFS_NESTING_COW);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* Clean up in any case. */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	/*
	 * Force COW on the source root from now on.
	 * NOTE(review): presumably consumed by should_cow_block() — confirm.
	 */
	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* Record when the snapshot was created in key.offset. */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * Insert root back/forward references.
	 */
	ret = btrfs_add_root_ref(trans, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(BTRFS_I(parent_inode)), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		pending->snap = NULL;
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	/*
	 * Do the dedicated qgroup accounting for the snapshot (see the
	 * comment on qgroup_account_snapshot()); note it commits roots
	 * mid transaction commit.
	 */
	ret = qgroup_account_snapshot(trans, root, parent_root,
				      pending->inherit, objectid);
	if (ret < 0)
		goto fail;

	ret = btrfs_insert_dir_item(trans, dentry->d_name.name,
				    dentry->d_name.len, BTRFS_I(parent_inode),
				    &key, BTRFS_FT_DIR, index);
	/* The name was checked above, so these cannot happen. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}

	btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = current_time(parent_inode);
	parent_inode->i_ctime = parent_inode->i_mtime;
	ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	ret = btrfs_uuid_tree_add(trans, new_root_item->uuid,
				  BTRFS_UUID_KEY_SUBVOL,
				  objectid);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto fail;
	}
	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
		ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					  objectid);
		/* -EEXIST is tolerated: the uuid item may already be there. */
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, ret);
			goto fail;
		}
	}

fail:
	pending->error = ret;
dir_item_existed:
	/* Restore the caller's block reservation. */
	trans->block_rsv = rsv;
	trans->bytes_reserved = 0;
clear_skip_qgroup:
	btrfs_clear_skip_qgroup(trans);
no_free_objectid:
	kfree(new_root_item);
	pending->root_item = NULL;
	btrfs_free_path(path);
	pending->path = NULL;

	return ret;
}
1874
1875
1876
1877
1878 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans)
1879 {
1880 struct btrfs_pending_snapshot *pending, *next;
1881 struct list_head *head = &trans->transaction->pending_snapshots;
1882 int ret = 0;
1883
1884 list_for_each_entry_safe(pending, next, head, list) {
1885 list_del(&pending->list);
1886 ret = create_pending_snapshot(trans, pending);
1887 if (ret)
1888 break;
1889 }
1890 return ret;
1891 }
1892
1893 static void update_super_roots(struct btrfs_fs_info *fs_info)
1894 {
1895 struct btrfs_root_item *root_item;
1896 struct btrfs_super_block *super;
1897
1898 super = fs_info->super_copy;
1899
1900 root_item = &fs_info->chunk_root->root_item;
1901 super->chunk_root = root_item->bytenr;
1902 super->chunk_root_generation = root_item->generation;
1903 super->chunk_root_level = root_item->level;
1904
1905 root_item = &fs_info->tree_root->root_item;
1906 super->root = root_item->bytenr;
1907 super->generation = root_item->generation;
1908 super->root_level = root_item->level;
1909 if (btrfs_test_opt(fs_info, SPACE_CACHE))
1910 super->cache_generation = root_item->generation;
1911 else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags))
1912 super->cache_generation = 0;
1913 if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
1914 super->uuid_tree_generation = root_item->generation;
1915
1916 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
1917 root_item = &fs_info->block_group_root->root_item;
1918
1919 super->block_group_root = root_item->bytenr;
1920 super->block_group_root_generation = root_item->generation;
1921 super->block_group_root_level = root_item->level;
1922 }
1923 }
1924
1925 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1926 {
1927 struct btrfs_transaction *trans;
1928 int ret = 0;
1929
1930 spin_lock(&info->trans_lock);
1931 trans = info->running_transaction;
1932 if (trans)
1933 ret = (trans->state >= TRANS_STATE_COMMIT_START);
1934 spin_unlock(&info->trans_lock);
1935 return ret;
1936 }
1937
1938 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1939 {
1940 struct btrfs_transaction *trans;
1941 int ret = 0;
1942
1943 spin_lock(&info->trans_lock);
1944 trans = info->running_transaction;
1945 if (trans)
1946 ret = is_transaction_blocked(trans);
1947 spin_unlock(&info->trans_lock);
1948 return ret;
1949 }
1950
/*
 * Kick off a transaction commit without doing it ourselves.
 *
 * The actual commit is handed to the transaction kthread; this function
 * only waits until the commit reaches TRANS_STATE_COMMIT_START (or the
 * transaction aborts) before returning.
 */
void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans;

	/* Tell the transaction kthread a commit is wanted and wake it. */
	set_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);
	wake_up_process(fs_info->transaction_kthread);

	/* Take a ref so the transaction outlives our handle below. */
	cur_trans = trans->transaction;
	refcount_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans);

	/*
	 * Wait for the commit to at least start (or the transaction to be
	 * aborted) before reporting the async commit as launched.
	 */
	wait_event(fs_info->transaction_blocked_wait,
		   cur_trans->state >= TRANS_STATE_COMMIT_START ||
		   TRANS_ABORTED(cur_trans));
	btrfs_put_transaction(cur_trans);
}
1975
/*
 * Tear down a transaction whose commit failed: abort it, wait out any
 * remaining writers, unlink it from fs_info->trans_list and release all
 * of its resources, including the handle itself.
 */
static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;

	/* The caller must hold the only reference to this handle. */
	WARN_ON(refcount_read(&trans->use_count) > 1);

	btrfs_abort_transaction(trans, err);

	spin_lock(&fs_info->trans_lock);

	/*
	 * If the transaction were no longer on the list it would already
	 * have been committed; a successful commit must never reach this
	 * error path.
	 */
	BUG_ON(list_empty(&cur_trans->list));

	if (cur_trans == fs_info->running_transaction) {
		cur_trans->state = TRANS_STATE_COMMIT_DOING;
		spin_unlock(&fs_info->trans_lock);
		/* Wait until we are the only writer left. */
		wait_event(cur_trans->writer_wait,
			   atomic_read(&cur_trans->num_writers) == 1);

		spin_lock(&fs_info->trans_lock);
	}

	/* Unlink under the lock so nobody else can find the transaction. */
	list_del_init(&cur_trans->list);

	spin_unlock(&fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, fs_info);

	spin_lock(&fs_info->trans_lock);
	if (cur_trans == fs_info->running_transaction)
		fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);
	/*
	 * Drop two refs, mirroring the pair dropped at the end of a
	 * successful btrfs_commit_transaction().
	 */
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	trace_btrfs_transaction_commit(fs_info);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	btrfs_scrub_cancel(fs_info);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
2035
2036
2037
2038
2039
2040 static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
2041 {
2042 struct btrfs_fs_info *fs_info = trans->fs_info;
2043 struct btrfs_block_group *block_group, *tmp;
2044
2045 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
2046 btrfs_delayed_refs_rsv_release(fs_info, 1);
2047 list_del_init(&block_group->bg_list);
2048 }
2049 }
2050
2051 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
2052 {
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070 if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2071 try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
2072 return 0;
2073 }
2074
2075 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
2076 {
2077 if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
2078 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
2079 }
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089 static void add_pending_snapshot(struct btrfs_trans_handle *trans)
2090 {
2091 struct btrfs_transaction *cur_trans = trans->transaction;
2092
2093 if (!trans->pending_snapshot)
2094 return;
2095
2096 lockdep_assert_held(&trans->fs_info->trans_lock);
2097 ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START);
2098
2099 list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
2100 }
2101
2102 static void update_commit_stats(struct btrfs_fs_info *fs_info, ktime_t interval)
2103 {
2104 fs_info->commit_stats.commit_count++;
2105 fs_info->commit_stats.last_commit_dur = interval;
2106 fs_info->commit_stats.max_commit_dur =
2107 max_t(u64, fs_info->commit_stats.max_commit_dur, interval);
2108 fs_info->commit_stats.total_commit_dur += interval;
2109 }
2110
/*
 * Commit the current transaction: flush delayed refs/items and delalloc,
 * create pending snapshots, commit all fs and cow-only roots, switch the
 * commit roots, write the tree blocks and super blocks, and walk the
 * transaction through COMMIT_START -> COMMIT_DOING -> UNBLOCKED ->
 * SUPER_COMMITTED -> COMPLETED, waking waiters at each transition.
 *
 * If another task already started committing this transaction, or a
 * previous transaction is not finished, this waits on those instead of
 * duplicating the work.  On error the transaction is cleaned up via
 * cleanup_transaction() and the errno is returned.
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	int ret;
	ktime_t start_time;
	ktime_t interval;

	ASSERT(refcount_read(&trans->use_count) == 1);

	/* Stop the commit early if ->aborted is set. */
	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans);
		return ret;
	}

	btrfs_trans_release_metadata(trans);
	trans->block_rsv = NULL;

	/*
	 * Only one task flushes delayed refs for the commit at a time,
	 * guarded by the BTRFS_DELAYED_REFS_FLUSHING bit.
	 */
	if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING,
			      &cur_trans->delayed_refs.flags)) {
		/*
		 * NOTE(review): count 0 here — presumably "run a default
		 * batch", unlike the (unsigned long)-1 "run all" calls
		 * below; confirm against btrfs_run_delayed_refs().
		 */
		ret = btrfs_run_delayed_refs(trans, 0);
		if (ret) {
			btrfs_end_transaction(trans);
			return ret;
		}
	}

	btrfs_create_pending_block_groups(trans);

	if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
		int run_it = 0;

		/*
		 * Only one task wins the right to start writing dirty block
		 * groups; the flag flip is serialized against read-only
		 * block group changes by ro_block_group_mutex.
		 */
		mutex_lock(&fs_info->ro_block_group_mutex);
		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
				      &cur_trans->flags))
			run_it = 1;
		mutex_unlock(&fs_info->ro_block_group_mutex);

		if (run_it) {
			ret = btrfs_start_dirty_block_groups(trans);
			if (ret) {
				btrfs_end_transaction(trans);
				return ret;
			}
		}
	}

	spin_lock(&fs_info->trans_lock);
	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
		enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;

		/*
		 * Someone else is already committing this transaction; hand
		 * over any pending snapshot and just wait for the commit.
		 */
		add_pending_snapshot(trans);

		spin_unlock(&fs_info->trans_lock);
		refcount_inc(&cur_trans->use_count);

		/* An fsync only needs to wait until the super is written. */
		if (trans->in_fsync)
			want_state = TRANS_STATE_SUPER_COMMITTED;
		ret = btrfs_end_transaction(trans);
		wait_for_commit(cur_trans, want_state);

		if (TRANS_ABORTED(cur_trans))
			ret = cur_trans->aborted;

		btrfs_put_transaction(cur_trans);

		return ret;
	}

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&fs_info->transaction_blocked_wait);

	/* Wait for the previous transaction (if any) to be far enough. */
	if (cur_trans->list.prev != &fs_info->trans_list) {
		enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;

		if (trans->in_fsync)
			want_state = TRANS_STATE_SUPER_COMMITTED;

		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state < want_state) {
			refcount_inc(&prev_trans->use_count);
			spin_unlock(&fs_info->trans_lock);

			wait_for_commit(prev_trans, want_state);

			ret = READ_ONCE(prev_trans->aborted);

			btrfs_put_transaction(prev_trans);
			if (ret)
				goto cleanup_transaction;
		} else {
			spin_unlock(&fs_info->trans_lock);
		}
	} else {
		spin_unlock(&fs_info->trans_lock);
		/*
		 * No previous transaction to report an earlier failure; bail
		 * out here if the filesystem is already in an error state.
		 */
		if (BTRFS_FS_ERROR(fs_info)) {
			ret = -EROFS;
			goto cleanup_transaction;
		}
	}

	/*
	 * Measure only the work done by this commit, not the time spent
	 * waiting on a previous one above.
	 */
	start_time = ktime_get_ns();

	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(fs_info);
	if (ret)
		goto cleanup_transaction;

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto cleanup_transaction;

	/* Wait for external writers to detach from this transaction. */
	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);

	/* Delayed items may have been added while we waited; flush again. */
	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto cleanup_transaction;

	btrfs_wait_delalloc_flush(fs_info);

	/* Wait for all ordered extents still attached to this transaction. */
	wait_event(cur_trans->pending_wait,
		   atomic_read(&cur_trans->pending_ordered) == 0);

	btrfs_scrub_pause(fs_info);

	/*
	 * Move to COMMIT_DOING (blocking further joins) and wait until we
	 * are the only writer left on this transaction.
	 */
	spin_lock(&fs_info->trans_lock);
	add_pending_snapshot(trans);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * The async-commit request (btrfs_commit_transaction_async()) is
	 * being honored now; clear the flag for the transaction kthread.
	 */
	clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags);

	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		goto scrub_continue;
	}

	/*
	 * Keep relocation from running concurrently with the root commits
	 * below; held until the transaction goes UNBLOCKED.
	 */
	mutex_lock(&fs_info->reloc_mutex);

	/* Snapshots must be created before the roots are committed. */
	ret = create_pending_snapshots(trans);
	if (ret)
		goto unlock_reloc;

	/*
	 * Snapshot creation can queue more delayed items and delayed refs;
	 * flush both before committing the roots.
	 */
	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto unlock_reloc;

	ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
	if (ret)
		goto unlock_reloc;

	/* Everything delayed must be gone before the trees are frozen. */
	btrfs_assert_delayed_root_empty(fs_info);

	WARN_ON(cur_trans != trans->transaction);

	ret = commit_fs_roots(trans);
	if (ret)
		goto unlock_reloc;

	/* Apply any deferred fs-wide pending changes. */
	btrfs_apply_pending_changes(fs_info);

	/* The tree log is not needed once the full commit goes through. */
	btrfs_free_log_root_tree(trans, fs_info);

	/* Account extents to qgroups before the cow-only roots go out. */
	ret = btrfs_qgroup_account_extents(trans);
	if (ret < 0)
		goto unlock_reloc;

	ret = commit_cowonly_roots(trans);
	if (ret)
		goto unlock_reloc;

	/* Re-check for an abort raised by the work above. */
	if (TRANS_ABORTED(cur_trans)) {
		ret = cur_trans->aborted;
		goto unlock_reloc;
	}

	cur_trans = fs_info->running_transaction;

	btrfs_set_root_node(&fs_info->tree_root->root_item,
			    fs_info->tree_root->node);
	list_add_tail(&fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&fs_info->chunk_root->root_item,
			    fs_info->chunk_root->node);
	list_add_tail(&fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
		btrfs_set_root_node(&fs_info->block_group_root->root_item,
				    fs_info->block_group_root->node);
		list_add_tail(&fs_info->block_group_root->dirty_list,
			      &cur_trans->switch_commits);
	}

	switch_commit_roots(trans);

	ASSERT(list_empty(&cur_trans->dirty_bgs));
	ASSERT(list_empty(&cur_trans->io_bgs));
	update_super_roots(fs_info);

	/* The log root is invalidated by a full commit. */
	btrfs_set_super_log_root(fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
	memcpy(fs_info->super_for_commit, fs_info->super_copy,
	       sizeof(*fs_info->super_copy));

	btrfs_commit_device_sizes(cur_trans);

	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);

	btrfs_trans_release_chunk_metadata(trans);

	/*
	 * Hold tree_log_mutex across writing the tree blocks and supers.
	 * NOTE(review): presumably serializes against log syncs writing
	 * supers of the next transaction — confirm in tree-log.c.
	 */
	mutex_lock(&fs_info->tree_log_mutex);

	spin_lock(&fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	fs_info->running_transaction = NULL;
	spin_unlock(&fs_info->trans_lock);
	mutex_unlock(&fs_info->reloc_mutex);

	wake_up(&fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Error while writing out transaction");
		mutex_unlock(&fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	/* Tree blocks are on disk; redirtied-block tracking can go. */
	btrfs_free_redirty_list(cur_trans);

	ret = write_all_supers(fs_info, 0);
	/*
	 * The super is written; tree log syncing may resume.
	 */
	mutex_unlock(&fs_info->tree_log_mutex);
	if (ret)
		goto scrub_continue;

	/* Supers are on disk: fsync waiters can be released now. */
	cur_trans->state = TRANS_STATE_SUPER_COMMITTED;
	wake_up(&cur_trans->commit_wait);

	btrfs_finish_extent_commit(trans);

	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
		btrfs_clear_space_info_full(fs_info);

	fs_info->last_trans_committed = cur_trans->transid;

	/* Final state: release everyone still waiting for full completion. */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);

	spin_lock(&fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&fs_info->trans_lock);

	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(fs_info->sb);

	trace_btrfs_transaction_commit(fs_info);

	interval = ktime_get_ns() - start_time;

	btrfs_scrub_continue(fs_info);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	update_commit_stats(fs_info, interval);

	return ret;

unlock_reloc:
	mutex_unlock(&fs_info->reloc_mutex);
scrub_continue:
	btrfs_scrub_continue(fs_info);
cleanup_transaction:
	btrfs_trans_release_metadata(trans);
	btrfs_cleanup_pending_block_groups(trans);
	btrfs_trans_release_chunk_metadata(trans);
	trans->block_rsv = NULL;
	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, ret);

	return ret;
}
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530 int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info)
2531 {
2532 struct btrfs_root *root;
2533 int ret;
2534
2535 spin_lock(&fs_info->trans_lock);
2536 if (list_empty(&fs_info->dead_roots)) {
2537 spin_unlock(&fs_info->trans_lock);
2538 return 0;
2539 }
2540 root = list_first_entry(&fs_info->dead_roots,
2541 struct btrfs_root, root_list);
2542 list_del_init(&root->root_list);
2543 spin_unlock(&fs_info->trans_lock);
2544
2545 btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
2546
2547 btrfs_kill_all_delayed_nodes(root);
2548
2549 if (btrfs_header_backref_rev(root->node) <
2550 BTRFS_MIXED_BACKREF_REV)
2551 ret = btrfs_drop_snapshot(root, 0, 0);
2552 else
2553 ret = btrfs_drop_snapshot(root, 1, 0);
2554
2555 btrfs_put_root(root);
2556 return (ret < 0) ? 0 : 1;
2557 }
2558
2559 void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
2560 {
2561 unsigned long prev;
2562 unsigned long bit;
2563
2564 prev = xchg(&fs_info->pending_changes, 0);
2565 if (!prev)
2566 return;
2567
2568 bit = 1 << BTRFS_PENDING_COMMIT;
2569 if (prev & bit)
2570 btrfs_debug(fs_info, "pending commit done");
2571 prev &= ~bit;
2572
2573 if (prev)
2574 btrfs_warn(fs_info,
2575 "unknown pending changes left 0x%lx, ignoring", prev);
2576 }