// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"
#include "zoned.h"

/*
 * Space accounting and reservation for btrfs.
 *
 * Each allocation is tracked in a btrfs_space_info (one each for DATA,
 * METADATA and SYSTEM, or a mixed METADATA|DATA one).  A reservation either
 * succeeds immediately against the currently free or overcommittable space,
 * or a reserve_ticket is queued on the space_info and one of the flushing
 * state machines below (the async reclaim worker, the preemptive background
 * flusher, or the priority/evict paths) reclaims space until the ticket can
 * be granted, or until flushing is exhausted and the ticket fails with
 * -ENOSPC.
 */

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		s_info->bytes_zone_unusable +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list)
		found->full = 0;
}

/*
 * Block groups with more than this value (percents) of unusable space will be
 * scheduled for background reclaim.
 */
#define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH	(75)

/*
 * Calculate chunk size depending on volume type (regular or zoned).
 */
static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
{
	if (btrfs_is_zoned(fs_info))
		return fs_info->zone_size;

	ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);

	if (flags & BTRFS_BLOCK_GROUP_DATA)
		return BTRFS_MAX_DATA_CHUNK_SIZE;
	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return SZ_32M;

	/* Handle BTRFS_BLOCK_GROUP_METADATA */
	if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
		return SZ_1G;

	return SZ_256M;
}

/*
 * Update default chunk size.
 */
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
					u64 chunk_size)
{
	WRITE_ONCE(space_info->chunk_size, chunk_size);
}

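/*
 * Create the in-memory space_info for a block group type and add it to
 * fs_info->space_info.  The DATA space_info is additionally cached in
 * fs_info->data_sinfo for quick lookup.
 */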
static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);
	space_info->clamp = 1;
	btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));

	if (btrfs_is_zoned(info))
		space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

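/*
 * Create the standard set of space_infos at mount: SYSTEM, plus either a
 * mixed METADATA|DATA space_info or separate METADATA and DATA ones,
 * depending on the MIXED_GROUPS incompat feature.
 */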
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

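/*
 * Add a block group's bytes to its space_info's counters and wake any
 * tickets that can now be granted from the new space.
 */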
void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly, u64 bytes_zone_unusable,
			     bool active, struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	factor = btrfs_bg_type_to_factor(flags);

	found = btrfs_find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	if (active)
		found->active_total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	found->bytes_zone_unusable += bytes_zone_unusable;
	if (total_bytes > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);
	*space_info = found;
}

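/* Find the space_info matching the block group type bits in @flags. */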
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	list_for_each_entry(found, head, list) {
		if (found->flags & flags)
			return found;
	}
	return NULL;
}

static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	int factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * 1/2th of the space. If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;
	return avail;
}

static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info,
				       struct btrfs_space_info *space_info)
{
	/*
	 * On regular filesystem, all total_bytes are always writable. On zoned
	 * filesystem, there may be a limitation imposed by max_active_zones.
	 * For metadata allocation, we cannot finish an existing active block
	 * group to avoid a deadlock. Thus, we need to consider only the active
	 * groups to be writable for metadata space.
	 */
	if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
		return space_info->total_bytes;

	return space_info->active_total_bytes;
}

int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
		avail = 0;
	else
		avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
		return 1;
	return 0;
}

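/*
 * Drop a ticket from its space_info list and subtract its bytes from the
 * amount the flushers still need to reclaim.
 */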
static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}

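/* Dump size and reserved bytes of a block reserve under its own lock. */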
#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	lockdep_assert_held(&info->lock);

	/* The free space could be negative in case of overcommit */
	btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",
		   info->flags,
		   (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly, info->bytes_zone_unusable);

	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group *cache;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
			cache->start, cache->length, cache->used, cache->pinned,
			cache->reserved, cache->zone_unusable,
			cache->ro ? "[readonly]" : "");
		spin_unlock(&cache->lock);
		btrfs_dump_free_space(cache, bytes);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

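/*
 * Convert a byte amount to reclaim into a number of metadata items, using
 * the worst case cost of inserting a single item.
 */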
static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * Shrink metadata reservation for delalloc.
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *space_info,
			    u64 to_reclaim, bool wait_ordered,
			    bool for_preempt)
{
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 ordered_bytes;
	u64 items;
	long time_left;
	int loops;

	delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
	ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	if (delalloc_bytes == 0 && ordered_bytes == 0)
		return;

	/* Calc the number of the pages we need flush for space reservation */
	if (to_reclaim == U64_MAX) {
		items = U64_MAX;
	} else {
		/*
		 * to_reclaim is set to however much metadata we need to
		 * reclaim, but reclaiming that much data doesn't really track
		 * exactly.  Flushing delalloc only loosely corresponds to
		 * freed metadata, so bump the target up to at least 1/8th of
		 * the outstanding delalloc and double the item count so that
		 * we flush enough to hopefully get our reservations back.
		 */
		to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
		items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
	}

	trans = current->journal_info;

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (ordered_bytes > delalloc_bytes && !for_preempt)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || ordered_bytes) && loops < 3) {
		u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
		long nr_pages = min_t(u64, temp, LONG_MAX);
		int async_pages;

		btrfs_start_delalloc_roots(fs_info, nr_pages, true);

		/*
		 * We need to make sure any outstanding async pages are now
		 * processed before we continue.  We do not want to wait for
		 * each individual inode to finish its async work, we simply
		 * want to start the IO on everybody, and then come back here
		 * and wait for all of the async work to catch up.  Once we're
		 * done with that we know we'll have ordered extents for
		 * everything and we can decide if we wait for that or not.
		 */
		async_pages = atomic_read(&fs_info->async_delalloc_pages);
		if (!async_pages)
			goto skip_async;

		/*
		 * We don't want to wait forever, if we wrote less pages in this
		 * loop than we have outstanding, only wait for that number of
		 * pages, otherwise we can wait for all async pages to finish
		 * before continuing.
		 */
		if (async_pages > nr_pages)
			async_pages -= nr_pages;
		else
			async_pages = 0;
		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   async_pages);
skip_async:
		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}

		/*
		 * If we are for preemption we just want a one-shot attempt at
		 * flushing delalloc, so break out here regardless of whether
		 * we made progress.
		 */
		if (for_preempt)
			break;

		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		ordered_bytes = percpu_counter_sum_positive(
						&fs_info->ordered_bytes);
	}
}

/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			enum btrfs_flush_state state, bool for_preempt)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
	case FLUSH_DELALLOC_FULL:
		if (state == FLUSH_DELALLOC_FULL)
			num_bytes = U64_MAX;
		shrink_delalloc(fs_info, space_info, num_bytes,
				state != FLUSH_DELALLOC, for_preempt);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		/*
		 * For metadata space on zoned filesystem, reaching here means we
		 * don't have enough space left in active_total_bytes. Try to
		 * activate a block group first, because we may have inactive
		 * block group already allocated.
		 */
		ret = btrfs_zoned_activate_one_bg(fs_info, space_info, false);
		if (ret < 0)
			break;
		else if (ret == 1)
			break;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_get_alloc_profile(fs_info, space_info->flags),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);

		/*
		 * For metadata space on zoned filesystem, allocating a new chunk
		 * is not enough. We still need to activate the block group.
		 * Activate the newly allocated block group by (maybe) finishing
		 * a block group.
		 */
		if (ret == 1) {
			ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true);
			/*
			 * Revert to the original ret regardless of whether we
			 * could finish one block group or not.
			 */
			if (ret >= 0)
				ret = 1;
		}

		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ASSERT(current->journal_info == NULL);
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_commit_transaction(trans);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret, for_preempt);
	return;
}

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 total;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space.  If that's the case add in our overage so we make sure to put
	 * appropriate pressure on the flushing state machine.
	 */
	total = writable_total_bytes(fs_info, space_info);
	if (total + avail < used)
		to_reclaim += used - (total + avail);

	return to_reclaim;
}

static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info)
{
	u64 global_rsv_size = fs_info->global_block_rsv.reserved;
	u64 ordered, delalloc;
	u64 total = writable_total_bytes(fs_info, space_info);
	u64 thresh;
	u64 used;

	thresh = div_factor_fine(total, 90);

	lockdep_assert_held(&space_info->lock);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved +
	     global_rsv_size) >= thresh)
		return false;

	used = space_info->bytes_may_use + space_info->bytes_pinned;

	/* The total flushable belongs to the global rsv, don't flush. */
	if (global_rsv_size >= used)
		return false;

	/*
	 * 128MiB is 1/4 of the maximum global rsv size.  If we have less than
	 * that devoted to other reservations then there's no sense in flushing,
	 * we don't have a lot of things that need flushing.
	 */
	if (used - global_rsv_size <= SZ_128M)
		return false;

	/*
	 * We have tickets queued, bail so we don't compete with the async
	 * flushers.
	 */
	if (space_info->reclaim_size)
		return false;

	/*
	 * If we have over half of the free space occupied by reservations or
	 * pinned then we want to start flushing.
	 *
	 * We do not do the traditional thing here, which is to say
	 *
	 *   if (used >= ((total_bytes + avail) / 2))
	 *     do the flushing;
	 *
	 * because this doesn't quite work how we want.  If we had more than 50%
	 * of the space_info used by bytes_used and we had 0 available we'd just
	 * constantly run the background flusher.  Instead we want it to kick in
	 * if our reclaimable space exceeds our clamped free space.
	 *
	 * Our clamping range is 2^1 -> 2^8.  The clamp is scaled up as more
	 * delalloc is outstanding, which shrinks the free space threshold and
	 * makes preemptive flushing kick in sooner, so it can keep up with the
	 * rate that reservations are being made.
	 */
	thresh = calc_available_free_space(fs_info, space_info,
					   BTRFS_RESERVE_FLUSH_ALL);
	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_readonly + global_rsv_size;
	if (used < total)
		thresh += total - used;
	thresh >>= space_info->clamp;

	used = space_info->bytes_pinned;

	/*
	 * If we have more ordered bytes than delalloc bytes then we're either
	 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
	 * around.  Preemptive flushing is only useful in that it can free up
	 * space before tickets need to wait for things to finish.  In the case
	 * of ordered extents, preemptively waiting on ordered extents gets us
	 * nothing, if our reservations are tied up in ordered extents we'll
	 * simply have to slow down writers by forcing them to wait on ordered
	 * extents.
	 *
	 * In the case that ordered is larger than delalloc, only include the
	 * block reserves that we would actually be able to directly reclaim
	 * from.  In this case if we're heavy on metadata operations this will
	 * clearly be heavy enough to warrant preemptive flushing.  In the case
	 * of heavy DIO or ordered reservations, preemptive flushing will just
	 * waste time and cause us to slow down.
	 *
	 * We want to make sure we truly are maxed out on ordered however, so
	 * cut ordered in half, and if it's still higher than delalloc then we
	 * can keep flushing.  This is to avoid the case where we start
	 * flushing, and now delalloc == ordered and we stop preemptively
	 * flushing when we could still have several gigs of delalloc to flush.
	 */
	ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
	delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
	if (ordered >= delalloc)
		used += fs_info->delayed_refs_rsv.reserved +
			fs_info->delayed_block_rsv.reserved;
	else
		used += space_info->bytes_may_use - global_rsv_size;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

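/*
 * Steal a ticket's worth of bytes from the global block reserve, but only
 * if the global reserve would still have at least 10% of its size reserved
 * afterwards.
 */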
static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (!ticket->steal)
		return false;

	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, 1);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	remove_ticket(space_info, ticket);
	ticket->bytes = 0;
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}

/*
 * We've exhausted our flushing, start failing tickets.
 *
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets.  The reservation code handles tickets in
 * FIFO order, so if we cannot satisfy the first ticket we'll simply be stuck
 * waiting forever.  Thus we fail tickets one by one until we can make
 * progress again, or until everything has been failed.
 *
 * Returns true if some tickets were granted or failed, which tells the
 * caller to restart the flushing state machine.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	const bool aborted = BTRFS_FS_ERROR(fs_info);

	trace_btrfs_fail_all_tickets(fs_info, space_info);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		if (aborted)
			ticket->error = -EIO;
		else
			ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		if (!aborted)
			btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to.  We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	enum btrfs_flush_state flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We do not want to empty the system of delalloc unless we're
		 * under heavy pressure, so allow one trip through the flushing
		 * logic before we start doing a FLUSH_DELALLOC_FULL.
		 */
		if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
			flush_state++;

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space.  Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim.  We would rather use that than possibly create a
		 * underutilized metadata chunk.  So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
		 * commit the transaction.  If nothing has changed the next go
		 * around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

/*
 * This handles pre-flushing of metadata space before we get to the point that
 * we need to start blocking threads on tickets.  The logic here is different
 * from the other flush paths because it doesn't rely on tickets to tell us how
 * much we need to flush, instead it attempts to keep us below a high watermark
 * of usage (90% of the writable total) by flushing whichever reservation pool
 * is currently the largest.
 */
static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv;
	struct btrfs_block_rsv *trans_rsv;
	int loops = 0;

	fs_info = container_of(work, struct btrfs_fs_info,
			       preempt_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	delayed_block_rsv = &fs_info->delayed_block_rsv;
	delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	global_rsv = &fs_info->global_block_rsv;
	trans_rsv = &fs_info->trans_block_rsv;

	spin_lock(&space_info->lock);
	while (need_preemptive_reclaim(fs_info, space_info)) {
		enum btrfs_flush_state flush;
		u64 delalloc_size = 0;
		u64 to_reclaim, block_rsv_size;
		u64 global_rsv_size = global_rsv->reserved;

		loops++;

		/*
		 * We don't have a precise counter for the metadata being
		 * reserved for delalloc, so we'll approximate it by subtracting
		 * out the block rsv's space from the bytes_may_use.  If that
		 * amount is higher than the individual reserves, then we can
		 * assume it's tied up in delalloc reservations.
		 */
		block_rsv_size = global_rsv_size +
			delayed_block_rsv->reserved +
			delayed_refs_rsv->reserved +
			trans_rsv->reserved;
		if (block_rsv_size < space_info->bytes_may_use)
			delalloc_size = space_info->bytes_may_use - block_rsv_size;

		/*
		 * We don't want to include the global_rsv in our calculation,
		 * because that's space we can't touch.  Subtract it from the
		 * block_rsv_size for the next checks.
		 */
		block_rsv_size -= global_rsv_size;

		/*
		 * We really want to avoid flushing delalloc too much, as it
		 * could result in poor allocation patterns, so only flush it if
		 * it's larger than the rest of the pools combined.
		 */
		if (delalloc_size > block_rsv_size) {
			to_reclaim = delalloc_size;
			flush = FLUSH_DELALLOC;
		} else if (space_info->bytes_pinned >
			   (delayed_block_rsv->reserved +
			    delayed_refs_rsv->reserved)) {
			to_reclaim = space_info->bytes_pinned;
			flush = COMMIT_TRANS;
		} else if (delayed_block_rsv->reserved >
			   delayed_refs_rsv->reserved) {
			to_reclaim = delayed_block_rsv->reserved;
			flush = FLUSH_DELAYED_ITEMS_NR;
		} else {
			to_reclaim = delayed_refs_rsv->reserved;
			flush = FLUSH_DELAYED_REFS_NR;
		}

		spin_unlock(&space_info->lock);

		/*
		 * We don't want to reclaim everything, just a portion, so scale
		 * down the to_reclaim by 1/4.  If it takes us down to 0,
		 * reclaim 1 items worth.
		 */
		to_reclaim >>= 2;
		if (!to_reclaim)
			to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
		flush_space(fs_info, space_info, to_reclaim, flush, true);
		cond_resched();
		spin_lock(&space_info->lock);
	}

	/* We only went through once, back off our clamping. */
	if (loops == 1 && !space_info->reclaim_size)
		space_info->clamp = max(1, space_info->clamp - 1);
	trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

/*
 * Flush states for data reservations.  The data path first tries to allocate
 * more chunks (the !space_info->full loop in the worker below); only once no
 * new data chunks can be allocated do we fall back to freeing space:
 *
 * FLUSH_DELALLOC_FULL:
 *   Flush all delalloc and wait for all ordered extents to finish, which
 *   releases any over-reserved data space (for example from compressed
 *   writes or overwrites of existing extents).
 *
 * RUN_DELAYED_IPUTS:
 *   Run and wait for the delayed iputs, which can drop the last reference
 *   on unlinked inodes and free their space.
 *
 * COMMIT_TRANS:
 *   Commit the transaction to unpin freed extents.
 *
 * ALLOC_CHUNK_FORCE:
 *   Finally force a chunk allocation, in case the earlier flushing freed
 *   enough unallocated space for a new data chunk.
 */
static const enum btrfs_flush_state data_flush_states[] = {
	FLUSH_DELALLOC_FULL,
	RUN_DELAYED_IPUTS,
	COMMIT_TRANS,
	ALLOC_CHUNK_FORCE,
};

static void btrfs_async_reclaim_data_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 last_tickets_id;
	enum btrfs_flush_state flush_state = 0;

	fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
	space_info = fs_info->data_sinfo;

	spin_lock(&space_info->lock);
	if (list_empty(&space_info->tickets)) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	while (!space_info->full) {
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		/* Something happened, fail everything and bail. */
		if (BTRFS_FS_ERROR(fs_info))
			goto aborted_fs;
		last_tickets_id = space_info->tickets_id;
		spin_unlock(&space_info->lock);
	}

	while (flush_state < ARRAY_SIZE(data_flush_states)) {
		flush_space(fs_info, space_info, U64_MAX,
			    data_flush_states[flush_state], false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = 0;
		}

		if (flush_state >= ARRAY_SIZE(data_flush_states)) {
			if (space_info->full) {
				if (maybe_fail_all_tickets(fs_info, space_info))
					flush_state = 0;
				else
					space_info->flush = 0;
			} else {
				flush_state = 0;
			}

			/* Something happened, fail everything and bail. */
			if (BTRFS_FS_ERROR(fs_info))
				goto aborted_fs;
		}
		spin_unlock(&space_info->lock);
	}
	return;

aborted_fs:
	maybe_fail_all_tickets(fs_info, space_info);
	space_info->flush = 0;
	spin_unlock(&space_info->lock);
}

void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
	INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
	INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
	INIT_WORK(&fs_info->preempt_reclaim_work,
		  btrfs_preempt_reclaim_metadata_space);
}

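/*
 * Flush states for priority (BTRFS_RESERVE_FLUSH_LIMIT) reservations: only
 * the cheaper steps, run directly by the reserving task, without committing
 * a transaction.
 */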
static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

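/*
 * Flush states used when evicting an inode: run everything up to and
 * including a transaction commit before giving up.
 */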
static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	FLUSH_DELALLOC_FULL,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state = 0;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);

	/*
	 * This is the priority reclaim path, so to_reclaim could be >0 still
	 * because we may have only satisfied the priority tickets and still
	 * left non priority tickets on the list.  We would then have
	 * to_reclaim but ->bytes == 0.
	 */
	if (ticket->bytes == 0) {
		spin_unlock(&space_info->lock);
		return;
	}

	while (flush_state < states_nr) {
		spin_unlock(&space_info->lock);
		flush_space(fs_info, space_info, to_reclaim, states[flush_state],
			    false);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
	}

	/* Attempt to steal from the global rsv if we can. */
	if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
		ticket->error = -ENOSPC;
		remove_ticket(space_info, ticket);
	}

	/*
	 * We must run try_granting_tickets here because we could be a large
	 * ticket in front of a smaller ticket that can now be satisfied with
	 * the available space.
	 */
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

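/*
 * Priority flushing for data reservations: force chunk allocations until
 * the data space_info is full, then fail the ticket with -ENOSPC.
 */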
static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					struct reserve_ticket *ticket)
{
	spin_lock(&space_info->lock);

	/* We could have been granted before we got here. */
	if (ticket->bytes == 0) {
		spin_unlock(&space_info->lock);
		return;
	}

	while (!space_info->full) {
		spin_unlock(&space_info->lock);
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
	}

	ticket->error = -ENOSPC;
	remove_ticket(space_info, ticket);
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

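/*
 * Sleep killably until the ticket is granted or failed.  A fatal signal
 * fails the ticket with -EINTR and removes it from the list, so the
 * flusher can't grant it after we have unlocked the space_info.
 */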
static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list. After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket. If that would happen, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}

/*
 * Do the appropriate flushing and waiting for a ticket.
 *
 * @fs_info:    the filesystem
 * @space_info: space info for the reservation
 * @ticket:     ticket for the reservation
 * @start_ns:   timestamp when the reservation started
 * @orig_bytes: amount of bytes originally reserved
 * @flush:      how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 u64 start_ns, u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_DATA:
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
		priority_reclaim_data_space(fs_info, space_info, ticket);
		break;
	default:
		ASSERT(0);
		break;
	}

	ret = ticket->error;
	ASSERT(list_empty(&ticket->list));

	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
				   start_ns, flush, ticket->error);
	return ret;
}

/*
 * This returns true if this flush state will go through the ordinary flushing
 * code.
 */
static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
		(flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}

static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
				       struct btrfs_space_info *space_info)
{
	u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);

	/*
	 * If we're heavy on ordered operations then clamping won't help us.  We
	 * need to clamp specifically to keep up with dirty'ing buffered
	 * writers, because there's not a 1:1 correlation of writing delalloc
	 * and freeing space, like there is with flushing delayed refs or
	 * delayed nodes.  If we're already more ordered than delalloc then
	 * we're keeping up, otherwise we aren't and should probably clamp.
	 */
	if (ordered < delalloc)
		space_info->clamp = min(space_info->clamp + 1, 8);
}

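/* Whether this flush mode is allowed to steal from the global reserve. */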
static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		flush == BTRFS_RESERVE_FLUSH_EVICT);
}

/*
 * Try to reserve bytes from the block_rsv's space.
 *
 * @fs_info:    the filesystem
 * @space_info: space info we want to allocate from
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int __reserve_bytes(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *space_info, u64 orig_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	struct work_struct *async_work;
	struct reserve_ticket ticket;
	u64 start_ns = 0;
	u64 used;
	int ret = 0;
	bool pending_tickets;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	if (flush == BTRFS_RESERVE_FLUSH_DATA)
		async_work = &fs_info->async_data_reclaim_work;
	else
		async_work = &fs_info->async_reclaim_work;

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We don't want NO_FLUSH allocations to jump everybody, they can
	 * generally handle ENOSPC in a different way, so treat them the same as
	 * normal flushers when it comes to skipping pending tickets.
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
			!list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = can_steal(flush);
		if (trace_btrfs_reserve_ticket_enabled())
			start_ns = ktime_get_ns();

		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		    flush == BTRFS_RESERVE_FLUSH_DATA) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				/*
				 * We were forced to add a reserve ticket, so
				 * our preemptive flushing is unable to keep
				 * up.  Clamp down on the threshold for the
				 * preemptive flushing in order to keep up with
				 * the workload.
				 */
				maybe_clamp_preempt(fs_info, space_info);

				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq, async_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    !work_busy(&fs_info->preempt_reclaim_work) &&
		    need_preemptive_reclaim(fs_info, space_info)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->preempt_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
				     orig_bytes, flush);
}

/*
 * Try to reserve metadata bytes from the block_rsv's space.
 *
 * @fs_info:    the filesystem
 * @block_rsv:  block_rsv we're allocating for
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_rsv *block_rsv,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, block_rsv->space_info,
					      orig_bytes, 0);
	}
	return ret;
}

/*
 * Try to reserve data bytes for an allocation.
 *
 * @fs_info: the filesystem
 * @bytes:   number of bytes we need
 * @flush:   how we are allowed to flush
 *
 * This will reserve bytes from the data space info.  If there is not enough
 * space then we will attempt to flush space as specified by flush.
 */
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
			     enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
	int ret;

	ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
	       flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);

	ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
	}
	return ret;
}