// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"
#include "zoned.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info.  This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field.  Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation.  There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's.  These are basically buckets for every different type of
 *   metadata reservation we have.  You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size.  These are the worst case calculations we use based
 *   on the number of items we will want to modify.  We have one for changing
 *   items, and one for inserting new items.  Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   the num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space.
 *
 *   -> __reserve_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick the async flush thread
 *
 *   ->handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
 *     on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes.  This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed.  If it can, the space is added to space_info->bytes_may_use
 *     and the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0; if so we got our reservation and we can carry
 *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted.)
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order to
 *   reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs.  Each of these delayed
 *   things however holds reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode.  Take a simple write
 *     for example: we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes.  We keep these delayed items to coalesce these operations
 *     into a single operation done on demand.  These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation.  We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation.  Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit, and because we don't want to have a lot of useless metadata
 *     space when our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items.  Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     This will commit the transaction.  Historically we had a lot of logic
 *     surrounding whether or not we'd commit the transaction, but this was born
 *     out of a pre-tickets era where we could end up committing the transaction
 *     thousands of times in a row without making progress.  Now thanks to our
 *     ticketing system we know if we're not making progress and can error
 *     everybody out after a few commits rather than burning the disk hoping for
 *     a different answer.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space.  This only happens with metadata; data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space.  If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
 *   thing with or without extra unallocated space.
 */
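
/*
 * Illustrative sketch only (counter names as described above, not a new API):
 * the admission check behind every reservation path in this file is
 * conceptually
 *
 *     used = bytes_used + bytes_reserved + bytes_pinned +
 *            bytes_readonly + bytes_zone_unusable + bytes_may_use;
 *     if (used + num_bytes <= total_bytes)
 *         bytes_may_use += num_bytes;      (reservation granted)
 *
 * and otherwise a ticket is queued and the flushing machinery is kicked.
 * btrfs_space_info_used() below computes the "used" sum, and
 * btrfs_try_granting_tickets() applies this check whenever space is returned.
 */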

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
              bool may_use_included)
{
    ASSERT(s_info);
    return s_info->bytes_used + s_info->bytes_reserved +
        s_info->bytes_pinned + s_info->bytes_readonly +
        s_info->bytes_zone_unusable +
        (may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
    struct list_head *head = &info->space_info;
    struct btrfs_space_info *found;

    list_for_each_entry(found, head, list)
        found->full = 0;
}

/*
 * Block groups with more than this percentage of unusable space will be
 * scheduled for background reclaim.
 */
#define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH          (75)

/*
 * Calculate chunk size depending on volume type (regular or zoned).
 */
static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
{
    if (btrfs_is_zoned(fs_info))
        return fs_info->zone_size;

    ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);

    if (flags & BTRFS_BLOCK_GROUP_DATA)
        return BTRFS_MAX_DATA_CHUNK_SIZE;
    else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
        return SZ_32M;

    /* Handle BTRFS_BLOCK_GROUP_METADATA */
    if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
        return SZ_1G;

    return SZ_256M;
}

/*
 * Update the default chunk size.
 */
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
                    u64 chunk_size)
{
    WRITE_ONCE(space_info->chunk_size, chunk_size);
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
    struct btrfs_space_info *space_info;
    int i;
    int ret;

    space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
    if (!space_info)
        return -ENOMEM;

    for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
        INIT_LIST_HEAD(&space_info->block_groups[i]);
    init_rwsem(&space_info->groups_sem);
    spin_lock_init(&space_info->lock);
    space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
    space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
    INIT_LIST_HEAD(&space_info->ro_bgs);
    INIT_LIST_HEAD(&space_info->tickets);
    INIT_LIST_HEAD(&space_info->priority_tickets);
    space_info->clamp = 1;
    btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));

    if (btrfs_is_zoned(info))
        space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;

    ret = btrfs_sysfs_add_space_info_type(info, space_info);
    if (ret)
        return ret;

    list_add(&space_info->list, &info->space_info);
    if (flags & BTRFS_BLOCK_GROUP_DATA)
        info->data_sinfo = space_info;

    return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
    struct btrfs_super_block *disk_super;
    u64 features;
    u64 flags;
    int mixed = 0;
    int ret;

    disk_super = fs_info->super_copy;
    if (!btrfs_super_root(disk_super))
        return -EINVAL;

    features = btrfs_super_incompat_flags(disk_super);
    if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
        mixed = 1;

    flags = BTRFS_BLOCK_GROUP_SYSTEM;
    ret = create_space_info(fs_info, flags);
    if (ret)
        goto out;

    if (mixed) {
        flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
        ret = create_space_info(fs_info, flags);
    } else {
        flags = BTRFS_BLOCK_GROUP_METADATA;
        ret = create_space_info(fs_info, flags);
        if (ret)
            goto out;

        flags = BTRFS_BLOCK_GROUP_DATA;
        ret = create_space_info(fs_info, flags);
    }
out:
    return ret;
}

void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
                 u64 total_bytes, u64 bytes_used,
                 u64 bytes_readonly, u64 bytes_zone_unusable,
                 bool active, struct btrfs_space_info **space_info)
{
    struct btrfs_space_info *found;
    int factor;

    factor = btrfs_bg_type_to_factor(flags);

    found = btrfs_find_space_info(info, flags);
    ASSERT(found);
    spin_lock(&found->lock);
    found->total_bytes += total_bytes;
    if (active)
        found->active_total_bytes += total_bytes;
    found->disk_total += total_bytes * factor;
    found->bytes_used += bytes_used;
    found->disk_used += bytes_used * factor;
    found->bytes_readonly += bytes_readonly;
    found->bytes_zone_unusable += bytes_zone_unusable;
    if (total_bytes > 0)
        found->full = 0;
    btrfs_try_granting_tickets(info, found);
    spin_unlock(&found->lock);
    *space_info = found;
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
                           u64 flags)
{
    struct list_head *head = &info->space_info;
    struct btrfs_space_info *found;

    flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

    list_for_each_entry(found, head, list) {
        if (found->flags & flags)
            return found;
    }
    return NULL;
}

static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
              struct btrfs_space_info *space_info,
              enum btrfs_reserve_flush_enum flush)
{
    u64 profile;
    u64 avail;
    int factor;

    if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
        profile = btrfs_system_alloc_profile(fs_info);
    else
        profile = btrfs_metadata_alloc_profile(fs_info);

    avail = atomic64_read(&fs_info->free_chunk_space);

    /*
     * If we have dup, raid1 or raid10 then only half of the free
     * space is actually usable.  For raid56, the space info used
     * doesn't include the parity drive, so we don't have to
     * change the math.
     */
    factor = btrfs_bg_type_to_factor(profile);
    avail = div_u64(avail, factor);

    /*
     * If we aren't flushing all things, let us overcommit up to
     * half of the space.  If we can flush, don't let us overcommit
     * too much, let it overcommit up to 1/8 of the space.
     */
    if (flush == BTRFS_RESERVE_FLUSH_ALL)
        avail >>= 3;
    else
        avail >>= 1;
    return avail;
}

static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info,
                       struct btrfs_space_info *space_info)
{
    /*
     * On a regular filesystem, all of total_bytes is always writable.  On a
     * zoned filesystem, there may be a limitation imposed by
     * max_active_zones.  For metadata allocation, we cannot finish an
     * existing active block group to avoid a deadlock.  Thus, we need to
     * consider only the active groups to be writable for metadata space.
     */
    if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
        return space_info->total_bytes;

    return space_info->active_total_bytes;
}

int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
             struct btrfs_space_info *space_info, u64 bytes,
             enum btrfs_reserve_flush_enum flush)
{
    u64 avail;
    u64 used;

    /* Don't overcommit when in mixed mode */
    if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
        return 0;

    used = btrfs_space_info_used(space_info, true);
    if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
        avail = 0;
    else
        avail = calc_available_free_space(fs_info, space_info, flush);

    if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
        return 1;
    return 0;
}
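
/*
 * Example usage (a sketch mirroring btrfs_try_granting_tickets() below; all
 * names are from this file): callers check a pending reservation against the
 * writable total plus whatever overcommit allows, under space_info->lock:
 *
 *     spin_lock(&space_info->lock);
 *     used = btrfs_space_info_used(space_info, true);
 *     if (used + bytes <= writable_total_bytes(fs_info, space_info) ||
 *         btrfs_can_overcommit(fs_info, space_info, bytes, flush)) {
 *         ... there is room, account the bytes in ->bytes_may_use ...
 *     }
 *     spin_unlock(&space_info->lock);
 */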

static void remove_ticket(struct btrfs_space_info *space_info,
              struct reserve_ticket *ticket)
{
    if (!list_empty(&ticket->list)) {
        list_del_init(&ticket->list);
        ASSERT(space_info->reclaim_size >= ticket->bytes);
        space_info->reclaim_size -= ticket->bytes;
    }
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
                struct btrfs_space_info *space_info)
{
    struct list_head *head;
    enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

    lockdep_assert_held(&space_info->lock);

    head = &space_info->priority_tickets;
again:
    while (!list_empty(head)) {
        struct reserve_ticket *ticket;
        u64 used = btrfs_space_info_used(space_info, true);

        ticket = list_first_entry(head, struct reserve_ticket, list);

        /* Check and see if our ticket can be satisfied now. */
        if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) ||
            btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
                     flush)) {
            btrfs_space_info_update_bytes_may_use(fs_info,
                                  space_info,
                                  ticket->bytes);
            remove_ticket(space_info, ticket);
            ticket->bytes = 0;
            space_info->tickets_id++;
            wake_up(&ticket->wait);
        } else {
            break;
        }
    }

    if (head == &space_info->priority_tickets) {
        head = &space_info->tickets;
        flush = BTRFS_RESERVE_FLUSH_ALL;
        goto again;
    }
}

#define DUMP_BLOCK_RSV(fs_info, rsv_name)               \
do {                                    \
    struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;       \
    spin_lock(&__rsv->lock);                    \
    btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",  \
           __rsv->size, __rsv->reserved);           \
    spin_unlock(&__rsv->lock);                  \
} while (0)

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
                    struct btrfs_space_info *info)
{
    lockdep_assert_held(&info->lock);

    /* The free space could be negative in case of overcommit */
    btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",
           info->flags,
           (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
           info->full ? "" : "not ");
    btrfs_info(fs_info,
        "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu, zone_unusable=%llu",
        info->total_bytes, info->bytes_used, info->bytes_pinned,
        info->bytes_reserved, info->bytes_may_use,
        info->bytes_readonly, info->bytes_zone_unusable);

    DUMP_BLOCK_RSV(fs_info, global_block_rsv);
    DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
    DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
    DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
    DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
               struct btrfs_space_info *info, u64 bytes,
               int dump_block_groups)
{
    struct btrfs_block_group *cache;
    int index = 0;

    spin_lock(&info->lock);
    __btrfs_dump_space_info(fs_info, info);
    spin_unlock(&info->lock);

    if (!dump_block_groups)
        return;

    down_read(&info->groups_sem);
again:
    list_for_each_entry(cache, &info->block_groups[index], list) {
        spin_lock(&cache->lock);
        btrfs_info(fs_info,
            "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
            cache->start, cache->length, cache->used, cache->pinned,
            cache->reserved, cache->zone_unusable,
            cache->ro ? "[readonly]" : "");
        spin_unlock(&cache->lock);
        btrfs_dump_free_space(cache, bytes);
    }
    if (++index < BTRFS_NR_RAID_TYPES)
        goto again;
    up_read(&info->groups_sem);
}

static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
                    u64 to_reclaim)
{
    u64 bytes;
    u64 nr;

    bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
    nr = div64_u64(to_reclaim, bytes);
    if (!nr)
        nr = 1;
    return nr;
}
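
/*
 * Worked example (illustrative: assuming btrfs_calc_insert_metadata_size()
 * comes to 256KiB per item, as on a 16KiB-nodesize filesystem): a to_reclaim
 * of 1MiB maps to 1MiB / 256KiB = 4 items, and any to_reclaim smaller than
 * one item's worst-case cost is rounded up to a single item.
 */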

#define EXTENT_SIZE_PER_ITEM    SZ_256K

/*
 * Shrink metadata reservations for delalloc.
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
                struct btrfs_space_info *space_info,
                u64 to_reclaim, bool wait_ordered,
                bool for_preempt)
{
    struct btrfs_trans_handle *trans;
    u64 delalloc_bytes;
    u64 ordered_bytes;
    u64 items;
    long time_left;
    int loops;

    delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
    ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
    if (delalloc_bytes == 0 && ordered_bytes == 0)
        return;

    /* Calculate the number of pages we need to flush for space reservation */
    if (to_reclaim == U64_MAX) {
        items = U64_MAX;
    } else {
        /*
         * to_reclaim is set to however much metadata we need to
         * reclaim, but reclaiming that much data doesn't really track
         * exactly.  What we really want to do is reclaim a full
         * inode's worth of reservations, however that's not available
         * to us here.  We will take a fraction of the delalloc bytes
         * for our flushing loops and hope for the best.  Delalloc will
         * expand the amount we write to cover an entire dirty extent,
         * which will reclaim the metadata reservation for that range.
         * If it's not enough subsequent flush stages will be more
         * aggressive.
         */
        to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
        items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
    }

    trans = current->journal_info;

    /*
     * If we are doing more ordered than delalloc we need to just wait on
     * ordered extents, otherwise we'll waste time trying to flush delalloc
     * that likely won't give us the space back we need.
     */
    if (ordered_bytes > delalloc_bytes && !for_preempt)
        wait_ordered = true;

    loops = 0;
    while ((delalloc_bytes || ordered_bytes) && loops < 3) {
        u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
        long nr_pages = min_t(u64, temp, LONG_MAX);
        int async_pages;

        btrfs_start_delalloc_roots(fs_info, nr_pages, true);

        /*
         * We need to make sure any outstanding async pages are now
         * processed before we continue.  This is because things like
         * sync_inode() try to be smart and skip writing if the inode is
         * marked clean.  We don't use filemap_fdatawrite() for flushing
         * because we want to control how many pages we write out at a
         * time, thus this is the only safe way to make sure we've
         * waited for outstanding compressed workers to have started
         * their jobs and thus have ordered extents set up properly.
         *
         * This exists because we do not want to wait for each
         * individual inode to finish its async work, we simply want to
         * start the IO on everybody, and then come back here and wait
         * for all of the async work to catch up.  Once we're done with
         * that we know we'll have ordered extents for everything and we
         * can decide if we wait for that or not.
         *
         * If we choose to replace this in the future, make absolutely
         * sure that the proper waiting is being done in the async case,
         * as there have been bugs in that area before.
         */
        async_pages = atomic_read(&fs_info->async_delalloc_pages);
        if (!async_pages)
            goto skip_async;

        /*
         * We don't want to wait forever; if we wrote fewer pages in
         * this loop than we have outstanding, only wait for that number
         * of pages, otherwise we can wait for all async pages to finish
         * before continuing.
         */
        if (async_pages > nr_pages)
            async_pages -= nr_pages;
        else
            async_pages = 0;
        wait_event(fs_info->async_submit_wait,
               atomic_read(&fs_info->async_delalloc_pages) <=
               async_pages);
skip_async:
        loops++;
        if (wait_ordered && !trans) {
            btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
        } else {
            time_left = schedule_timeout_killable(1);
            if (time_left)
                break;
        }

        /*
         * If this is for preemption we just want a one-shot of delalloc
         * flushing so we can stop flushing if we decide we don't need
         * to anymore.
         */
        if (for_preempt)
            break;

        spin_lock(&space_info->lock);
        if (list_empty(&space_info->tickets) &&
            list_empty(&space_info->priority_tickets)) {
            spin_unlock(&space_info->lock);
            break;
        }
        spin_unlock(&space_info->lock);

        delalloc_bytes = percpu_counter_sum_positive(
                        &fs_info->delalloc_bytes);
        ordered_bytes = percpu_counter_sum_positive(
                        &fs_info->ordered_bytes);
    }
}

/*
 * Try to flush some data based on policy set by @state.  This is only advisory
 * and may fail for various reasons.  The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
               struct btrfs_space_info *space_info, u64 num_bytes,
               enum btrfs_flush_state state, bool for_preempt)
{
    struct btrfs_root *root = fs_info->tree_root;
    struct btrfs_trans_handle *trans;
    int nr;
    int ret = 0;

    switch (state) {
    case FLUSH_DELAYED_ITEMS_NR:
    case FLUSH_DELAYED_ITEMS:
        if (state == FLUSH_DELAYED_ITEMS_NR)
            nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
        else
            nr = -1;

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
            ret = PTR_ERR(trans);
            break;
        }
        ret = btrfs_run_delayed_items_nr(trans, nr);
        btrfs_end_transaction(trans);
        break;
    case FLUSH_DELALLOC:
    case FLUSH_DELALLOC_WAIT:
    case FLUSH_DELALLOC_FULL:
        if (state == FLUSH_DELALLOC_FULL)
            num_bytes = U64_MAX;
        shrink_delalloc(fs_info, space_info, num_bytes,
                state != FLUSH_DELALLOC, for_preempt);
        break;
    case FLUSH_DELAYED_REFS_NR:
    case FLUSH_DELAYED_REFS:
        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
            ret = PTR_ERR(trans);
            break;
        }
        if (state == FLUSH_DELAYED_REFS_NR)
            nr = calc_reclaim_items_nr(fs_info, num_bytes);
        else
            nr = 0;
        btrfs_run_delayed_refs(trans, nr);
        btrfs_end_transaction(trans);
        break;
    case ALLOC_CHUNK:
    case ALLOC_CHUNK_FORCE:
        /*
         * For metadata space on a zoned filesystem, reaching here means
         * we don't have enough space left in active_total_bytes.  Try
         * to activate a block group first, because we may already have
         * an inactive block group allocated.
         */
        ret = btrfs_zoned_activate_one_bg(fs_info, space_info, false);
        if (ret < 0)
            break;
        else if (ret == 1)
            break;

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
            ret = PTR_ERR(trans);
            break;
        }
        ret = btrfs_chunk_alloc(trans,
                btrfs_get_alloc_profile(fs_info, space_info->flags),
                (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
                    CHUNK_ALLOC_FORCE);
        btrfs_end_transaction(trans);

        /*
         * For metadata space on a zoned filesystem, allocating a new
         * chunk is not enough.  We still need to activate the block
         * group.  Activate the newly allocated block group by (maybe)
         * finishing a block group.
         */
        if (ret == 1) {
            ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true);
            /*
             * Revert to the original ret regardless of whether we
             * could finish one block group or not.
             */
            if (ret >= 0)
                ret = 1;
        }

        if (ret > 0 || ret == -ENOSPC)
            ret = 0;
        break;
    case RUN_DELAYED_IPUTS:
        /*
         * If we have pending delayed iputs then we could free up a
         * bunch of pinned space, so make sure we run the iputs before
         * we do our pinned bytes check below.
         */
        btrfs_run_delayed_iputs(fs_info);
        btrfs_wait_on_delayed_iputs(fs_info);
        break;
    case COMMIT_TRANS:
        ASSERT(current->journal_info == NULL);
        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
            ret = PTR_ERR(trans);
            break;
        }
        ret = btrfs_commit_transaction(trans);
        break;
    default:
        ret = -ENOSPC;
        break;
    }

    trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
                ret, for_preempt);
    return;
}

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
                 struct btrfs_space_info *space_info)
{
    u64 used;
    u64 avail;
    u64 total;
    u64 to_reclaim = space_info->reclaim_size;

    lockdep_assert_held(&space_info->lock);

    avail = calc_available_free_space(fs_info, space_info,
                      BTRFS_RESERVE_FLUSH_ALL);
    used = btrfs_space_info_used(space_info, true);

    /*
     * We may be flushing because suddenly we have less space than we had
     * before, and now we're well over-committed based on our current free
     * space.  If that's the case add in our overage so we make sure to put
     * appropriate pressure on the flushing state machine.
     */
    total = writable_total_bytes(fs_info, space_info);
    if (total + avail < used)
        to_reclaim += used - (total + avail);

    return to_reclaim;
}
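
/*
 * Worked example of the overage adjustment above (illustrative numbers only):
 * with total = 8GiB, avail = 1GiB and used = 10GiB we are over-committed by
 * used - (total + avail) = 1GiB, so that extra 1GiB is added on top of the
 * queued space_info->reclaim_size.
 */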

static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
                    struct btrfs_space_info *space_info)
{
    u64 global_rsv_size = fs_info->global_block_rsv.reserved;
    u64 ordered, delalloc;
    u64 total = writable_total_bytes(fs_info, space_info);
    u64 thresh;
    u64 used;

    thresh = div_factor_fine(total, 90);

    lockdep_assert_held(&space_info->lock);

    /* If we're just plain full then async reclaim just slows us down. */
    if ((space_info->bytes_used + space_info->bytes_reserved +
         global_rsv_size) >= thresh)
        return false;

    used = space_info->bytes_may_use + space_info->bytes_pinned;

    /* The total flushable belongs to the global rsv, don't flush. */
    if (global_rsv_size >= used)
        return false;

    /*
     * 128MiB is 1/4 of the maximum global rsv size.  If we have less than
     * that devoted to other reservations then there's no sense in flushing,
     * we don't have a lot of things that need flushing.
     */
    if (used - global_rsv_size <= SZ_128M)
        return false;

    /*
     * We have tickets queued, bail so we don't compete with the async
     * flushers.
     */
    if (space_info->reclaim_size)
        return false;

    /*
     * If we have over half of the free space occupied by reservations or
     * pinned then we want to start flushing.
     *
     * We do not do the traditional thing here, which is to say
     *
     *   if (used >= ((total_bytes + avail) / 2))
     *     return 1;
     *
     * because this doesn't quite work how we want.  If we had more than 50%
     * of the space_info used by bytes_used and we had 0 available we'd just
     * constantly run the background flusher.  Instead we want it to kick in
     * if our reclaimable space exceeds our clamped free space.
     *
     * Our clamping range is 2^1 -> 2^8.  Practically speaking that means
     * the following:
     *
     * Amount of RAM        Minimum threshold       Maximum threshold
     *
     *        256GiB                     1GiB                  128GiB
     *        128GiB                   512MiB                   64GiB
     *         64GiB                   256MiB                   32GiB
     *         32GiB                   128MiB                   16GiB
     *         16GiB                    64MiB                    8GiB
     *
     * These are the range our thresholds will fall in, corresponding to how
     * much delalloc we need for the background flusher to kick in.
     */

    thresh = calc_available_free_space(fs_info, space_info,
                       BTRFS_RESERVE_FLUSH_ALL);
    used = space_info->bytes_used + space_info->bytes_reserved +
           space_info->bytes_readonly + global_rsv_size;
    if (used < total)
        thresh += total - used;
    thresh >>= space_info->clamp;

    used = space_info->bytes_pinned;

    /*
     * If we have more ordered bytes than delalloc bytes then we're either
     * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
     * around.  Preemptive flushing is only useful in that it can free up
     * space before tickets need to wait for things to finish.  In the case
     * of ordered extents, preemptively waiting on ordered extents gets us
     * nothing, if our reservations are tied up in ordered extents we'll
     * simply have to slow down writers by forcing them to wait on ordered
     * extents.
     *
     * In the case that ordered is larger than delalloc, only include the
     * block reserves that we would actually be able to directly reclaim
     * from.  In this case if we're heavy on metadata operations this will
     * clearly be heavy enough to warrant preemptive flushing.  In the case
     * of heavy DIO or ordered reservations, preemptive flushing will just
     * waste time and cause us to slow down.
     *
     * We want to make sure we truly are maxed out on ordered however, so
     * cut ordered in half, and if it's still higher than delalloc then we
     * can keep flushing.  This is to avoid the case where we start
     * flushing, and now delalloc == ordered and we stop preemptively
     * flushing when we could still have several gigs of delalloc to flush.
     */
    ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
    delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
    if (ordered >= delalloc)
        used += fs_info->delayed_refs_rsv.reserved +
            fs_info->delayed_block_rsv.reserved;
    else
        used += space_info->bytes_may_use - global_rsv_size;

    return (used >= thresh && !btrfs_fs_closing(fs_info) &&
        !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
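
/*
 * Worked example of the clamp math above (illustrative numbers only): with
 * avail = 2GiB, total = 64GiB and 4GiB of non-reclaimable usage, the base
 * threshold is 2GiB + (64GiB - 4GiB) = 62GiB; with clamp = 3 that shrinks to
 * roughly 7.75GiB of reclaimable bytes before preemptive flushing kicks in.
 */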
0938 
0939 static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
0940                   struct btrfs_space_info *space_info,
0941                   struct reserve_ticket *ticket)
0942 {
0943     struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
0944     u64 min_bytes;
0945 
0946     if (!ticket->steal)
0947         return false;
0948 
0949     if (global_rsv->space_info != space_info)
0950         return false;
0951 
0952     spin_lock(&global_rsv->lock);
0953     min_bytes = div_factor(global_rsv->size, 1);
0954     if (global_rsv->reserved < min_bytes + ticket->bytes) {
0955         spin_unlock(&global_rsv->lock);
0956         return false;
0957     }
0958     global_rsv->reserved -= ticket->bytes;
0959     remove_ticket(space_info, ticket);
0960     ticket->bytes = 0;
0961     wake_up(&ticket->wait);
0962     space_info->tickets_id++;
0963     if (global_rsv->reserved < global_rsv->size)
0964         global_rsv->full = 0;
0965     spin_unlock(&global_rsv->lock);
0966 
0967     return true;
0968 }
0969 
0970 /*
0971  * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
0972  * @fs_info - fs_info for this fs
0973  * @space_info - the space info we were flushing
0974  *
0975  * We call this when we've exhausted our flushing ability and haven't made
0976  * progress in satisfying tickets.  The reservation code handles tickets in
0977  * order, so if there is a large ticket first and then smaller ones we could
0978  * very well satisfy the smaller tickets.  This will attempt to wake up any
0979  * tickets in the list to catch this case.
0980  *
0981  * This function returns true if it was able to make progress by clearing out
0982  * other tickets, or if it stumbles across a ticket that was smaller than the
0983  * first ticket.
0984  */
0985 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
0986                    struct btrfs_space_info *space_info)
0987 {
0988     struct reserve_ticket *ticket;
0989     u64 tickets_id = space_info->tickets_id;
0990     const bool aborted = BTRFS_FS_ERROR(fs_info);
0991 
0992     trace_btrfs_fail_all_tickets(fs_info, space_info);
0993 
0994     if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
0995         btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
0996         __btrfs_dump_space_info(fs_info, space_info);
0997     }
0998 
0999     while (!list_empty(&space_info->tickets) &&
1000            tickets_id == space_info->tickets_id) {
1001         ticket = list_first_entry(&space_info->tickets,
1002                       struct reserve_ticket, list);
1003 
1004         if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
1005             return true;
1006 
1007         if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1008             btrfs_info(fs_info, "failing ticket with %llu bytes",
1009                    ticket->bytes);
1010 
1011         remove_ticket(space_info, ticket);
1012         if (aborted)
1013             ticket->error = -EIO;
1014         else
1015             ticket->error = -ENOSPC;
1016         wake_up(&ticket->wait);
1017 
1018         /*
1019          * We're just throwing tickets away, so more flushing may not
1020          * trip over btrfs_try_granting_tickets, so we need to call it
1021          * here to see if we can make progress with the next ticket in
1022          * the list.
1023          */
1024         if (!aborted)
1025             btrfs_try_granting_tickets(fs_info, space_info);
1026     }
1027     return (tickets_id != space_info->tickets_id);
1028 }
1029 
1030 /*
1031  * This is for normal flushers, we can wait all goddamned day if we want to.  We
1032  * will loop and continuously try to flush as long as we are making progress.
1033  * We count progress as clearing off tickets each time we have to loop.
1034  */
1035 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
1036 {
1037     struct btrfs_fs_info *fs_info;
1038     struct btrfs_space_info *space_info;
1039     u64 to_reclaim;
1040     enum btrfs_flush_state flush_state;
1041     int commit_cycles = 0;
1042     u64 last_tickets_id;
1043 
1044     fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
1045     space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1046 
1047     spin_lock(&space_info->lock);
1048     to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1049     if (!to_reclaim) {
1050         space_info->flush = 0;
1051         spin_unlock(&space_info->lock);
1052         return;
1053     }
1054     last_tickets_id = space_info->tickets_id;
1055     spin_unlock(&space_info->lock);
1056 
1057     flush_state = FLUSH_DELAYED_ITEMS_NR;
1058     do {
1059         flush_space(fs_info, space_info, to_reclaim, flush_state, false);
1060         spin_lock(&space_info->lock);
1061         if (list_empty(&space_info->tickets)) {
1062             space_info->flush = 0;
1063             spin_unlock(&space_info->lock);
1064             return;
1065         }
1066         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
1067                                   space_info);
1068         if (last_tickets_id == space_info->tickets_id) {
1069             flush_state++;
1070         } else {
1071             last_tickets_id = space_info->tickets_id;
1072             flush_state = FLUSH_DELAYED_ITEMS_NR;
1073             if (commit_cycles)
1074                 commit_cycles--;
1075         }
1076 
1077         /*
1078          * We do not want to empty the system of delalloc unless we're
1079          * under heavy pressure, so allow one trip through the flushing
1080          * logic before we start doing a FLUSH_DELALLOC_FULL.
1081          */
1082         if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
1083             flush_state++;
1084 
1085         /*
1086          * We don't want to force a chunk allocation until we've tried
1087          * pretty hard to reclaim space.  Think of the case where we
1088          * freed up a bunch of space and so have a lot of pinned space
1089          * to reclaim.  We would rather use that than possibly create a
1090          * underutilized metadata chunk.  So if this is our first run
1091          * through the flushing state machine skip ALLOC_CHUNK_FORCE and
1092          * commit the transaction.  If nothing has changed the next go
1093          * around then we can force a chunk allocation.
1094          */
1095         if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
1096             flush_state++;
1097 
1098         if (flush_state > COMMIT_TRANS) {
1099             commit_cycles++;
1100             if (commit_cycles > 2) {
1101                 if (maybe_fail_all_tickets(fs_info, space_info)) {
1102                     flush_state = FLUSH_DELAYED_ITEMS_NR;
1103                     commit_cycles--;
1104                 } else {
1105                     space_info->flush = 0;
1106                 }
1107             } else {
1108                 flush_state = FLUSH_DELAYED_ITEMS_NR;
1109             }
1110         }
1111         spin_unlock(&space_info->lock);
1112     } while (flush_state <= COMMIT_TRANS);
1113 }
1114 
1115 /*
1116  * This handles pre-flushing of metadata space before we get to the point that
1117  * we need to start blocking threads on tickets.  The logic here is different
1118  * from the other flush paths because it doesn't rely on tickets to tell us how
1119  * much we need to flush, instead it attempts to keep us below the 80% full
1120  * watermark of space by flushing whichever reservation pool is currently the
1121  * largest.
1122  */
1123 static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
1124 {
1125     struct btrfs_fs_info *fs_info;
1126     struct btrfs_space_info *space_info;
1127     struct btrfs_block_rsv *delayed_block_rsv;
1128     struct btrfs_block_rsv *delayed_refs_rsv;
1129     struct btrfs_block_rsv *global_rsv;
1130     struct btrfs_block_rsv *trans_rsv;
1131     int loops = 0;
1132 
1133     fs_info = container_of(work, struct btrfs_fs_info,
1134                    preempt_reclaim_work);
1135     space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1136     delayed_block_rsv = &fs_info->delayed_block_rsv;
1137     delayed_refs_rsv = &fs_info->delayed_refs_rsv;
1138     global_rsv = &fs_info->global_block_rsv;
1139     trans_rsv = &fs_info->trans_block_rsv;
1140 
1141     spin_lock(&space_info->lock);
1142     while (need_preemptive_reclaim(fs_info, space_info)) {
1143         enum btrfs_flush_state flush;
1144         u64 delalloc_size = 0;
1145         u64 to_reclaim, block_rsv_size;
1146         u64 global_rsv_size = global_rsv->reserved;
1147 
1148         loops++;
1149 
1150         /*
1151          * We don't have a precise counter for the metadata being
1152          * reserved for delalloc, so we'll approximate it by subtracting
1153          * out the block rsv's space from the bytes_may_use.  If that
1154          * amount is higher than the individual reserves, then we can
1155          * assume it's tied up in delalloc reservations.
1156          */
1157         block_rsv_size = global_rsv_size +
1158             delayed_block_rsv->reserved +
1159             delayed_refs_rsv->reserved +
1160             trans_rsv->reserved;
1161         if (block_rsv_size < space_info->bytes_may_use)
1162             delalloc_size = space_info->bytes_may_use - block_rsv_size;
1163 
1164         /*
1165          * We don't want to include the global_rsv in our calculation,
1166          * because that's space we can't touch.  Subtract it from the
1167          * block_rsv_size for the next checks.
1168          */
1169         block_rsv_size -= global_rsv_size;
1170 
1171         /*
1172          * We really want to avoid flushing delalloc too much, as it
1173          * could result in poor allocation patterns, so only flush it if
1174          * it's larger than the rest of the pools combined.
1175          */
1176         if (delalloc_size > block_rsv_size) {
1177             to_reclaim = delalloc_size;
1178             flush = FLUSH_DELALLOC;
1179         } else if (space_info->bytes_pinned >
1180                (delayed_block_rsv->reserved +
1181                 delayed_refs_rsv->reserved)) {
1182             to_reclaim = space_info->bytes_pinned;
1183             flush = COMMIT_TRANS;
1184         } else if (delayed_block_rsv->reserved >
1185                delayed_refs_rsv->reserved) {
1186             to_reclaim = delayed_block_rsv->reserved;
1187             flush = FLUSH_DELAYED_ITEMS_NR;
1188         } else {
1189             to_reclaim = delayed_refs_rsv->reserved;
1190             flush = FLUSH_DELAYED_REFS_NR;
1191         }
1192 
1193         spin_unlock(&space_info->lock);
1194 
1195         /*
1196          * We don't want to reclaim everything, just a portion, so scale
1197          * down the to_reclaim by 1/4.  If it takes us down to 0,
1198          * reclaim 1 items worth.
1199          */
1200         to_reclaim >>= 2;
1201         if (!to_reclaim)
1202             to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
1203         flush_space(fs_info, space_info, to_reclaim, flush, true);
1204         cond_resched();
1205         spin_lock(&space_info->lock);
1206     }
1207 
1208     /* We only went through once, back off our clamping. */
1209     if (loops == 1 && !space_info->reclaim_size)
1210         space_info->clamp = max(1, space_info->clamp - 1);
1211     trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
1212     spin_unlock(&space_info->lock);
1213 }
1214 
1215 /*
1216  * FLUSH_DELALLOC_WAIT:
1217  *   Space is freed from flushing delalloc in one of two ways.
1218  *
1219  *   1) compression is on and we allocate less space than we reserved
1220  *   2) we are overwriting existing space
1221  *
1222  *   For #1 that extra space is reclaimed as soon as the delalloc pages are
1223  *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
1224  *   length to ->bytes_reserved, and subtracts the reserved space from
1225  *   ->bytes_may_use.
1226  *
1227  *   For #2 this is trickier.  Once the ordered extent runs we will drop the
1228  *   extent in the range we are overwriting, which creates a delayed ref for
1229  *   that freed extent.  This however is not reclaimed until the transaction
1230  *   commits, thus the next stages.
1231  *
1232  * RUN_DELAYED_IPUTS
1233  *   If we are freeing inodes, we want to make sure all delayed iputs have
1234  *   completed, because they could have been on an inode with i_nlink == 0, and
1235  *   thus have been truncated and freed up space.  But again this space is not
1236  *   immediately re-usable, it comes in the form of a delayed ref, which must be
1237  *   run and then the transaction must be committed.
1238  *
1239  * COMMIT_TRANS
1240  *   This is where we reclaim all of the pinned space generated by running the
1241  *   iputs
1242  *
1243  * ALLOC_CHUNK_FORCE
1244  *   For data we start with alloc chunk force, however we could have been full
1245  *   before, and then the transaction commit could have freed new block groups,
1246  *   so if we now have space to allocate do the force chunk allocation.
1247  */
1248 static const enum btrfs_flush_state data_flush_states[] = {
1249     FLUSH_DELALLOC_FULL,
1250     RUN_DELAYED_IPUTS,
1251     COMMIT_TRANS,
1252     ALLOC_CHUNK_FORCE,
1253 };
1254 
1255 static void btrfs_async_reclaim_data_space(struct work_struct *work)
1256 {
1257     struct btrfs_fs_info *fs_info;
1258     struct btrfs_space_info *space_info;
1259     u64 last_tickets_id;
1260     enum btrfs_flush_state flush_state = 0;
1261 
1262     fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
1263     space_info = fs_info->data_sinfo;
1264 
1265     spin_lock(&space_info->lock);
1266     if (list_empty(&space_info->tickets)) {
1267         space_info->flush = 0;
1268         spin_unlock(&space_info->lock);
1269         return;
1270     }
1271     last_tickets_id = space_info->tickets_id;
1272     spin_unlock(&space_info->lock);
1273 
1274     while (!space_info->full) {
1275         flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1276         spin_lock(&space_info->lock);
1277         if (list_empty(&space_info->tickets)) {
1278             space_info->flush = 0;
1279             spin_unlock(&space_info->lock);
1280             return;
1281         }
1282 
1283         /* Something happened, fail everything and bail. */
1284         if (BTRFS_FS_ERROR(fs_info))
1285             goto aborted_fs;
1286         last_tickets_id = space_info->tickets_id;
1287         spin_unlock(&space_info->lock);
1288     }
1289 
1290     while (flush_state < ARRAY_SIZE(data_flush_states)) {
1291         flush_space(fs_info, space_info, U64_MAX,
1292                 data_flush_states[flush_state], false);
1293         spin_lock(&space_info->lock);
1294         if (list_empty(&space_info->tickets)) {
1295             space_info->flush = 0;
1296             spin_unlock(&space_info->lock);
1297             return;
1298         }
1299 
1300         if (last_tickets_id == space_info->tickets_id) {
1301             flush_state++;
1302         } else {
1303             last_tickets_id = space_info->tickets_id;
1304             flush_state = 0;
1305         }
1306 
1307         if (flush_state >= ARRAY_SIZE(data_flush_states)) {
1308             if (space_info->full) {
1309                 if (maybe_fail_all_tickets(fs_info, space_info))
1310                     flush_state = 0;
1311                 else
1312                     space_info->flush = 0;
1313             } else {
1314                 flush_state = 0;
1315             }
1316 
1317             /* Something happened, fail everything and bail. */
1318             if (BTRFS_FS_ERROR(fs_info))
1319                 goto aborted_fs;
1320 
1321         }
1322         spin_unlock(&space_info->lock);
1323     }
1324     return;
1325 
1326 aborted_fs:
1327     maybe_fail_all_tickets(fs_info, space_info);
1328     space_info->flush = 0;
1329     spin_unlock(&space_info->lock);
1330 }
1331 
1332 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
1333 {
1334     INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
1335     INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
1336     INIT_WORK(&fs_info->preempt_reclaim_work,
1337           btrfs_preempt_reclaim_metadata_space);
1338 }
1339 
1340 static const enum btrfs_flush_state priority_flush_states[] = {
1341     FLUSH_DELAYED_ITEMS_NR,
1342     FLUSH_DELAYED_ITEMS,
1343     ALLOC_CHUNK,
1344 };
1345 
1346 static const enum btrfs_flush_state evict_flush_states[] = {
1347     FLUSH_DELAYED_ITEMS_NR,
1348     FLUSH_DELAYED_ITEMS,
1349     FLUSH_DELAYED_REFS_NR,
1350     FLUSH_DELAYED_REFS,
1351     FLUSH_DELALLOC,
1352     FLUSH_DELALLOC_WAIT,
1353     FLUSH_DELALLOC_FULL,
1354     ALLOC_CHUNK,
1355     COMMIT_TRANS,
1356 };
1357 
1358 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
1359                 struct btrfs_space_info *space_info,
1360                 struct reserve_ticket *ticket,
1361                 const enum btrfs_flush_state *states,
1362                 int states_nr)
1363 {
1364     u64 to_reclaim;
1365     int flush_state = 0;
1366 
1367     spin_lock(&space_info->lock);
1368     to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1369     /*
1370      * This is the priority reclaim path, so to_reclaim could be >0 still
1371      * because we may have only satisfied the priority tickets and still
1372      * left non priority tickets on the list.  We would then have
1373      * to_reclaim but ->bytes == 0.
1374      */
1375     if (ticket->bytes == 0) {
1376         spin_unlock(&space_info->lock);
1377         return;
1378     }
1379 
1380     while (flush_state < states_nr) {
1381         spin_unlock(&space_info->lock);
1382         flush_space(fs_info, space_info, to_reclaim, states[flush_state],
1383                 false);
1384         flush_state++;
1385         spin_lock(&space_info->lock);
1386         if (ticket->bytes == 0) {
1387             spin_unlock(&space_info->lock);
1388             return;
1389         }
1390     }
1391 
1392     /* Attempt to steal from the global rsv if we can. */
1393     if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
1394         ticket->error = -ENOSPC;
1395         remove_ticket(space_info, ticket);
1396     }
1397 
1398     /*
1399      * We must run try_granting_tickets here because we could be a large
1400      * ticket in front of a smaller ticket that can now be satisfied with
1401      * the available space.
1402      */
1403     btrfs_try_granting_tickets(fs_info, space_info);
1404     spin_unlock(&space_info->lock);
1405 }
1406 
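/*
 * A generic sketch (hypothetical names) of the lock/flush/recheck pattern
 * used above: space_info->lock is a spinlock and flush_space() can sleep,
 * so the lock is dropped around each flush and the ticket must be rechecked
 * once the lock is retaken, because the flusher may have granted it in the
 * meantime.
 */
static bool sketch_flush_until_granted(spinlock_t *lock, u64 *ticket_bytes,
                                       void (*do_flush)(int state),
                                       int nr_states)
{
    bool granted;
    int state = 0;

    spin_lock(lock);
    while (*ticket_bytes > 0 && state < nr_states) {
        spin_unlock(lock);
        do_flush(state++);    /* may sleep: lock must not be held */
        spin_lock(lock);
    }
    granted = (*ticket_bytes == 0);
    spin_unlock(lock);
    return granted;
}
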
1407 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
1408                     struct btrfs_space_info *space_info,
1409                     struct reserve_ticket *ticket)
1410 {
1411     spin_lock(&space_info->lock);
1412 
1413     /* We could have been granted before we got here. */
1414     if (ticket->bytes == 0) {
1415         spin_unlock(&space_info->lock);
1416         return;
1417     }
1418 
1419     while (!space_info->full) {
1420         spin_unlock(&space_info->lock);
1421         flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1422         spin_lock(&space_info->lock);
1423         if (ticket->bytes == 0) {
1424             spin_unlock(&space_info->lock);
1425             return;
1426         }
1427     }
1428 
1429     ticket->error = -ENOSPC;
1430     remove_ticket(space_info, ticket);
1431     btrfs_try_granting_tickets(fs_info, space_info);
1432     spin_unlock(&space_info->lock);
1433 }
1434 
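/*
 * Note the contrast with the metadata path above: the data priority path
 * has no staged state table.  The only lever it pulls is forcing chunk
 * allocation, and once the space_info is marked full there is nothing left
 * to try, so the ticket fails with -ENOSPC.
 */
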
1435 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
1436                 struct btrfs_space_info *space_info,
1437                 struct reserve_ticket *ticket)
1439 {
1440     DEFINE_WAIT(wait);
1441     int ret = 0;
1442 
1443     spin_lock(&space_info->lock);
1444     while (ticket->bytes > 0 && ticket->error == 0) {
1445         ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
1446         if (ret) {
1447             /*
1448              * Delete us from the list. After we unlock the space
1449              * info, we don't want the async reclaim job to reserve
1450              * space for this ticket. If that happened, then the
1451              * ticket's task would not know that space was reserved
1452              * despite getting an error, resulting in a space leak
1453              * (bytes_may_use counter of our space_info).
1454              */
1455             remove_ticket(space_info, ticket);
1456             ticket->error = -EINTR;
1457             break;
1458         }
1459         spin_unlock(&space_info->lock);
1460 
1461         schedule();
1462 
1463         finish_wait(&ticket->wait, &wait);
1464         spin_lock(&space_info->lock);
1465     }
1466     spin_unlock(&space_info->lock);
1467 }
1468 
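/*
 * A stripped-down sketch (hypothetical helper, locking elided) of the
 * killable wait above: prepare_to_wait_event() returns -ERESTARTSYS when a
 * fatal signal is pending instead of enqueuing the waiter, which is what
 * lets wait_reserve_ticket() bail out and convert the ticket to -EINTR
 * rather than leaving an unkillable task asleep.
 */
static long sketch_wait_killable(wait_queue_head_t *wq, u64 *bytes)
{
    DEFINE_WAIT(wait);
    long ret = 0;

    while (*bytes > 0) {
        ret = prepare_to_wait_event(wq, &wait, TASK_KILLABLE);
        if (ret)    /* fatal signal pending: -ERESTARTSYS */
            break;
        schedule();
        finish_wait(wq, &wait);
    }
    return ret;
}
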
1469 /**
1470  * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket
1471  *
1472  * @fs_info:    the filesystem
1473  * @space_info: space info for the reservation
1474  * @ticket:     ticket for the reservation
1475  * @start_ns:   timestamp when the reservation started
1476  * @orig_bytes: number of bytes originally reserved
1477  * @flush:      how much we can flush
1478  *
1479  * This does the work of figuring out how to flush for the ticket, waiting for
1480  * the reservation, and returning the appropriate error if there is one.
1481  */
1482 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
1483                  struct btrfs_space_info *space_info,
1484                  struct reserve_ticket *ticket,
1485                  u64 start_ns, u64 orig_bytes,
1486                  enum btrfs_reserve_flush_enum flush)
1487 {
1488     int ret;
1489 
1490     switch (flush) {
1491     case BTRFS_RESERVE_FLUSH_DATA:
1492     case BTRFS_RESERVE_FLUSH_ALL:
1493     case BTRFS_RESERVE_FLUSH_ALL_STEAL:
1494         wait_reserve_ticket(fs_info, space_info, ticket);
1495         break;
1496     case BTRFS_RESERVE_FLUSH_LIMIT:
1497         priority_reclaim_metadata_space(fs_info, space_info, ticket,
1498                         priority_flush_states,
1499                         ARRAY_SIZE(priority_flush_states));
1500         break;
1501     case BTRFS_RESERVE_FLUSH_EVICT:
1502         priority_reclaim_metadata_space(fs_info, space_info, ticket,
1503                         evict_flush_states,
1504                         ARRAY_SIZE(evict_flush_states));
1505         break;
1506     case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
1507         priority_reclaim_data_space(fs_info, space_info, ticket);
1508         break;
1509     default:
1510         ASSERT(0);
1511         break;
1512     }
1513 
1514     ret = ticket->error;
1515     ASSERT(list_empty(&ticket->list));
1516     /*
1517      * Check that we can't have an error set if the reservation succeeded,
1518      * as that would confuse tasks and lead them to error out without
1519      * releasing reserved space (if an error happens the expectation is that
1520      * space wasn't reserved at all).
1521      */
1522     ASSERT(!(ticket->bytes == 0 && ticket->error));
1523     trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
1524                    start_ns, flush, ticket->error);
1525     return ret;
1526 }
1527 
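/*
 * Summary of the dispatch in handle_reserve_ticket() above:
 *
 *   BTRFS_RESERVE_FLUSH_DATA/_ALL/_ALL_STEAL -> sleep, the async flusher works
 *   BTRFS_RESERVE_FLUSH_LIMIT                -> flush here, small state table
 *   BTRFS_RESERVE_FLUSH_EVICT                -> flush here, evict state table
 *   BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE     -> flush here, data chunks only
 */
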
1528 /*
1529  * This returns true if this flush state will go through the ordinary flushing
1530  * code.
1531  */
1532 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
1533 {
1534     return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
1535         (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1536 }
1537 
1538 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
1539                        struct btrfs_space_info *space_info)
1540 {
1541     u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
1542     u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
1543 
1544     /*
1545      * If we're heavy on ordered operations then clamping won't help us.  We
1546      * need to clamp specifically to keep up with dirtying buffered
1547      * writers, because there's not a 1:1 correlation of writing delalloc
1548      * and freeing space, like there is with flushing delayed refs or
1549      * delayed nodes.  If we're already more ordered than delalloc then
1550      * we're keeping up, otherwise we aren't and should probably clamp.
1551      */
1552     if (ordered < delalloc)
1553         space_info->clamp = min(space_info->clamp + 1, 8);
1554 }
1555 
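/*
 * A worked example for the rule above (illustrative numbers): with 64M of
 * dirty delalloc but only 16M of ordered extents in flight, writeback is
 * not keeping up with the dirtiers, so the clamp is bumped, e.g. 3 -> 4,
 * capped at 8.  With ordered >= delalloc we are keeping up and the clamp is
 * left alone.
 */
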
1556 static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
1557 {
1558     return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1559         flush == BTRFS_RESERVE_FLUSH_EVICT);
1560 }
1561 
1562 /**
1563  * __reserve_bytes - try to reserve bytes from the block_rsv's space
1564  *
1565  * @fs_info:    the filesystem
1566  * @space_info: space info we want to allocate from
1567  * @orig_bytes: number of bytes we want
1568  * @flush:      whether or not we can flush to make our reservation
1569  *
1570  * This will reserve orig_bytes number of bytes from the space info associated
1571  * with the block_rsv.  If there is not enough space it will make an attempt to
1572  * flush out space to make room.  It will do this by flushing delalloc if
1573  * possible or committing the transaction.  If @flush is
1574  * BTRFS_RESERVE_NO_FLUSH then no attempt to regain reservations will be
1575  * made and this will fail if there is not enough space already.
1576  */
1577 static int __reserve_bytes(struct btrfs_fs_info *fs_info,
1578                struct btrfs_space_info *space_info, u64 orig_bytes,
1579                enum btrfs_reserve_flush_enum flush)
1580 {
1581     struct work_struct *async_work;
1582     struct reserve_ticket ticket;
1583     u64 start_ns = 0;
1584     u64 used;
1585     int ret = 0;
1586     bool pending_tickets;
1587 
1588     ASSERT(orig_bytes);
1589     ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
1590 
1591     if (flush == BTRFS_RESERVE_FLUSH_DATA)
1592         async_work = &fs_info->async_data_reclaim_work;
1593     else
1594         async_work = &fs_info->async_reclaim_work;
1595 
1596     spin_lock(&space_info->lock);
1597     ret = -ENOSPC;
1598     used = btrfs_space_info_used(space_info, true);
1599 
1600     /*
1601      * We don't want NO_FLUSH allocations to jump everybody; they can
1602      * generally handle ENOSPC in a different way, so treat them the same as
1603      * normal flushers when it comes to skipping pending tickets.
1604      */
1605     if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
1606         pending_tickets = !list_empty(&space_info->tickets) ||
1607             !list_empty(&space_info->priority_tickets);
1608     else
1609         pending_tickets = !list_empty(&space_info->priority_tickets);
1610 
1611     /*
1612      * Carry on if we have enough space (short-circuit) OR call
1613      * can_overcommit() to ensure we can overcommit to continue.
1614      */
1615     if (!pending_tickets &&
1616         ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
1617          btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
1618         btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1619                               orig_bytes);
1620         ret = 0;
1621     }
1622 
1623     /*
1624      * If we couldn't make a reservation then setup our reservation ticket
1625      * and kick the async worker if it's not already running.
1626      *
1627      * If we are a priority flusher then we just need to add our ticket to
1628      * the list and we will do our own flushing further down.
1629      */
1630     if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
1631         ticket.bytes = orig_bytes;
1632         ticket.error = 0;
1633         space_info->reclaim_size += ticket.bytes;
1634         init_waitqueue_head(&ticket.wait);
1635         ticket.steal = can_steal(flush);
1636         if (trace_btrfs_reserve_ticket_enabled())
1637             start_ns = ktime_get_ns();
1638 
1639         if (flush == BTRFS_RESERVE_FLUSH_ALL ||
1640             flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1641             flush == BTRFS_RESERVE_FLUSH_DATA) {
1642             list_add_tail(&ticket.list, &space_info->tickets);
1643             if (!space_info->flush) {
1644                 /*
1645                  * We were forced to add a reserve ticket, so
1646                  * our preemptive flushing is unable to keep
1647                  * up.  Clamp down on the threshold for the
1648                  * preemptive flushing in order to keep up with
1649                  * the workload.
1650                  */
1651                 maybe_clamp_preempt(fs_info, space_info);
1652 
1653                 space_info->flush = 1;
1654                 trace_btrfs_trigger_flush(fs_info,
1655                               space_info->flags,
1656                               orig_bytes, flush,
1657                               "enospc");
1658                 queue_work(system_unbound_wq, async_work);
1659             }
1660         } else {
1661             list_add_tail(&ticket.list,
1662                       &space_info->priority_tickets);
1663         }
1664     } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
1665         used += orig_bytes;
1666         /*
1667          * We will do the space reservation dance during log replay,
1668          * which means we won't have fs_info->fs_root set, so don't do
1669          * the async reclaim as we will panic.
1670          */
1671         if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
1672             !work_busy(&fs_info->preempt_reclaim_work) &&
1673             need_preemptive_reclaim(fs_info, space_info)) {
1674             trace_btrfs_trigger_flush(fs_info, space_info->flags,
1675                           orig_bytes, flush, "preempt");
1676             queue_work(system_unbound_wq,
1677                    &fs_info->preempt_reclaim_work);
1678         }
1679     }
1680     spin_unlock(&space_info->lock);
1681     if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
1682         return ret;
1683 
1684     return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
1685                      orig_bytes, flush);
1686 }
1687 
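/*
 * Sequence sketch for the slow path of __reserve_bytes() above:
 *
 *   1) The fast-path reservation fails, so the on-stack ticket is queued on
 *      ->tickets (and the async flusher kicked) or on ->priority_tickets
 *      (the caller will flush for itself).
 *   2) handle_reserve_ticket() waits or flushes until ticket.bytes reaches 0
 *      or ticket.error is set.
 *   3) Either way the ticket is off the list before this function returns,
 *      which is the only reason a stack-allocated ticket is safe here.
 */
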
1688 /**
1689  * btrfs_reserve_metadata_bytes - try to reserve metadata bytes
1690  *
1691  * @fs_info:    the filesystem
1692  * @block_rsv:  block_rsv we're allocating for
1693  * @orig_bytes: number of bytes we want
1694  * @flush:      whether or not we can flush to make our reservation
1695  *
1696  * This will reserve orig_bytes number of bytes from the space info associated
1697  * with the block_rsv.  If there is not enough space it will make an attempt to
1698  * flush out space to make room.  It will do this by flushing delalloc if
1699  * possible or committing the transaction.  If @flush is
1700  * BTRFS_RESERVE_NO_FLUSH then no attempt to regain reservations will be
1701  * made and this will fail if there is not enough space already.
1702  */
1703 int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
1704                  struct btrfs_block_rsv *block_rsv,
1705                  u64 orig_bytes,
1706                  enum btrfs_reserve_flush_enum flush)
1707 {
1708     int ret;
1709 
1710     ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
1711     if (ret == -ENOSPC) {
1712         trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1713                           block_rsv->space_info->flags,
1714                           orig_bytes, 1);
1715 
1716         if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1717             btrfs_dump_space_info(fs_info, block_rsv->space_info,
1718                           orig_bytes, 0);
1719     }
1720     return ret;
1721 }
1722 
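/*
 * A sketch of a typical call site (hypothetical helper; the size
 * calculation mirrors how the block reserve code sizes reservations):
 * reserve the worst case for inserting nr_items items and let the full
 * flush machinery run on ENOSPC.
 */
static int sketch_reserve_for_insert(struct btrfs_fs_info *fs_info,
                                     struct btrfs_block_rsv *rsv,
                                     unsigned int nr_items)
{
    u64 bytes = btrfs_calc_insert_metadata_size(fs_info, nr_items);

    return btrfs_reserve_metadata_bytes(fs_info, rsv, bytes,
                                        BTRFS_RESERVE_FLUSH_ALL);
}
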
1723 /**
1724  * btrfs_reserve_data_bytes - try to reserve data bytes for an allocation
1725  *
1726  * @fs_info: the filesystem
1727  * @bytes:   number of bytes we need
1728  * @flush:   how we are allowed to flush
1729  *
1730  * This will reserve bytes from the data space info.  If there is not enough
1731  * space then we will attempt to flush space as specified by flush.
1732  */
1733 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
1734                  enum btrfs_reserve_flush_enum flush)
1735 {
1736     struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
1737     int ret;
1738 
1739     ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
1740            flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE);
1741     ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
1742 
1743     ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
1744     if (ret == -ENOSPC) {
1745         trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1746                           data_sinfo->flags, bytes, 1);
1747         if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1748             btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
1749     }
1750     return ret;
1751 }
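
/*
 * A sketch of a typical data reservation (hypothetical caller, mirroring
 * the buffered write path): round the byte range out to sectorsize and
 * reserve it, allowing the full data flushing machinery on ENOSPC.
 */
static int sketch_reserve_for_write(struct btrfs_fs_info *fs_info,
                                    u64 start, u64 len)
{
    u64 bytes = round_up(start + len, fs_info->sectorsize) -
                round_down(start, fs_info->sectorsize);

    return btrfs_reserve_data_bytes(fs_info, bytes,
                                    BTRFS_RESERVE_FLUSH_DATA);
}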