0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * background writeback - scan btree for dirty data and write it to the backing
0004  * device
0005  *
0006  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
0007  * Copyright 2012 Google, Inc.
0008  */
0009 
0010 #include "bcache.h"
0011 #include "btree.h"
0012 #include "debug.h"
0013 #include "writeback.h"
0014 
0015 #include <linux/delay.h>
0016 #include <linux/kthread.h>
0017 #include <linux/sched/clock.h>
0018 #include <trace/events/bcache.h>
0019 
0020 static void update_gc_after_writeback(struct cache_set *c)
0021 {
0022     if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
0023         c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
0024         return;
0025 
0026     c->gc_after_writeback |= BCH_DO_AUTO_GC;
0027 }
0028 
0029 /* Rate limiting */
0030 static uint64_t __calc_target_rate(struct cached_dev *dc)
0031 {
0032     struct cache_set *c = dc->disk.c;
0033 
0034     /*
0035      * This is the size of the cache, minus the amount used for
0036      * flash-only devices
0037      */
0038     uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
0039                 atomic_long_read(&c->flash_dev_dirty_sectors);
0040 
0041     /*
0042      * Unfortunately there is no control of global dirty data.  If the
0043      * user states that they want 10% dirty data in the cache, and has,
0044      * e.g., 5 backing volumes of equal size, we try and ensure each
0045      * backing volume uses about 2% of the cache for dirty data.
0046      */
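    /*
     * Illustrative example (assumed numbers): with cache_sectors =
     * 2,000,000, writeback_percent = 10 and two backing devices of equal
     * size, bdev_share below works out to about half of
     * (1 << WRITEBACK_SHARE_SHIFT), cache_dirty_target is 200,000
     * sectors, and the function returns a per-device target of roughly
     * 100,000 dirty sectors.
     */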
0047     uint32_t bdev_share =
0048         div64_u64(bdev_nr_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
0049                 c->cached_dev_sectors);
0050 
0051     uint64_t cache_dirty_target =
0052         div_u64(cache_sectors * dc->writeback_percent, 100);
0053 
0054     /* Ensure each backing dev gets at least one dirty share */
0055     if (bdev_share < 1)
0056         bdev_share = 1;
0057 
0058     return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
0059 }
0060 
0061 static void __update_writeback_rate(struct cached_dev *dc)
0062 {
0063     /*
0064      * PI controller:
0065      * Figures out the amount that should be written per second.
0066      *
0067      * First, the error (number of sectors that are dirty beyond our
0068      * target) is calculated.  The error is accumulated (numerically
0069      * integrated).
0070      *
0071      * Then, the proportional value and integral value are scaled
0072      * based on configured values.  These are stored as inverses to
0073      * avoid fixed point math and to make configuration easy-- e.g.
0074      * the default value of 40 for writeback_rate_p_term_inverse
0075      * attempts to write at a rate that would retire all the dirty
0076      * blocks in 40 seconds.
0077      *
0078      * The writeback_rate_i_inverse value of 10000 means that 1/10000th
0079      * of the error is accumulated in the integral term per second.
0080      * This acts as a slow, long-term average that is not subject to
0081      * variations in usage like the p term.
0082      */
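    /*
     * Illustrative example (assumed numbers): if the device is 40,000
     * sectors over target (error = 40000) and writeback_rate_p_term_inverse
     * is the default 40, the proportional term alone contributes
     * 40000 / 40 = 1000 sectors per second, i.e. it tries to retire the
     * excess in roughly 40 seconds.
     */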
0083     int64_t target = __calc_target_rate(dc);
0084     int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
0085     int64_t error = dirty - target;
0086     int64_t proportional_scaled =
0087         div_s64(error, dc->writeback_rate_p_term_inverse);
0088     int64_t integral_scaled;
0089     uint32_t new_rate;
0090 
0091     /*
0092      * We need to consider the number of dirty buckets as well
0093      * when calculating proportional_scaled. Otherwise we might end up
0094      * with an unreasonably small writeback rate in a highly fragmented
0095      * situation, where very few dirty sectors consume a lot of dirty
0096      * buckets. The worst case is when the dirty buckets reach
0097      * cutoff_writeback_sync while the dirty data has not even reached
0098      * the writeback percent; then the rate stays at the minimum value
0099      * and writes get stuck in non-writeback mode.
0100      */
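    /*
     * Below, fragment = dirty_buckets * bucket_size / dirty, i.e. the
     * average number of bucket-sized units pinned per unit of dirty
     * data. proportional_scaled is only overridden when fragment > 3
     * (buckets are, on average, less than one third dirty) and the
     * fragmentation term fps exceeds the plain proportional term.
     */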
0101     struct cache_set *c = dc->disk.c;
0102 
0103     int64_t dirty_buckets = c->nbuckets - c->avail_nbuckets;
0104 
0105     if (dc->writeback_consider_fragment &&
0106         c->gc_stats.in_use > BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW && dirty > 0) {
0107         int64_t fragment =
0108             div_s64((dirty_buckets *  c->cache->sb.bucket_size), dirty);
0109         int64_t fp_term;
0110         int64_t fps;
0111 
0112         if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) {
0113             fp_term = (int64_t)dc->writeback_rate_fp_term_low *
0114             (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW);
0115         } else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) {
0116             fp_term = (int64_t)dc->writeback_rate_fp_term_mid *
0117             (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID);
0118         } else {
0119             fp_term = (int64_t)dc->writeback_rate_fp_term_high *
0120             (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH);
0121         }
0122         fps = div_s64(dirty, dirty_buckets) * fp_term;
0123         if (fragment > 3 && fps > proportional_scaled) {
0124             /* Only overwrite the p term when fragment > 3 */
0125             proportional_scaled = fps;
0126         }
0127     }
0128 
0129     if ((error < 0 && dc->writeback_rate_integral > 0) ||
0130         (error > 0 && time_before64(local_clock(),
0131              dc->writeback_rate.next + NSEC_PER_MSEC))) {
0132         /*
0133          * Only decrease the integral term if it's more than
0134          * zero.  Only increase the integral term if the device
0135          * is keeping up.  (Don't wind up the integral
0136          * ineffectively in either case).
0137          *
0138          * It's necessary to scale this by
0139          * writeback_rate_update_seconds to keep the integral
0140          * term dimensioned properly.
0141          */
0142         dc->writeback_rate_integral += error *
0143             dc->writeback_rate_update_seconds;
0144     }
0145 
0146     integral_scaled = div_s64(dc->writeback_rate_integral,
0147             dc->writeback_rate_i_term_inverse);
0148 
0149     new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
0150             dc->writeback_rate_minimum, NSEC_PER_SEC);
0151 
0152     dc->writeback_rate_proportional = proportional_scaled;
0153     dc->writeback_rate_integral_scaled = integral_scaled;
0154     dc->writeback_rate_change = new_rate -
0155             atomic_long_read(&dc->writeback_rate.rate);
0156     atomic_long_set(&dc->writeback_rate.rate, new_rate);
0157     dc->writeback_rate_target = target;
0158 }
0159 
0160 static bool set_at_max_writeback_rate(struct cache_set *c,
0161                        struct cached_dev *dc)
0162 {
0163     /* Don't set max writeback rate if it is disabled */
0164     if (!c->idle_max_writeback_rate_enabled)
0165         return false;
0166 
0167     /* Don't set max writeback rate if gc is running */
0168     if (!c->gc_mark_valid)
0169         return false;
0170     /*
0171      * Idle_counter is increased every time update_writeback_rate() is
0172      * called. If all backing devices attached to the same cache set have
0173      * identical dc->writeback_rate_update_seconds values, it takes about 6
0174      * rounds of update_writeback_rate() on each backing device before
0175      * c->at_max_writeback_rate is set to 1, and then the maximum writeback
0176      * rate is set on each dc->writeback_rate.rate.
0177      * In order to avoid the extra locking cost of counting the exact number
0178      * of dirty cached devices, c->attached_dev_nr is used to calculate the
0179      * idle threshold. It might be bigger if not all cached devices are in
0180      * writeback mode, but it still works well with a limited number of
0181      * extra rounds of update_writeback_rate().
0182      */
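    /*
     * For example (assumed device count): with 3 attached backing devices
     * the threshold below is 3 * 6 = 18, so roughly 18 rounds of
     * update_writeback_rate() across the cache set are needed before the
     * maximum writeback rate is applied.
     */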
0183     if (atomic_inc_return(&c->idle_counter) <
0184         atomic_read(&c->attached_dev_nr) * 6)
0185         return false;
0186 
0187     if (atomic_read(&c->at_max_writeback_rate) != 1)
0188         atomic_set(&c->at_max_writeback_rate, 1);
0189 
0190     atomic_long_set(&dc->writeback_rate.rate, INT_MAX);
0191 
0192     /* keep writeback_rate_target at its existing value */
0193     dc->writeback_rate_proportional = 0;
0194     dc->writeback_rate_integral_scaled = 0;
0195     dc->writeback_rate_change = 0;
0196 
0197     /*
0198      * Check c->idle_counter and c->at_max_writeback_rate again in case
0199      * new I/O arrives before set_at_max_writeback_rate() returns. In
0200      * that case the writeback rate is set to 1, and its new value
0201      * should be decided via __update_writeback_rate().
0202      */
0203     if ((atomic_read(&c->idle_counter) <
0204          atomic_read(&c->attached_dev_nr) * 6) ||
0205         !atomic_read(&c->at_max_writeback_rate))
0206         return false;
0207 
0208     return true;
0209 }
0210 
0211 static void update_writeback_rate(struct work_struct *work)
0212 {
0213     struct cached_dev *dc = container_of(to_delayed_work(work),
0214                          struct cached_dev,
0215                          writeback_rate_update);
0216     struct cache_set *c = dc->disk.c;
0217 
0218     /*
0219      * should check BCACHE_DEV_RATE_DW_RUNNING before calling
0220      * cancel_delayed_work_sync().
0221      */
0222     set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
0223     /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
0224     smp_mb__after_atomic();
0225 
0226     /*
0227      * CACHE_SET_IO_DISABLE might be set via sysfs interface,
0228      * check it here too.
0229      */
0230     if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
0231         test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
0232         clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
0233         /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
0234         smp_mb__after_atomic();
0235         return;
0236     }
0237 
0238     /*
0239      * If the whole cache set is idle, set_at_max_writeback_rate()
0240      * will set the writeback rate to a maximum number. Then it is
0241      * unnecessary to update the writeback rate for an idle cache
0242      * set that is already at the maximum writeback rate.
0243      */
0244     if (atomic_read(&dc->has_dirty) && dc->writeback_percent &&
0245         !set_at_max_writeback_rate(c, dc)) {
0246         do {
0247             if (!down_read_trylock((&dc->writeback_lock))) {
0248                 dc->rate_update_retry++;
0249                 if (dc->rate_update_retry <=
0250                     BCH_WBRATE_UPDATE_MAX_SKIPS)
0251                     break;
0252                 down_read(&dc->writeback_lock);
0253                 dc->rate_update_retry = 0;
0254             }
0255             __update_writeback_rate(dc);
0256             update_gc_after_writeback(c);
0257             up_read(&dc->writeback_lock);
0258         } while (0);
0259     }
0260 
0261 
0262     /*
0263      * CACHE_SET_IO_DISABLE might be set via sysfs interface,
0264      * check it here too.
0265      */
0266     if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
0267         !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
0268         schedule_delayed_work(&dc->writeback_rate_update,
0269                   dc->writeback_rate_update_seconds * HZ);
0270     }
0271 
0272     /*
0273      * should check BCACHE_DEV_RATE_DW_RUNNING before calling
0274      * cancel_delayed_work_sync().
0275      */
0276     clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
0277     /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
0278     smp_mb__after_atomic();
0279 }
0280 
0281 static unsigned int writeback_delay(struct cached_dev *dc,
0282                     unsigned int sectors)
0283 {
0284     if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
0285         !dc->writeback_percent)
0286         return 0;
0287 
0288     return bch_next_delay(&dc->writeback_rate, sectors);
0289 }
0290 
0291 struct dirty_io {
0292     struct closure      cl;
0293     struct cached_dev   *dc;
0294     uint16_t        sequence;
0295     struct bio      bio;
0296 };
0297 
0298 static void dirty_init(struct keybuf_key *w)
0299 {
0300     struct dirty_io *io = w->private;
0301     struct bio *bio = &io->bio;
0302 
0303     bio_init(bio, NULL, bio->bi_inline_vecs,
0304          DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
0305     if (!io->dc->writeback_percent)
0306         bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
0307 
0308     bio->bi_iter.bi_size    = KEY_SIZE(&w->key) << 9;
0309     bio->bi_private     = w;
0310     bch_bio_map(bio, NULL);
0311 }
0312 
0313 static void dirty_io_destructor(struct closure *cl)
0314 {
0315     struct dirty_io *io = container_of(cl, struct dirty_io, cl);
0316 
0317     kfree(io);
0318 }
0319 
0320 static void write_dirty_finish(struct closure *cl)
0321 {
0322     struct dirty_io *io = container_of(cl, struct dirty_io, cl);
0323     struct keybuf_key *w = io->bio.bi_private;
0324     struct cached_dev *dc = io->dc;
0325 
0326     bio_free_pages(&io->bio);
0327 
0328     /* This is kind of a dumb way of signalling errors. */
0329     if (KEY_DIRTY(&w->key)) {
0330         int ret;
0331         unsigned int i;
0332         struct keylist keys;
0333 
0334         bch_keylist_init(&keys);
0335 
0336         bkey_copy(keys.top, &w->key);
0337         SET_KEY_DIRTY(keys.top, false);
0338         bch_keylist_push(&keys);
0339 
0340         for (i = 0; i < KEY_PTRS(&w->key); i++)
0341             atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
0342 
0343         ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
0344 
0345         if (ret)
0346             trace_bcache_writeback_collision(&w->key);
0347 
0348         atomic_long_inc(ret
0349                 ? &dc->disk.c->writeback_keys_failed
0350                 : &dc->disk.c->writeback_keys_done);
0351     }
0352 
0353     bch_keybuf_del(&dc->writeback_keys, w);
0354     up(&dc->in_flight);
0355 
0356     closure_return_with_destructor(cl, dirty_io_destructor);
0357 }
0358 
0359 static void dirty_endio(struct bio *bio)
0360 {
0361     struct keybuf_key *w = bio->bi_private;
0362     struct dirty_io *io = w->private;
0363 
0364     if (bio->bi_status) {
0365         SET_KEY_DIRTY(&w->key, false);
0366         bch_count_backing_io_errors(io->dc, bio);
0367     }
0368 
0369     closure_put(&io->cl);
0370 }
0371 
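/*
 * Writes to the backing device are issued in order: each dirty_io carries
 * a sequence number, and write_dirty() only proceeds once
 * dc->writeback_sequence_next matches its sequence. Otherwise it parks on
 * dc->writeback_ordering_wait and is re-driven from the writeback
 * workqueue when an earlier write bumps the sequence counter.
 */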
0372 static void write_dirty(struct closure *cl)
0373 {
0374     struct dirty_io *io = container_of(cl, struct dirty_io, cl);
0375     struct keybuf_key *w = io->bio.bi_private;
0376     struct cached_dev *dc = io->dc;
0377 
0378     uint16_t next_sequence;
0379 
0380     if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
0381         /* Not our turn to write; wait for a write to complete */
0382         closure_wait(&dc->writeback_ordering_wait, cl);
0383 
0384         if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
0385             /*
0386              * Edge case: it happened in indeterminate order
0387              * relative to when we were added to the wait list.
0388              */
0389             closure_wake_up(&dc->writeback_ordering_wait);
0390         }
0391 
0392         continue_at(cl, write_dirty, io->dc->writeback_write_wq);
0393         return;
0394     }
0395 
0396     next_sequence = io->sequence + 1;
0397 
0398     /*
0399      * IO errors are signalled using the dirty bit on the key.
0400      * If we failed to read, we should not attempt to write to the
0401      * backing device.  Instead, immediately go to write_dirty_finish
0402      * to clean up.
0403      */
0404     if (KEY_DIRTY(&w->key)) {
0405         dirty_init(w);
0406         bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
0407         io->bio.bi_iter.bi_sector = KEY_START(&w->key);
0408         bio_set_dev(&io->bio, io->dc->bdev);
0409         io->bio.bi_end_io   = dirty_endio;
0410 
0411         /* I/O request sent to backing device */
0412         closure_bio_submit(io->dc->disk.c, &io->bio, cl);
0413     }
0414 
0415     atomic_set(&dc->writeback_sequence_next, next_sequence);
0416     closure_wake_up(&dc->writeback_ordering_wait);
0417 
0418     continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
0419 }
0420 
0421 static void read_dirty_endio(struct bio *bio)
0422 {
0423     struct keybuf_key *w = bio->bi_private;
0424     struct dirty_io *io = w->private;
0425 
0426     /* is_read = 1 */
0427     bch_count_io_errors(io->dc->disk.c->cache,
0428                 bio->bi_status, 1,
0429                 "reading dirty data from cache");
0430 
0431     dirty_endio(bio);
0432 }
0433 
0434 static void read_dirty_submit(struct closure *cl)
0435 {
0436     struct dirty_io *io = container_of(cl, struct dirty_io, cl);
0437 
0438     closure_bio_submit(io->dc->disk.c, &io->bio, cl);
0439 
0440     continue_at(cl, write_dirty, io->dc->writeback_write_wq);
0441 }
0442 
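/*
 * read_dirty() walks the refilled writeback keybuf, batches up to
 * MAX_WRITEBACKS_IN_PASS contiguous keys (bounded by MAX_WRITESIZE_IN_PASS
 * sectors of data), reads the dirty data from the cache device, and hands
 * each completed read to write_dirty() on the writeback workqueue.
 * Progress between batches is throttled via writeback_delay().
 */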
0443 static void read_dirty(struct cached_dev *dc)
0444 {
0445     unsigned int delay = 0;
0446     struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
0447     size_t size;
0448     int nk, i;
0449     struct dirty_io *io;
0450     struct closure cl;
0451     uint16_t sequence = 0;
0452 
0453     BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
0454     atomic_set(&dc->writeback_sequence_next, sequence);
0455     closure_init_stack(&cl);
0456 
0457     /*
0458      * XXX: if we error, background writeback just spins. Should use some
0459      * mempools.
0460      */
0461 
0462     next = bch_keybuf_next(&dc->writeback_keys);
0463 
0464     while (!kthread_should_stop() &&
0465            !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
0466            next) {
0467         size = 0;
0468         nk = 0;
0469 
0470         do {
0471             BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
0472 
0473             /*
0474              * Don't combine too many operations, even if they
0475              * are all small.
0476              */
0477             if (nk >= MAX_WRITEBACKS_IN_PASS)
0478                 break;
0479 
0480             /*
0481              * If the current operation is very large, don't
0482              * further combine operations.
0483              */
0484             if (size >= MAX_WRITESIZE_IN_PASS)
0485                 break;
0486 
0487             /*
0488              * Operations are only eligible to be combined
0489              * if they are contiguous.
0490              *
0491              * TODO: add a heuristic willing to fire a
0492              * certain amount of non-contiguous IO per pass,
0493              * so that we can benefit from backing device
0494              * command queueing.
0495              */
0496             if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
0497                         &START_KEY(&next->key)))
0498                 break;
0499 
0500             size += KEY_SIZE(&next->key);
0501             keys[nk++] = next;
0502         } while ((next = bch_keybuf_next(&dc->writeback_keys)));
0503 
0504         /* Now we have gathered a set of 1..5 keys to write back. */
0505         for (i = 0; i < nk; i++) {
0506             w = keys[i];
0507 
0508             io = kzalloc(struct_size(io, bio.bi_inline_vecs,
0509                         DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
0510                      GFP_KERNEL);
0511             if (!io)
0512                 goto err;
0513 
0514             w->private  = io;
0515             io->dc      = dc;
0516             io->sequence    = sequence++;
0517 
0518             dirty_init(w);
0519             bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
0520             io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
0521             bio_set_dev(&io->bio, dc->disk.c->cache->bdev);
0522             io->bio.bi_end_io   = read_dirty_endio;
0523 
0524             if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
0525                 goto err_free;
0526 
0527             trace_bcache_writeback(&w->key);
0528 
0529             down(&dc->in_flight);
0530 
0531             /*
0532              * We've acquired a semaphore for the maximum
0533              * simultaneous number of writebacks; from here
0534              * everything happens asynchronously.
0535              */
0536             closure_call(&io->cl, read_dirty_submit, NULL, &cl);
0537         }
0538 
0539         delay = writeback_delay(dc, size);
0540 
0541         while (!kthread_should_stop() &&
0542                !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
0543                delay) {
0544             schedule_timeout_interruptible(delay);
0545             delay = writeback_delay(dc, 0);
0546         }
0547     }
0548 
0549     if (0) {
0550 err_free:
0551         kfree(w->private);
0552 err:
0553         bch_keybuf_del(&dc->writeback_keys, w);
0554     }
0555 
0556     /*
0557      * Wait for outstanding writeback IOs to finish (and keybuf slots to be
0558      * freed) before refilling again
0559      */
0560     closure_sync(&cl);
0561 }
0562 
0563 /* Scan for dirty data */
0564 
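/*
 * Account nr_sectors of dirty data (nr_sectors may be negative when dirty
 * data is cleaned) against the per-stripe counters of the device, keeping
 * the full_dirty_stripes bitmap in sync so that writeback can prefer
 * stripes which are completely dirty.
 */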
0565 void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
0566                   uint64_t offset, int nr_sectors)
0567 {
0568     struct bcache_device *d = c->devices[inode];
0569     unsigned int stripe_offset, sectors_dirty;
0570     int stripe;
0571 
0572     if (!d)
0573         return;
0574 
0575     stripe = offset_to_stripe(d, offset);
0576     if (stripe < 0)
0577         return;
0578 
0579     if (UUID_FLASH_ONLY(&c->uuids[inode]))
0580         atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
0581 
0582     stripe_offset = offset & (d->stripe_size - 1);
0583 
0584     while (nr_sectors) {
0585         int s = min_t(unsigned int, abs(nr_sectors),
0586                   d->stripe_size - stripe_offset);
0587 
0588         if (nr_sectors < 0)
0589             s = -s;
0590 
0591         if (stripe >= d->nr_stripes)
0592             return;
0593 
0594         sectors_dirty = atomic_add_return(s,
0595                     d->stripe_sectors_dirty + stripe);
0596         if (sectors_dirty == d->stripe_size) {
0597             if (!test_bit(stripe, d->full_dirty_stripes))
0598                 set_bit(stripe, d->full_dirty_stripes);
0599         } else {
0600             if (test_bit(stripe, d->full_dirty_stripes))
0601                 clear_bit(stripe, d->full_dirty_stripes);
0602         }
0603 
0604         nr_sectors -= s;
0605         stripe_offset = 0;
0606         stripe++;
0607     }
0608 }
0609 
0610 static bool dirty_pred(struct keybuf *buf, struct bkey *k)
0611 {
0612     struct cached_dev *dc = container_of(buf,
0613                          struct cached_dev,
0614                          writeback_keys);
0615 
0616     BUG_ON(KEY_INODE(k) != dc->disk.id);
0617 
0618     return KEY_DIRTY(k);
0619 }
0620 
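/*
 * Refill the writeback keybuf only from stripes that are completely dirty,
 * scanning the full_dirty_stripes bitmap from the last scanned position
 * and wrapping around once, until the keybuf freelist is exhausted or
 * every stripe has been considered.
 */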
0621 static void refill_full_stripes(struct cached_dev *dc)
0622 {
0623     struct keybuf *buf = &dc->writeback_keys;
0624     unsigned int start_stripe, next_stripe;
0625     int stripe;
0626     bool wrapped = false;
0627 
0628     stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
0629     if (stripe < 0)
0630         stripe = 0;
0631 
0632     start_stripe = stripe;
0633 
0634     while (1) {
0635         stripe = find_next_bit(dc->disk.full_dirty_stripes,
0636                        dc->disk.nr_stripes, stripe);
0637 
0638         if (stripe == dc->disk.nr_stripes)
0639             goto next;
0640 
0641         next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
0642                          dc->disk.nr_stripes, stripe);
0643 
0644         buf->last_scanned = KEY(dc->disk.id,
0645                     stripe * dc->disk.stripe_size, 0);
0646 
0647         bch_refill_keybuf(dc->disk.c, buf,
0648                   &KEY(dc->disk.id,
0649                        next_stripe * dc->disk.stripe_size, 0),
0650                   dirty_pred);
0651 
0652         if (array_freelist_empty(&buf->freelist))
0653             return;
0654 
0655         stripe = next_stripe;
0656 next:
0657         if (wrapped && stripe > start_stripe)
0658             return;
0659 
0660         if (stripe == dc->disk.nr_stripes) {
0661             stripe = 0;
0662             wrapped = true;
0663         }
0664     }
0665 }
0666 
0667 /*
0668  * Returns true if we scanned the entire disk
0669  */
0670 static bool refill_dirty(struct cached_dev *dc)
0671 {
0672     struct keybuf *buf = &dc->writeback_keys;
0673     struct bkey start = KEY(dc->disk.id, 0, 0);
0674     struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
0675     struct bkey start_pos;
0676 
0677     /*
0678      * Make sure the keybuf position is inside the range for this disk.
0679      * At bringup we might not be attached yet, so this disk's inode
0680      * number isn't initialized at that point.
0681      */
0682     if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
0683         bkey_cmp(&buf->last_scanned, &end) > 0)
0684         buf->last_scanned = start;
0685 
0686     if (dc->partial_stripes_expensive) {
0687         refill_full_stripes(dc);
0688         if (array_freelist_empty(&buf->freelist))
0689             return false;
0690     }
0691 
0692     start_pos = buf->last_scanned;
0693     bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
0694 
0695     if (bkey_cmp(&buf->last_scanned, &end) < 0)
0696         return false;
0697 
0698     /*
0699      * If we get to the end, start scanning again from the beginning,
0700      * and only scan up to where we initially started scanning from:
0701      */
0702     buf->last_scanned = start;
0703     bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
0704 
0705     return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
0706 }
0707 
0708 static int bch_writeback_thread(void *arg)
0709 {
0710     struct cached_dev *dc = arg;
0711     struct cache_set *c = dc->disk.c;
0712     bool searched_full_index;
0713 
0714     bch_ratelimit_reset(&dc->writeback_rate);
0715 
0716     while (!kthread_should_stop() &&
0717            !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
0718         down_write(&dc->writeback_lock);
0719         set_current_state(TASK_INTERRUPTIBLE);
0720         /*
0721          * If the bcache device is detaching, skip here and continue
0722          * to perform writeback. Otherwise, if there is no dirty data
0723          * on the cache, or there is dirty data but writeback is
0724          * disabled, the writeback thread should sleep here and wait
0725          * for others to wake it up.
0726          */
0727         if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
0728             (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
0729             up_write(&dc->writeback_lock);
0730 
0731             if (kthread_should_stop() ||
0732                 test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
0733                 set_current_state(TASK_RUNNING);
0734                 break;
0735             }
0736 
0737             schedule();
0738             continue;
0739         }
0740         set_current_state(TASK_RUNNING);
0741 
0742         searched_full_index = refill_dirty(dc);
0743 
0744         if (searched_full_index &&
0745             RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
0746             atomic_set(&dc->has_dirty, 0);
0747             SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
0748             bch_write_bdev_super(dc, NULL);
0749             /*
0750              * If bcache device is detaching via sysfs interface,
0751              * writeback thread should stop after there is no dirty
0752              * data on cache. BCACHE_DEV_DETACHING flag is set in
0753              * bch_cached_dev_detach().
0754              */
0755             if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
0756                 struct closure cl;
0757 
0758                 closure_init_stack(&cl);
0759                 memset(&dc->sb.set_uuid, 0, 16);
0760                 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
0761 
0762                 bch_write_bdev_super(dc, &cl);
0763                 closure_sync(&cl);
0764 
0765                 up_write(&dc->writeback_lock);
0766                 break;
0767             }
0768 
0769             /*
0770              * When the dirty data rate is high (e.g. 50%+), there might
0771              * be heavy bucket fragmentation after writeback
0772              * finishes, which hurts subsequent write performance.
0773              * If users really care about write performance they
0774              * may set BCH_ENABLE_AUTO_GC via sysfs; then, when
0775              * BCH_DO_AUTO_GC is set, the garbage collection thread
0776              * will be woken up here. After moving gc, the shrunk
0777              * btree and discarded free-bucket SSD space may be
0778              * helpful for subsequent write requests.
0779              */
0780             if (c->gc_after_writeback ==
0781                 (BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
0782                 c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
0783                 force_wake_up_gc(c);
0784             }
0785         }
0786 
0787         up_write(&dc->writeback_lock);
0788 
0789         read_dirty(dc);
0790 
0791         if (searched_full_index) {
0792             unsigned int delay = dc->writeback_delay * HZ;
0793 
0794             while (delay &&
0795                    !kthread_should_stop() &&
0796                    !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
0797                    !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
0798                 delay = schedule_timeout_interruptible(delay);
0799 
0800             bch_ratelimit_reset(&dc->writeback_rate);
0801         }
0802     }
0803 
0804     if (dc->writeback_write_wq) {
0805         flush_workqueue(dc->writeback_write_wq);
0806         destroy_workqueue(dc->writeback_write_wq);
0807     }
0808     cached_dev_put(dc);
0809     wait_for_kthread_stop();
0810 
0811     return 0;
0812 }
0813 
0814 /* Init */
0815 #define INIT_KEYS_EACH_TIME 500000
0816 
0817 struct sectors_dirty_init {
0818     struct btree_op op;
0819     unsigned int    inode;
0820     size_t      count;
0821 };
0822 
0823 static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
0824                  struct bkey *k)
0825 {
0826     struct sectors_dirty_init *op = container_of(_op,
0827                         struct sectors_dirty_init, op);
0828     if (KEY_INODE(k) > op->inode)
0829         return MAP_DONE;
0830 
0831     if (KEY_DIRTY(k))
0832         bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
0833                          KEY_START(k), KEY_SIZE(k));
0834 
0835     op->count++;
0836     if (!(op->count % INIT_KEYS_EACH_TIME))
0837         cond_resched();
0838 
0839     return MAP_CONTINUE;
0840 }
0841 
0842 static int bch_root_node_dirty_init(struct cache_set *c,
0843                      struct bcache_device *d,
0844                      struct bkey *k)
0845 {
0846     struct sectors_dirty_init op;
0847     int ret;
0848 
0849     bch_btree_op_init(&op.op, -1);
0850     op.inode = d->id;
0851     op.count = 0;
0852 
0853     ret = bcache_btree(map_keys_recurse,
0854                k,
0855                c->root,
0856                &op.op,
0857                &KEY(op.inode, 0, 0),
0858                sectors_dirty_init_fn,
0859                0);
0860     if (ret < 0)
0861         pr_warn("sectors dirty init failed, ret=%d!\n", ret);
0862 
0863     return ret;
0864 }
0865 
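/*
 * Worker thread for the multi-threaded dirty-sector initialization: all
 * threads share a cursor in state->key_idx, so each thread claims the next
 * unclaimed top-level key of the root node and walks the subtree below it
 * via bch_root_node_dirty_init().
 */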
0866 static int bch_dirty_init_thread(void *arg)
0867 {
0868     struct dirty_init_thrd_info *info = arg;
0869     struct bch_dirty_init_state *state = info->state;
0870     struct cache_set *c = state->c;
0871     struct btree_iter iter;
0872     struct bkey *k, *p;
0873     int cur_idx, prev_idx, skip_nr;
0874 
0875     k = p = NULL;
0876     cur_idx = prev_idx = 0;
0877 
0878     bch_btree_iter_init(&c->root->keys, &iter, NULL);
0879     k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
0880     BUG_ON(!k);
0881 
0882     p = k;
0883 
0884     while (k) {
0885         spin_lock(&state->idx_lock);
0886         cur_idx = state->key_idx;
0887         state->key_idx++;
0888         spin_unlock(&state->idx_lock);
0889 
0890         skip_nr = cur_idx - prev_idx;
0891 
0892         while (skip_nr) {
0893             k = bch_btree_iter_next_filter(&iter,
0894                                &c->root->keys,
0895                                bch_ptr_bad);
0896             if (k)
0897                 p = k;
0898             else {
0899                 atomic_set(&state->enough, 1);
0900                 /* Update state->enough earlier */
0901                 smp_mb__after_atomic();
0902                 goto out;
0903             }
0904             skip_nr--;
0905         }
0906 
0907         if (p) {
0908             if (bch_root_node_dirty_init(c, state->d, p) < 0)
0909                 goto out;
0910         }
0911 
0912         p = NULL;
0913         prev_idx = cur_idx;
0914     }
0915 
0916 out:
0917     /* In order to wake up state->wait in time */
0918     smp_mb__before_atomic();
0919     if (atomic_dec_and_test(&state->started))
0920         wake_up(&state->wait);
0921 
0922     return 0;
0923 }
0924 
0925 static int bch_btre_dirty_init_thread_nr(void)
0926 {
0927     int n = num_online_cpus()/2;
0928 
0929     if (n == 0)
0930         n = 1;
0931     else if (n > BCH_DIRTY_INIT_THRD_MAX)
0932         n = BCH_DIRTY_INIT_THRD_MAX;
0933 
0934     return n;
0935 }
0936 
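/*
 * Populate the per-stripe dirty counters of a device. If the btree root is
 * a leaf, its keys are counted directly; otherwise up to
 * BCH_DIRTY_INIT_THRD_MAX threads (about half of the online CPUs) walk the
 * root node's subtrees in parallel.
 */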
0937 void bch_sectors_dirty_init(struct bcache_device *d)
0938 {
0939     int i;
0940     struct bkey *k = NULL;
0941     struct btree_iter iter;
0942     struct sectors_dirty_init op;
0943     struct cache_set *c = d->c;
0944     struct bch_dirty_init_state state;
0945 
0946     /* Just count root keys if no leaf node */
0947     rw_lock(0, c->root, c->root->level);
0948     if (c->root->level == 0) {
0949         bch_btree_op_init(&op.op, -1);
0950         op.inode = d->id;
0951         op.count = 0;
0952 
0953         for_each_key_filter(&c->root->keys,
0954                     k, &iter, bch_ptr_invalid)
0955             sectors_dirty_init_fn(&op.op, c->root, k);
0956 
0957         rw_unlock(0, c->root);
0958         return;
0959     }
0960 
0961     memset(&state, 0, sizeof(struct bch_dirty_init_state));
0962     state.c = c;
0963     state.d = d;
0964     state.total_threads = bch_btre_dirty_init_thread_nr();
0965     state.key_idx = 0;
0966     spin_lock_init(&state.idx_lock);
0967     atomic_set(&state.started, 0);
0968     atomic_set(&state.enough, 0);
0969     init_waitqueue_head(&state.wait);
0970 
0971     for (i = 0; i < state.total_threads; i++) {
0972         /* Fetch latest state.enough earlier */
0973         smp_mb__before_atomic();
0974         if (atomic_read(&state.enough))
0975             break;
0976 
0977         state.infos[i].state = &state;
0978         state.infos[i].thread =
0979             kthread_run(bch_dirty_init_thread, &state.infos[i],
0980                     "bch_dirtcnt[%d]", i);
0981         if (IS_ERR(state.infos[i].thread)) {
0982             pr_err("fails to run thread bch_dirty_init[%d]\n", i);
0983             for (--i; i >= 0; i--)
0984                 kthread_stop(state.infos[i].thread);
0985             goto out;
0986         }
0987         atomic_inc(&state.started);
0988     }
0989 
0990 out:
0991     /* Must wait for all threads to stop. */
0992     wait_event(state.wait, atomic_read(&state.started) == 0);
0993     rw_unlock(0, c->root);
0994 }
0995 
0996 void bch_cached_dev_writeback_init(struct cached_dev *dc)
0997 {
0998     sema_init(&dc->in_flight, 64);
0999     init_rwsem(&dc->writeback_lock);
1000     bch_keybuf_init(&dc->writeback_keys);
1001 
1002     dc->writeback_metadata      = true;
1003     dc->writeback_running       = false;
1004     dc->writeback_consider_fragment = true;
1005     dc->writeback_percent       = 10;
1006     dc->writeback_delay     = 30;
1007     atomic_long_set(&dc->writeback_rate.rate, 1024);
1008     dc->writeback_rate_minimum  = 8;
1009 
1010     dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
1011     dc->writeback_rate_p_term_inverse = 40;
1012     dc->writeback_rate_fp_term_low = 1;
1013     dc->writeback_rate_fp_term_mid = 10;
1014     dc->writeback_rate_fp_term_high = 1000;
1015     dc->writeback_rate_i_term_inverse = 10000;
1016 
1017     /* For dc->writeback_lock contention in update_writeback_rate() */
1018     dc->rate_update_retry = 0;
1019 
1020     WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
1021     INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
1022 }
1023 
1024 int bch_cached_dev_writeback_start(struct cached_dev *dc)
1025 {
1026     dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
1027                         WQ_MEM_RECLAIM, 0);
1028     if (!dc->writeback_write_wq)
1029         return -ENOMEM;
1030 
1031     cached_dev_get(dc);
1032     dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
1033                           "bcache_writeback");
1034     if (IS_ERR(dc->writeback_thread)) {
1035         cached_dev_put(dc);
1036         destroy_workqueue(dc->writeback_write_wq);
1037         return PTR_ERR(dc->writeback_thread);
1038     }
1039     dc->writeback_running = true;
1040 
1041     WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
1042     schedule_delayed_work(&dc->writeback_rate_update,
1043                   dc->writeback_rate_update_seconds * HZ);
1044 
1045     bch_writeback_queue(dc);
1046 
1047     return 0;
1048 }