/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

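/*
 * Writeback cutoffs, as a percentage of cache in use: the plain values are
 * defaults and the _MAX values upper bounds for the bch_cutoff_writeback
 * and bch_cutoff_writeback_sync tunables used by should_writeback() below.
 */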
#define CUTOFF_WRITEBACK 40
#define CUTOFF_WRITEBACK_SYNC 70

#define CUTOFF_WRITEBACK_MAX 70
#define CUTOFF_WRITEBACK_SYNC_MAX 90

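/*
 * Limits on how many keys, and how many sectors, the writeback thread will
 * write back in a single pass.
 */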
#define MAX_WRITEBACKS_IN_PASS 5
#define MAX_WRITESIZE_IN_PASS 5000

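/*
 * Bounds, in seconds, for how often the writeback rate is recomputed
 * (the writeback_rate_update_seconds tunable).
 */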
#define WRITEBACK_RATE_UPDATE_SECS_MAX 60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT 5

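/*
 * Percentage of cache in use above which the writeback path may kick off
 * garbage collection automatically.
 */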
#define BCH_AUTO_GC_DIRTY_THRESHOLD 50

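/*
 * Fragmentation thresholds (percent): as fragmentation of dirty data in the
 * cache crosses the LOW, MID and HIGH marks, the writeback rate is scaled up
 * increasingly aggressively so that buckets are reclaimed sooner.
 */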
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW 50
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64

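/* Maximum number of threads used by bch_sectors_dirty_init() below. */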
#define BCH_DIRTY_INIT_THRD_MAX 12

/*
 * Writeback rate shares are computed in 16384ths (1 << WRITEBACK_SHARE_SHIFT):
 * fine-grained enough that each backing device gets a reasonable fraction of
 * the cache set's dirty target, without overflowing for very large devices.
 */
#define WRITEBACK_SHARE_SHIFT 14

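/*
 * Shared state for the threads that walk the btree at registration time to
 * initialize the per-stripe dirty sector counters.
 */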
struct bch_dirty_init_state;
struct dirty_init_thrd_info {
	struct bch_dirty_init_state	*state;
	struct task_struct		*thread;
};

struct bch_dirty_init_state {
	struct cache_set		*c;
	struct bcache_device		*d;
	int				total_threads;
	int				key_idx;
	spinlock_t			idx_lock;
	atomic_t			started;
	atomic_t			enough;
	wait_queue_head_t		wait;
	struct dirty_init_thrd_info	infos[BCH_DIRTY_INIT_THRD_MAX];
};

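/* Total dirty sectors on a backing device, summed over its stripes. */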
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

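/*
 * Convert a sector offset on the backing device into a stripe index, or
 * -EINVAL if the offset maps past the last stripe.
 */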
static inline int offset_to_stripe(struct bcache_device *d,
				   uint64_t offset)
{
	do_div(offset, d->stripe_size);

	/* d->nr_stripes is in range [1, INT_MAX] */
	if (unlikely(offset >= d->nr_stripes)) {
		pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
		       offset, d->nr_stripes);
		return -EINVAL;
	}

	/*
	 * offset is now strictly less than d->nr_stripes <= INT_MAX, so
	 * returning it as an int cannot overflow.
	 */
	return offset;
}

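/*
 * True if any stripe covered by [offset, offset + nr_sectors) on the backing
 * device still has dirty sectors.
 */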
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned int nr_sectors)
{
	int stripe = offset_to_stripe(&dc->disk, offset);

	if (stripe < 0)
		return false;

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

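/*
 * Runtime cutoff thresholds (percent of cache in use) used by
 * should_writeback(); see the CUTOFF_WRITEBACK defaults and maximums above.
 */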
extern unsigned int bch_cutoff_writeback;
extern unsigned int bch_cutoff_writeback_sync;

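/*
 * Decide whether a write should go through writeback caching, based on the
 * cache mode, how full the cache is, and the bio itself.
 */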
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned int cache_mode, bool would_skip)
{
	unsigned int in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > bch_cutoff_writeback_sync)
		return false;

	if (bio_op(bio) == REQ_OP_DISCARD)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= bch_cutoff_writeback);
}

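/* Wake the writeback thread, if it has been started. */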
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

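/*
 * Note that the backing device now holds dirty data: on the 0 -> 1
 * transition, mark the backing superblock dirty and kick the writeback
 * thread.
 */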
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);

			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors);

void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);

#endif