#ifndef _BCACHE_H
#define _BCACHE_H
#define pr_fmt(fmt) "bcache: %s() " fmt, __func__

#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>

#include "bcache_ondisk.h"
#include "bset.h"
#include "util.h"
#include "closure.h"

struct bucket {
	atomic_t	pin;
	uint16_t	prio;
	uint8_t		gen;
	uint8_t		last_gc;
	uint16_t	gc_mark;
};

BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE	1
#define GC_MARK_DIRTY		2
#define GC_MARK_METADATA	3
#define GC_SECTORS_USED_SIZE	13
#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
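/*
 * The gc_mark field of struct bucket is a small bitfield packed by the
 * BITMASK() helper: bits 0-1 hold the GC mark (reclaimable/dirty/metadata),
 * bits 2-14 the number of live sectors in the bucket, and bit 15 the
 * "being copied by moving GC" flag.  BITMASK() generates accessors of the
 * form GC_MARK(b) / SET_GC_MARK(b, v), so for example
 * SET_GC_MARK(b, GC_MARK_METADATA) marks a bucket as holding metadata and
 * GC_SECTORS_USED(b) reads back the live sector count (which is capped at
 * MAX_GC_SECTORS_USED).
 */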

#include "journal.h"
#include "stats.h"
struct search;
struct btree;
struct keybuf;

struct keybuf_key {
	struct rb_node	node;
	BKEY_PADDED(key);
	void		*private;
};

struct keybuf {
	struct bkey	last_scanned;
	spinlock_t	lock;

	struct bkey	start;
	struct bkey	end;

	struct rb_root	keys;

#define KEYBUF_NR 500
	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
};
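/*
 * A keybuf caches the results of scanning the btree for keys in the range
 * [start, end): last_scanned remembers how far the scan has got, and the
 * keys found so far are parked in the "keys" rb tree, backed by a fixed
 * freelist of KEYBUF_NR entries.  Writeback (writeback_keys) and moving GC
 * (moving_gc_keys) each use one of these to batch up the keys they are
 * about to operate on.
 */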

struct bcache_device {
	struct closure	cl;

	struct kobject	kobj;

	struct cache_set *c;
	unsigned int	id;
#define BCACHEDEVNAME_SIZE 12
	char		name[BCACHEDEVNAME_SIZE];

	struct gendisk	*disk;

	unsigned long	flags;
#define BCACHE_DEV_CLOSING		0
#define BCACHE_DEV_DETACHING		1
#define BCACHE_DEV_UNLINK_DONE		2
#define BCACHE_DEV_WB_RUNNING		3
#define BCACHE_DEV_RATE_DW_RUNNING	4
	int		nr_stripes;
	unsigned int	stripe_size;
	atomic_t	*stripe_sectors_dirty;
	unsigned long	*full_dirty_stripes;

	struct bio_set	bio_split;

	unsigned int	data_csum:1;

	int (*cache_miss)(struct btree *b, struct search *s,
			  struct bio *bio, unsigned int sectors);
	int (*ioctl)(struct bcache_device *d, fmode_t mode,
		     unsigned int cmd, unsigned long arg);
};

struct io {
	struct hlist_node	hash;
	struct list_head	lru;

	unsigned long		jiffies;
	unsigned int		sequential;
	sector_t		last;
};
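/*
 * One of these is kept for each of the last RECENT_IO I/O streams seen by a
 * cached device: "last" is the sector where that stream ended and
 * "sequential" roughly how many sectors it has done back to back.  Incoming
 * requests are looked up in io_hash by their start sector; a hit means the
 * request continues a sequential stream, and once "sequential" exceeds
 * dc->sequential_cutoff the stream is bypassed straight to the backing
 * device instead of being cached.
 */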

enum stop_on_failure {
	BCH_CACHED_DEV_STOP_AUTO = 0,
	BCH_CACHED_DEV_STOP_ALWAYS,
	BCH_CACHED_DEV_STOP_MODE_MAX,
};

struct cached_dev {
	struct list_head	list;
	struct bcache_device	disk;
	struct block_device	*bdev;

	struct cache_sb		sb;
	struct cache_sb_disk	*sb_disk;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];
	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	refcount_t		count;
	struct work_struct	detach;

	atomic_t		running;

	struct rw_semaphore	writeback_lock;

	atomic_t		has_dirty;

#define BCH_CACHE_READA_ALL		0
#define BCH_CACHE_READA_META_ONLY	1
	unsigned int		cache_readahead_policy;
	struct bch_ratelimit	writeback_rate;
	struct delayed_work	writeback_rate_update;

	struct semaphore	in_flight;
	struct task_struct	*writeback_thread;
	struct workqueue_struct	*writeback_write_wq;

	struct keybuf		writeback_keys;

	struct task_struct	*status_update_thread;

	struct closure_waitlist	writeback_ordering_wait;
	atomic_t		writeback_sequence_next;

#define RECENT_IO_BITS	7
#define RECENT_IO	(1 << RECENT_IO_BITS)
	struct io		io[RECENT_IO];
	struct hlist_head	io_hash[RECENT_IO + 1];
	struct list_head	io_lru;
	spinlock_t		io_lock;

	struct cache_accounting	accounting;

	unsigned int		sequential_cutoff;

	unsigned int		io_disable:1;
	unsigned int		verify:1;
	unsigned int		bypass_torture_test:1;

	unsigned int		partial_stripes_expensive:1;
	unsigned int		writeback_metadata:1;
	unsigned int		writeback_running:1;
	unsigned int		writeback_consider_fragment:1;
	unsigned char		writeback_percent;
	unsigned int		writeback_delay;

	uint64_t		writeback_rate_target;
	int64_t			writeback_rate_proportional;
	int64_t			writeback_rate_integral;
	int64_t			writeback_rate_integral_scaled;
	int32_t			writeback_rate_change;

	unsigned int		writeback_rate_update_seconds;
	unsigned int		writeback_rate_i_term_inverse;
	unsigned int		writeback_rate_p_term_inverse;
	unsigned int		writeback_rate_fp_term_low;
	unsigned int		writeback_rate_fp_term_mid;
	unsigned int		writeback_rate_fp_term_high;
	unsigned int		writeback_rate_minimum;

	enum stop_on_failure	stop_when_cache_set_failed;
#define DEFAULT_CACHED_DEV_ERROR_LIMIT	64
	atomic_t		io_errors;
	unsigned int		error_limit;
	unsigned int		offline_seconds;

#define BCH_WBRATE_UPDATE_MAX_SKIPS	15
	unsigned int		rate_update_retry;
};
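/*
 * The writeback_rate_* fields above feed a PI (proportional-integral)
 * controller that is re-run every writeback_rate_update_seconds.  Very
 * roughly: the error is the number of dirty sectors minus
 * writeback_rate_target (derived from writeback_percent), the proportional
 * term is that error divided by writeback_rate_p_term_inverse, the
 * integral term accumulates the error scaled by
 * writeback_rate_i_term_inverse, and their sum, not allowed below
 * writeback_rate_minimum, becomes the new writeback_rate.  The
 * writeback_rate_fp_term_* values additionally boost the rate when
 * writeback_consider_fragment is set and free buckets are badly
 * fragmented.  See __update_writeback_rate() in writeback.c for the exact
 * arithmetic; this is only a sketch.
 */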

enum alloc_reserve {
	RESERVE_BTREE,
	RESERVE_PRIO,
	RESERVE_MOVINGGC,
	RESERVE_NONE,
	RESERVE_NR,
};

struct cache {
	struct cache_set	*set;
	struct cache_sb		sb;
	struct cache_sb_disk	*sb_disk;
	struct bio		sb_bio;
	struct bio_vec		sb_bv[1];

	struct kobject		kobj;
	struct block_device	*bdev;

	struct task_struct	*alloc_thread;

	struct closure		prio;
	struct prio_set		*disk_buckets;

	uint64_t		*prio_buckets;
	uint64_t		*prio_last_buckets;

	DECLARE_FIFO(long, free)[RESERVE_NR];
	DECLARE_FIFO(long, free_inc);

	size_t			fifo_last_bucket;

	struct bucket		*buckets;

	DECLARE_HEAP(struct bucket *, heap);

	unsigned int		invalidate_needs_gc;

	bool			discard;

	struct journal_device	journal;

#define IO_ERROR_SHIFT	20
	atomic_t		io_errors;
	atomic_t		io_count;

	atomic_long_t		meta_sectors_written;
	atomic_long_t		btree_sectors_written;
	atomic_long_t		sectors_written;
};
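/*
 * Bucket allocation in a nutshell: the allocator thread invalidates buckets
 * (bumping their gen so stale pointers into them stop being followed),
 * queues them on the free_inc fifo, optionally discards them, and then
 * moves them onto one of the per-reserve free[] fifos above, from which
 * bch_bucket_alloc() hands them out.  The separate RESERVE_BTREE,
 * RESERVE_PRIO and RESERVE_MOVINGGC pools exist so that btree node splits,
 * priority writes and moving GC can always make forward progress even when
 * normal data allocations are starved.
 */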

struct gc_stat {
	size_t		nodes;
	size_t		nodes_pre;
	size_t		key_bytes;

	size_t		nkeys;
	uint64_t	data;
	unsigned int	in_use;
};

#define CACHE_SET_UNREGISTERING	0
#define CACHE_SET_STOPPING	1
#define CACHE_SET_RUNNING	2
#define CACHE_SET_IO_DISABLE	3

struct cache_set {
	struct closure		cl;

	struct list_head	list;
	struct kobject		kobj;
	struct kobject		internal;
	struct dentry		*debug;
	struct cache_accounting accounting;

	unsigned long		flags;
	atomic_t		idle_counter;
	atomic_t		at_max_writeback_rate;

	struct cache		*cache;

	struct bcache_device	**devices;
	unsigned int		devices_max_used;
	atomic_t		attached_dev_nr;
	struct list_head	cached_devs;
	uint64_t		cached_dev_sectors;
	atomic_long_t		flash_dev_dirty_sectors;
	struct closure		caching;

	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	mempool_t		search;
	mempool_t		bio_meta;
	struct bio_set		bio_split;

	struct shrinker		shrink;

	struct mutex		bucket_lock;

	unsigned short		bucket_bits;

	unsigned short		block_bits;

	unsigned int		btree_pages;

	struct list_head	btree_cache;
	struct list_head	btree_cache_freeable;
	struct list_head	btree_cache_freed;

	unsigned int		btree_cache_used;

	wait_queue_head_t	btree_cache_wait;
	struct task_struct	*btree_cache_alloc_lock;
	spinlock_t		btree_cannibalize_lock;

	atomic_t		prio_blocked;
	wait_queue_head_t	bucket_wait;

	atomic_t		rescale;

	atomic_t		search_inflight;

	uint16_t		min_prio;

	uint8_t			need_gc;
	struct gc_stat		gc_stats;
	size_t			nbuckets;
	size_t			avail_nbuckets;

	struct task_struct	*gc_thread;

	struct bkey		gc_done;

#define BCH_ENABLE_AUTO_GC	1
#define BCH_DO_AUTO_GC		2
	uint8_t			gc_after_writeback;

	int			gc_mark_valid;

	atomic_t		sectors_to_gc;
	wait_queue_head_t	gc_wait;

	struct keybuf		moving_gc_keys;

	struct semaphore	moving_in_flight;

	struct workqueue_struct	*moving_gc_wq;

	struct btree		*root;

#ifdef CONFIG_BCACHE_DEBUG
	struct btree		*verify_data;
	struct bset		*verify_ondisk;
	struct mutex		verify_lock;
#endif

	uint8_t			set_uuid[16];
	unsigned int		nr_uuids;
	struct uuid_entry	*uuids;
	BKEY_PADDED(uuid_bucket);
	struct closure		uuid_write;
	struct semaphore	uuid_write_mutex;

	mempool_t		fill_iter;

	struct bset_sort_state	sort;

	struct list_head	data_buckets;
	spinlock_t		data_bucket_lock;

	struct journal		journal;

#define CONGESTED_MAX		1024
	unsigned int		congested_last_us;
	atomic_t		congested;

	unsigned int		congested_read_threshold_us;
	unsigned int		congested_write_threshold_us;

	struct time_stats	btree_gc_time;
	struct time_stats	btree_split_time;
	struct time_stats	btree_read_time;

	atomic_long_t		cache_read_races;
	atomic_long_t		writeback_keys_done;
	atomic_long_t		writeback_keys_failed;

	atomic_long_t		reclaim;
	atomic_long_t		reclaimed_journal_buckets;
	atomic_long_t		flush_write;

	enum {
		ON_ERROR_UNREGISTER,
		ON_ERROR_PANIC,
	}			on_error;
#define DEFAULT_IO_ERROR_LIMIT	8
	unsigned int		error_limit;
	unsigned int		error_decay;

	unsigned short		journal_delay_ms;
	bool			expensive_debug_checks;
	unsigned int		verify:1;
	unsigned int		key_merging_disabled:1;
	unsigned int		gc_always_rewrite:1;
	unsigned int		shrinker_disabled:1;
	unsigned int		copy_gc_enabled:1;
	unsigned int		idle_max_writeback_rate_enabled:1;

#define BUCKET_HASH_BITS	12
	struct hlist_head	bucket_hash[1 << BUCKET_HASH_BITS];
};

struct bbio {
	unsigned int		submit_time_us;
	union {
		struct bkey	key;
		uint64_t	_pad[3];
	};
	struct bio		bio;
};

#define BTREE_PRIO	USHRT_MAX
#define INITIAL_PRIO	32768U
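/*
 * Bucket priorities are 16 bit and are periodically rescaled downwards by
 * bch_rescale_priorities() as I/O happens, so the buckets with the lowest
 * priority are the first candidates for invalidation.  Freshly allocated
 * data buckets start around the middle of the range at INITIAL_PRIO, while
 * BTREE_PRIO (USHRT_MAX) is used for buckets holding metadata so that they
 * are the last to be reused.
 */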

#define btree_bytes(c)		((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b)						\
	((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))

#define btree_default_blocks(c)					\
	((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))

#define bucket_bytes(ca)	((ca)->sb.bucket_size << 9)
#define block_bytes(ca)		((ca)->sb.block_size << 9)

static inline unsigned int meta_bucket_pages(struct cache_sb *sb)
{
	unsigned int n, max_pages;

	max_pages = min_t(unsigned int,
			  __rounddown_pow_of_two(USHRT_MAX) / PAGE_SECTORS,
			  MAX_ORDER_NR_PAGES);

	n = sb->bucket_size / PAGE_SECTORS;
	if (n > max_pages)
		n = max_pages;

	return n;
}

static inline unsigned int meta_bucket_bytes(struct cache_sb *sb)
{
	return meta_bucket_pages(sb) << PAGE_SHIFT;
}
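/*
 * For illustration (assuming 4 KiB pages, i.e. PAGE_SECTORS == 8): a cache
 * with 512 KiB buckets has sb->bucket_size == 1024, so meta_bucket_pages()
 * returns 1024 / 8 = 128 and meta_bucket_bytes() is 512 KiB - metadata
 * buckets are simply bucket sized.  Very large buckets are clamped to
 * max_pages, so metadata allocations never exceed what USHRT_MAX sectors
 * (or the page allocator's MAX_ORDER) can describe.
 */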

#define prios_per_bucket(ca)						\
	((meta_bucket_bytes(&(ca)->sb) - sizeof(struct prio_set)) /	\
	 sizeof(struct bucket_disk))

#define prio_buckets(ca)						\
	DIV_ROUND_UP((size_t) (ca)->sb.nbuckets, prios_per_bucket(ca))
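/*
 * Rough sizing example: each on-disk priority entry (struct bucket_disk)
 * is only a few bytes, so with 512 KiB metadata buckets prios_per_bucket()
 * comes out in the hundreds of thousands, and prio_buckets() - the number
 * of buckets needed to persist priorities and gens for every bucket on the
 * device - stays tiny even for large caches.
 */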

static inline size_t sector_to_bucket(struct cache_set *c, sector_t s)
{
	return s >> c->bucket_bits;
}

static inline sector_t bucket_to_sector(struct cache_set *c, size_t b)
{
	return ((sector_t) b) << c->bucket_bits;
}

static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
{
	return s & (c->cache->sb.bucket_size - 1);
}
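/*
 * bucket_bits is ilog2 of the bucket size in sectors, so these conversions
 * are just shifts and masks.  E.g. with 1024-sector (512 KiB) buckets,
 * bucket_bits is 10: sector 5000 lives in bucket sector_to_bucket() =
 * 5000 >> 10 = 4, at offset bucket_remainder() = 5000 & 1023 = 904 sectors
 * into it, and bucket_to_sector(c, 4) gets back to sector 4096 where that
 * bucket starts.
 */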

static inline size_t PTR_BUCKET_NR(struct cache_set *c,
				   const struct bkey *k,
				   unsigned int ptr)
{
	return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}

static inline struct bucket *PTR_BUCKET(struct cache_set *c,
					const struct bkey *k,
					unsigned int ptr)
{
	return c->cache->buckets + PTR_BUCKET_NR(c, k, ptr);
}

static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
	uint8_t r = a - b;

	return r > 128U ? 0 : r;
}
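/*
 * Generation numbers are 8 bit and wrap, so "after" is defined modulo 256,
 * with anything more than 128 steps ahead treated as not-after.  For
 * example, gen_after(2, 250) returns 8 (2 is 8 gens past 250 across the
 * wrap), while gen_after(250, 2) returns 0.  ptr_stale() below uses this
 * to ask how far a bucket's current gen has moved past the gen recorded in
 * a key's pointer; any nonzero answer means the bucket has been reused and
 * the pointer must not be followed.
 */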

static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
				unsigned int i)
{
	return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}

static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
				 unsigned int i)
{
	return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && c->cache;
}

#define csum_set(i)							\
	bch_crc64(((void *) (i)) + sizeof(uint64_t),			\
		  ((void *) bset_bkey_last(i)) -			\
		  (((void *) (i)) + sizeof(uint64_t)))

#define btree_bug(b, ...)						\
do {									\
	if (bch_cache_set_error((b)->c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define cache_bug(c, ...)						\
do {									\
	if (bch_cache_set_error(c, __VA_ARGS__))			\
		dump_stack();						\
} while (0)

#define btree_bug_on(cond, b, ...)					\
do {									\
	if (cond)							\
		btree_bug(b, __VA_ARGS__);				\
} while (0)

#define cache_bug_on(cond, c, ...)					\
do {									\
	if (cond)							\
		cache_bug(c, __VA_ARGS__);				\
} while (0)

#define cache_set_err_on(cond, c, ...)					\
do {									\
	if (cond)							\
		bch_cache_set_error(c, __VA_ARGS__);			\
} while (0)

#define for_each_bucket(b, ca)						\
	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
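/*
 * for_each_bucket() walks every usable bucket of a cache device (skipping
 * the on-disk superblock area before sb.first_bucket).  A hypothetical use,
 * counting how many buckets GC considers metadata, might look like:
 *
 *	struct bucket *b;
 *	size_t meta = 0;
 *
 *	for_each_bucket(b, ca)
 *		if (GC_MARK(b) == GC_MARK_METADATA)
 *			meta++;
 *
 * Callers generally hold c->bucket_lock while iterating.
 */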

static inline void cached_dev_put(struct cached_dev *dc)
{
	if (refcount_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}

static inline bool cached_dev_get(struct cached_dev *dc)
{
	if (!refcount_inc_not_zero(&dc->count))
		return false;

	smp_mb__after_atomic();
	return true;
}
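/*
 * dc->count is the number of in-flight users currently preventing the
 * cached device from being detached from its cache set.  cached_dev_get()
 * uses refcount_inc_not_zero() so it fails (returns false) once a detach
 * has begun and the count has already hit zero; when cached_dev_put()
 * drops the final reference it schedules dc->detach to finish the detach.
 * Typical (hypothetical) usage around an I/O:
 *
 *	if (!cached_dev_get(dc))
 *		return;		// device is going away, use the backing device
 *	// ... do cached I/O ...
 *	cached_dev_put(dc);
 */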

static inline uint8_t bucket_gc_gen(struct bucket *b)
{
	return b->gen - b->last_gc;
}

#define BUCKET_GC_GEN_MAX	96U
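/*
 * bucket_gc_gen() is how many times a bucket's gen has been bumped since
 * garbage collection last looked at it (last_gc).  Because gens are only
 * 8 bit, the allocator stops invalidating a bucket once this reaches
 * BUCKET_GC_GEN_MAX and instead sets invalidate_needs_gc to force a GC
 * pass, keeping gen arithmetic safely inside the wraparound window that
 * gen_after() can handle.
 */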

#define kobj_attribute_write(n, fn)					\
	static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)

#define kobj_attribute_rw(n, show, store)				\
	static struct kobj_attribute ksysfs_##n =			\
		__ATTR(n, 0600, show, store)
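/*
 * Convenience wrappers for declaring sysfs attributes.  For example (with
 * illustrative names), kobj_attribute_write(foo, foo_store) declares
 * "static struct kobj_attribute ksysfs_foo", a write-only (0200) attribute
 * whose ->store hook is foo_store(); kobj_attribute_rw() does the same for
 * a 0600 attribute with both ->show and ->store.
 */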

static inline void wake_up_allocators(struct cache_set *c)
{
	struct cache *ca = c->cache;

	wake_up_process(ca->alloc_thread);
}

static inline void closure_bio_submit(struct cache_set *c,
				      struct bio *bio,
				      struct closure *cl)
{
	closure_get(cl);
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}
	submit_bio_noacct(bio);
}
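/*
 * closure_bio_submit() is how bcache submits a bio that is tracked by a
 * closure: it takes a reference on cl up front (the bio's endio path is
 * expected to drop it with closure_put()), and if the cache set has been
 * marked CACHE_SET_IO_DISABLE - e.g. after too many I/O errors - it fails
 * the bio immediately with BLK_STS_IOERR instead of touching the possibly
 * broken device.
 */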

static inline void wait_for_kthread_stop(void)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}
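/*
 * Used by kthreads (e.g. the writeback and allocator threads) that have
 * decided to stop doing work - typically because CACHE_SET_IO_DISABLE was
 * set - but must not return from their thread function before someone has
 * called kthread_stop() on them: wait_for_kthread_stop() simply parks the
 * thread until then, so kthread_stop() never races with an already-exited
 * thread.
 */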

void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
void bch_count_io_errors(struct cache *ca, blk_status_t error,
			 int is_read, const char *m);
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m);
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m);
void bch_bbio_free(struct bio *bio, struct cache_set *c);
struct bio *bch_bbio_alloc(struct cache_set *c);

void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned int ptr);

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
void bch_rescale_priorities(struct cache_set *c, int sectors);

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);

void __bch_bucket_free(struct cache *ca, struct bucket *b);
void bch_bucket_free(struct cache_set *c, struct bkey *k);

long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			   struct bkey *k, bool wait);
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
			 struct bkey *k, bool wait);
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
		       unsigned int sectors, unsigned int write_point,
		       unsigned int write_prio, bool wait);
bool bch_cached_dev_error(struct cached_dev *dc);

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);

int bch_prio_write(struct cache *ca, bool wait);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);

extern struct workqueue_struct *bcache_wq;
extern struct workqueue_struct *bch_journal_wq;
extern struct workqueue_struct *bch_flush_wq;
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;

extern struct kobj_type bch_cached_dev_ktype;
extern struct kobj_type bch_flash_dev_ktype;
extern struct kobj_type bch_cache_set_ktype;
extern struct kobj_type bch_cache_set_internal_ktype;
extern struct kobj_type bch_cache_ktype;

void bch_cached_dev_release(struct kobject *kobj);
void bch_flash_dev_release(struct kobject *kobj);
void bch_cache_set_release(struct kobject *kobj);
void bch_cache_release(struct kobject *kobj);

int bch_uuid_write(struct cache_set *c);
void bcache_write_super(struct cache_set *c);

int bch_flash_dev_create(struct cache_set *c, uint64_t size);

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
			  uint8_t *set_uuid);
void bch_cached_dev_detach(struct cached_dev *dc);
int bch_cached_dev_run(struct cached_dev *dc);
void bcache_device_stop(struct bcache_device *d);

void bch_cache_set_unregister(struct cache_set *c);
void bch_cache_set_stop(struct cache_set *c);

struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
void bch_btree_cache_free(struct cache_set *c);
int bch_btree_cache_alloc(struct cache_set *c);
void bch_moving_init_cache_set(struct cache_set *c);
int bch_open_buckets_alloc(struct cache_set *c);
void bch_open_buckets_free(struct cache_set *c);

int bch_cache_allocator_start(struct cache *ca);

void bch_debug_exit(void);
void bch_debug_init(void);
void bch_request_exit(void);
int bch_request_init(void);
void bch_btree_exit(void);
int bch_btree_init(void);

#endif /* _BCACHE_H */