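/*
 * dm-bufio - buffered I/O cache for device-mapper metadata.
 *
 * Buffers for a block device are kept in memory so that device-mapper
 * targets can read, modify and write back metadata blocks without issuing
 * an I/O for every access.
 */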
#include <linux/dm-bufio.h>

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
#include <linux/jump_label.h>

#define DM_MSG_PREFIX "bufio"

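/*
 * Memory management policy implemented below:
 * each client keeps at least DM_BUFIO_MIN_BUFFERS, the default total cache
 * size is DM_BUFIO_MEMORY_PERCENT of main memory (capped at
 * DM_BUFIO_VMALLOC_PERCENT of vmalloc space), background writeback starts
 * when dirty buffers outnumber clean ones by DM_BUFIO_WRITEBACK_RATIO, and
 * the global cleanup work evicts down to
 * cache_size - cache_size / DM_BUFIO_LOW_WATERMARK_RATIO.
 */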
#define DM_BUFIO_MIN_BUFFERS 8

#define DM_BUFIO_MEMORY_PERCENT 2
#define DM_BUFIO_VMALLOC_PERCENT 25
#define DM_BUFIO_WRITEBACK_RATIO 3
#define DM_BUFIO_LOW_WATERMARK_RATIO 16

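/*
 * Check buffer ages in this interval (seconds).
 */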
#define DM_BUFIO_WORK_TIMER_SECS 30

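/*
 * Free buffers when they are older than this (seconds).
 */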
#define DM_BUFIO_DEFAULT_AGE_SECS 300

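/*
 * The number of bytes of cached data to try to keep around.
 */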
#define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)

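/*
 * Align buffer writes to this boundary; dirty ranges are rounded out to it
 * before a partial write is submitted.
 */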
#define DM_BUFIO_WRITE_ALIGN 4096

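/*
 * Indices into the per-client LRU lists: clean and dirty buffers are kept
 * on separate lists.
 */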
#define LIST_CLEAN 0
#define LIST_DIRTY 1
#define LIST_SIZE 2

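/*
 * Buffer cache client descriptor. All buffers of a client are linked into
 * buffer_tree (indexed by block number) and onto one of the lru lists.
 */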
struct dm_bufio_client {
	struct mutex lock;
	spinlock_t spinlock;
	bool no_sleep;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	s8 sectors_per_block_bits;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);
	struct kmem_cache *slab_buffer;
	struct kmem_cache *slab_cache;
	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	sector_t start;

	int async_write_error;

	struct list_head client_list;

	struct shrinker shrinker;
	struct work_struct shrink_work;
	atomic_long_t need_shrink;
};

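/*
 * Buffer state bits (b->state).
 */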
#define B_READING 0
#define B_WRITING 1
#define B_DIRTY 2

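/*
 * Describes how the buffer data was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data() below.
 */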
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	struct list_head global_list;
	sector_t block;
	void *data;
	unsigned char data_mode;
	unsigned char list_mode;
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned accessed;
	unsigned hold_count;
	unsigned long state;
	unsigned long last_accessed;
	unsigned dirty_start;
	unsigned dirty_end;
	unsigned write_start;
	unsigned write_end;
	struct dm_bufio_client *c;
	struct list_head write_list;
	void (*end_io)(struct dm_buffer *, blk_status_t);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	unsigned int stack_len;
	unsigned long stack_entries[MAX_STACK];
#endif
};

static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);

#define dm_bufio_in_request() (!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_lock_bh(&c->spinlock);
	else
		mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		return spin_trylock_bh(&c->spinlock);
	else
		return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		spin_unlock_bh(&c->spinlock);
	else
		mutex_unlock(&c->lock);
}

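/*
 * Default cache size, computed at module init from the amount of memory in
 * the machine; used for dm_bufio_cache_size when the user has not set it.
 */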
static unsigned long dm_bufio_default_cache_size;

static unsigned long dm_bufio_cache_size;

static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(global_spinlock);

static LIST_HEAD(global_queue);

static unsigned long global_num = 0;

static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

static int dm_bufio_client_count;

static LIST_HEAD(dm_bufio_all_clients);

static DEFINE_MUTEX(dm_bufio_clients_lock);

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_cleanup_old_work;
static struct work_struct dm_bufio_replacement_work;

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
}
#endif

static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = block < b->block ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static struct dm_buffer *__find_next(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;
	struct dm_buffer *best = NULL;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		if (block <= b->block) {
			n = n->rb_left;
			best = b;
		} else {
			n = n->rb_right;
		}
	}

	return best;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = b->block < found->block ?
			&found->node.rb_left : &found->node.rb_right;
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}

static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
{
	unsigned char data_mode;
	long diff;

	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	data_mode = b->data_mode;
	diff = (long)b->c->block_size;
	if (unlink)
		diff = -diff;

	spin_lock(&global_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	b->accessed = 1;

	if (!unlink) {
		list_add(&b->global_list, &global_queue);
		global_num++;
		if (dm_bufio_current_allocated > dm_bufio_cache_size)
			queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
	} else {
		list_del(&b->global_list);
		global_num--;
	}

	spin_unlock(&global_spinlock);
}

static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}
}

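/*
 * Allocating buffer data mode depends on the block size and the allocation
 * flags:
 * - if the client has a dedicated slab cache (small or non-power-of-two
 *   blocks), allocate from it;
 * - otherwise use __get_free_pages() for opportunistic (__GFP_NORETRY)
 *   allocations that fit into KMALLOC_MAX_SIZE;
 * - fall back to __vmalloc() for everything else; for noretry allocations
 *   the vmalloc is done in noio context to avoid recursing into the I/O
 *   layer.
 */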
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       unsigned char *data_mode)
{
	if (unlikely(c->slab_cache != NULL)) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(c->slab_cache, gfp_mask);
	}

	if (c->block_size <= KMALLOC_MAX_SIZE &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
				c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
	}

	*data_mode = DATA_MODE_VMALLOC;

	if (gfp_mask & __GFP_NORETRY) {
		unsigned noio_flag = memalloc_noio_save();
		void *ptr = __vmalloc(c->block_size, gfp_mask);

		memalloc_noio_restore(noio_flag);
		return ptr;
	}

	return __vmalloc(c->block_size, gfp_mask);
}

static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, unsigned char data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(c->slab_cache, data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data,
			   c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kmem_cache_free(c->slab_buffer, b);
		return NULL;
	}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	b->stack_len = 0;
#endif
	return b;
}

static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	free_buffer_data(c, b->data, b->data_mode);
	kmem_cache_free(c->slab_buffer, b);
}

static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;

	adjust_total_allocated(b, false);
}

static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);

	adjust_total_allocated(b, true);
}

static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	b->accessed = 1;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}

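/*
 * Submit I/O on a buffer.
 *
 * Buffers whose data was vmalloc'ed go through dm-io (DM_IO_VMA); other
 * buffers are submitted directly with a bio built over their pages, falling
 * back to dm-io if the bio cannot be allocated or filled.
 */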
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
}

static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
		     unsigned n_sectors, unsigned offset)
{
	int r;
	struct dm_io_request io_req = {
		.bi_opf = op,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	r = dm_io(&io_req, 1, &region, NULL);
	if (unlikely(r))
		b->end_io(b, errno_to_blk_status(r));
}

static void bio_complete(struct bio *bio)
{
	struct dm_buffer *b = bio->bi_private;
	blk_status_t status = bio->bi_status;
	bio_uninit(bio);
	kfree(bio);
	b->end_io(b, status);
}

static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
		    unsigned n_sectors, unsigned offset)
{
	struct bio *bio;
	char *ptr;
	unsigned vec_size, len;

	vec_size = b->c->block_size >> PAGE_SHIFT;
	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
		vec_size += 2;

	bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
	if (!bio) {
dmio:
		use_dmio(b, op, sector, n_sectors, offset);
		return;
	}
	bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, op);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = bio_complete;
	bio->bi_private = b;

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	do {
		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
				  offset_in_page(ptr))) {
			bio_put(bio);
			goto dmio;
		}

		len -= this_step;
		ptr += this_step;
	} while (len > 0);

	submit_bio(bio);
}

static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
{
	sector_t sector;

	if (likely(c->sectors_per_block_bits >= 0))
		sector = block << c->sectors_per_block_bits;
	else
		sector = block * (c->block_size >> SECTOR_SHIFT);
	sector += c->start;

	return sector;
}

static void submit_io(struct dm_buffer *b, enum req_op op,
		      void (*end_io)(struct dm_buffer *, blk_status_t))
{
	unsigned n_sectors;
	sector_t sector;
	unsigned offset, end;

	b->end_io = end_io;

	sector = block_to_sector(b->c, b->block);

	if (op != REQ_OP_WRITE) {
		n_sectors = b->c->block_size >> SECTOR_SHIFT;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
		offset &= -DM_BUFIO_WRITE_ALIGN;
		end += DM_BUFIO_WRITE_ALIGN - 1;
		end &= -DM_BUFIO_WRITE_ALIGN;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (b->data_mode != DATA_MODE_VMALLOC)
		use_bio(b, op, sector, n_sectors, offset);
	else
		use_dmio(b, op, sector, n_sectors, offset);
}

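/*
 * Write endio: record any error in the buffer and the client, clear the
 * B_WRITING bit and wake up anybody waiting on it.
 */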
static void write_endio(struct dm_buffer *b, blk_status_t status)
{
	b->write_error = status;
	if (unlikely(status)) {
		struct dm_bufio_client *c = b->c;

		(void)cmpxchg(&c->async_write_error, 0,
			      blk_status_to_errno(status));
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}

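/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * If write_list is NULL the write is submitted immediately; otherwise the
 * buffer is queued and later submitted by __flush_write_list() outside the
 * client lock.
 */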
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	b->write_start = b->dirty_start;
	b->write_end = b->dirty_end;

	if (!write_list)
		submit_io(b, REQ_OP_WRITE, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;
	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, REQ_OP_WRITE, write_endio);
		cond_resched();
	}
	blk_finish_plug(&plug);
}

static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

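/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.  Clean buffers are tried first, then dirty ones.
 */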
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
		    unlikely(test_bit(B_READING, &b->state)))
			continue;

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
		return NULL;

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	return NULL;
}

static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;
	bool tried_noio_alloc = false;

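	/*
	 * Allocation strategy, in order of preference:
	 * 1. a fast GFP_NOWAIT allocation that never sleeps,
	 * 2. a GFP_NOIO allocation with the client lock dropped (tried once),
	 * 3. a buffer from the client's reserved pool,
	 * 4. an unclaimed buffer reclaimed from the LRU lists,
	 * and if all of that fails, wait for another thread to free a buffer
	 * and start over.  NF_PREFETCH callers give up after the first step.
	 */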
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
			dm_bufio_unlock(c);
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			dm_bufio_lock(c);
			if (b)
				return b;
			tried_noio_alloc = true;
		}

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		cond_resched();
	}
}

static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
		__write_dirty_buffers_async(c, 1, write_list);
}

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;

	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

static void read_endio(struct dm_buffer *b, blk_status_t status)
{
	b->read_error = status;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}

static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	if (b && b->hold_count == 1)
		buffer_record_stack(b);
#endif
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, REQ_OP_READ, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = blk_status_to_errno(b->read_error);

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);

void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, REQ_OP_READ, read_endio);
			dm_bufio_release(b);

			cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);
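
/*
 * Typical use of the interface above (illustrative sketch only, not code
 * from this file; bdev, block_nr and new_contents are hypothetical): a
 * device-mapper target creates a client for its metadata device, reads a
 * block, modifies it, marks it dirty and later flushes it:
 *
 *	struct dm_bufio_client *c;
 *	struct dm_buffer *buf;
 *	void *data;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL, 0);
 *	data = dm_bufio_read(c, block_nr, &buf);
 *	if (!IS_ERR_OR_NULL(data)) {
 *		memcpy(data, new_contents, 4096);
 *		dm_bufio_mark_buffer_dirty(buf);
 *		dm_bufio_release(buf);
 *	}
 *	dm_bufio_write_dirty_buffers(c);
 *	dm_bufio_client_destroy(c);
 */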

void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned start, unsigned end)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(start >= end);
	BUG_ON(end > b->c->block_size);

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state)) {
		b->dirty_start = start;
		b->dirty_end = end;
		__relink_lru(b, LIST_DIRTY);
	} else {
		if (start < b->dirty_start)
			b->dirty_start = start;
		if (end > b->dirty_end)
			b->dirty_end = end;
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		cond_resched();

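		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 */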
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);

int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
{
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_DISCARD | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = block_to_sector(c, block),
		.count = block_to_sector(c, count),
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);

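/*
 * Move a buffer to a new block number.
 *
 * Any other buffer that already sits at the new location is deleted first.
 * If the caller is the only holder, the buffer is simply marked dirty and
 * relinked under the new block number.  If somebody else still holds it,
 * the data is written out at the new location and the buffer is relinked
 * back to its old block number so the other holder keeps a consistent view.
 */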
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		b->dirty_start = 0;
		b->dirty_end = c->block_size;
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);

		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, REQ_OP_WRITE, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

static void forget_buffer_locked(struct dm_buffer *b)
{
	if (likely(!b->hold_count) && likely(!b->state)) {
		__unlink_buffer(b);
		__free_buffer_wake(b);
	}
}

void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);

	b = __find(c, block);
	if (b)
		forget_buffer_locked(b);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_forget);

void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
{
	struct dm_buffer *b;
	sector_t end_block = block + n_blocks;

	while (block < end_block) {
		dm_bufio_lock(c);

		b = __find_next(c, block);
		if (b) {
			block = b->block + 1;
			forget_buffer_locked(b);
		}

		dm_bufio_unlock(c);

		if (!b)
			break;
	}

}
EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	sector_t s = bdev_nr_sectors(c->bdev);
	if (s >= c->start)
		s -= c->start;
	else
		s = 0;
	if (likely(c->sectors_per_block_bits >= 0))
		s >>= c->sectors_per_block_bits;
	else
		sector_div(s, c->block_size >> SECTOR_SHIFT);
	return s;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
{
	return c->dm_io;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;
	bool warned = false;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list) {
			WARN_ON(!warned);
			warned = true;
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			stack_trace_print(b->stack_entries, b->stack_len, 1);

			b->hold_count = 0;
#endif
		}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);
#endif

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}

static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
	if (!(gfp & __GFP_FS) ||
	    (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return false;
	}

	if (b->hold_count)
		return false;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return true;
}

static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
	if (likely(c->sectors_per_block_bits >= 0))
		retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
	else
		retain_bytes /= c->block_size;
	return retain_bytes;
}

static void __scan(struct dm_bufio_client *c)
{
	int l;
	struct dm_buffer *b, *tmp;
	unsigned long freed = 0;
	unsigned long count = c->n_buffers[LIST_CLEAN] +
			      c->n_buffers[LIST_DIRTY];
	unsigned long retain_target = get_retain_buffers(c);

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			if (count - freed <= retain_target)
				atomic_long_set(&c->need_shrink, 0);
			if (!atomic_long_read(&c->need_shrink))
				return;
			if (__try_evict_buffer(b, GFP_KERNEL)) {
				atomic_long_dec(&c->need_shrink);
				freed++;
			}
			cond_resched();
		}
	}
}

static void shrink_work(struct work_struct *w)
{
	struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);

	dm_bufio_lock(c);
	__scan(c);
	dm_bufio_unlock(c);
}

static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	atomic_long_add(sc->nr_to_scan, &c->need_shrink);
	queue_work(dm_bufio_wq, &c->shrink_work);

	return sc->nr_to_scan;
}

static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
	unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
			      READ_ONCE(c->n_buffers[LIST_DIRTY]);
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);

	if (unlikely(count < retain_target))
		count = 0;
	else
		count -= retain_target;

	if (unlikely(count < queued_for_cleanup))
		count = 0;
	else
		count -= queued_for_cleanup;

	return count;
}

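/*
 * Create the buffering interface.
 */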
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *),
					       unsigned int flags)
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;
	char slab_name[27];

	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
		DMERR("%s: block size not specified or is not multiple of 512b", __func__);
		r = -EINVAL;
		goto bad_client;
	}

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->buffer_tree = RB_ROOT;

	c->bdev = bdev;
	c->block_size = block_size;
	if (is_power_of_2(block_size))
		c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
	else
		c->sectors_per_block_bits = -1;

	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
		c->no_sleep = true;
		static_branch_inc(&no_sleep_enabled);
	}

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	mutex_init(&c->lock);
	spin_lock_init(&c->spinlock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	if (block_size <= KMALLOC_MAX_SIZE &&
	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
		unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
		snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
						  SLAB_RECLAIM_ACCOUNT, NULL);
		if (!c->slab_cache) {
			r = -ENOMEM;
			goto bad;
		}
	}
	if (aux_size)
		snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
	else
		snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
	c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
					   0, SLAB_RECLAIM_ACCOUNT, NULL);
	if (!c->slab_buffer) {
		r = -ENOMEM;
		goto bad;
	}

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad;
		}
		__free_buffer_wake(b);
	}

	INIT_WORK(&c->shrink_work, shrink_work);
	atomic_long_set(&c->need_shrink, 0);

	c->shrinker.count_objects = dm_bufio_shrink_count;
	c->shrinker.scan_objects = dm_bufio_shrink_scan;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
			      MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
	if (r)
		goto bad;

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	return c;

bad:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	mutex_destroy(&c->lock);
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);

void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);
	flush_work(&c->shrink_work);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	kmem_cache_destroy(c->slab_cache);
	kmem_cache_destroy(c->slab_buffer);
	dm_io_client_destroy(c->dm_io);
	mutex_destroy(&c->lock);
	if (c->no_sleep)
		static_branch_dec(&no_sleep_enabled);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
{
	c->start = start;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);

static unsigned get_max_age_hz(void)
{
	unsigned max_age = READ_ONCE(dm_bufio_max_age);

	if (max_age > UINT_MAX / HZ)
		max_age = UINT_MAX / HZ;

	return max_age * HZ;
}

static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
	return time_after_eq(jiffies, b->last_accessed + age_hz);
}

static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
	struct dm_buffer *b, *tmp;
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long count;
	LIST_HEAD(write_list);

	dm_bufio_lock(c);

	__check_watermark(c, &write_list);
	if (unlikely(!list_empty(&write_list))) {
		dm_bufio_unlock(c);
		__flush_write_list(&write_list);
		dm_bufio_lock(c);
	}

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
		if (count <= retain_target)
			break;

		if (!older_than(b, age_hz))
			break;

		if (__try_evict_buffer(b, 0))
			count--;

		cond_resched();
	}

	dm_bufio_unlock(c);
}

static void do_global_cleanup(struct work_struct *w)
{
	struct dm_bufio_client *locked_client = NULL;
	struct dm_bufio_client *current_client;
	struct dm_buffer *b;
	unsigned spinlock_hold_count;
	unsigned long threshold = dm_bufio_cache_size -
		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
	unsigned long loops = global_num * 2;

	mutex_lock(&dm_bufio_clients_lock);

	while (1) {
		cond_resched();

		spin_lock(&global_spinlock);
		if (unlikely(dm_bufio_current_allocated <= threshold))
			break;

		spinlock_hold_count = 0;
get_next:
		if (!loops--)
			break;
		if (unlikely(list_empty(&global_queue)))
			break;
		b = list_entry(global_queue.prev, struct dm_buffer, global_list);

		if (b->accessed) {
			b->accessed = 0;
			list_move(&b->global_list, &global_queue);
			if (likely(++spinlock_hold_count < 16))
				goto get_next;
			spin_unlock(&global_spinlock);
			continue;
		}

		current_client = b->c;
		if (unlikely(current_client != locked_client)) {
			if (locked_client)
				dm_bufio_unlock(locked_client);

			if (!dm_bufio_trylock(current_client)) {
				spin_unlock(&global_spinlock);
				dm_bufio_lock(current_client);
				locked_client = current_client;
				continue;
			}

			locked_client = current_client;
		}

		spin_unlock(&global_spinlock);

		if (unlikely(!__try_evict_buffer(b, GFP_KERNEL))) {
			spin_lock(&global_spinlock);
			list_move(&b->global_list, &global_queue);
			spin_unlock(&global_spinlock);
		}
	}

	spin_unlock(&global_spinlock);

	if (locked_client)
		dm_bufio_unlock(locked_client);

	mutex_unlock(&dm_bufio_clients_lock);
}

static void cleanup_old_buffers(void)
{
	unsigned long max_age_hz = get_max_age_hz();
	struct dm_bufio_client *c;

	mutex_lock(&dm_bufio_clients_lock);

	__cache_size_refresh();

	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
		__evict_old_buffers(c, max_age_hz);

	mutex_unlock(&dm_bufio_clients_lock);
}

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}

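/*
 * This is called only once for the whole dm_bufio module.
 * It computes the default memory limit for the cache.
 */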
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
	INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

static void __exit dm_bufio_exit(void)
{
	int bug = 0;

	cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
	destroy_workqueue(dm_bufio_wq);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	BUG_ON(bug);
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");