// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 Arrikto, Inc. All Rights Reserved.
 */

#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/err.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/dm-io.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/blk_types.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/device-mapper.h>

#include "dm.h"
#include "dm-clone-metadata.h"

#define DM_MSG_PREFIX "clone"

/*
 * Minimum and maximum allowed region sizes, in sectors.
 */
#define MIN_REGION_SIZE (1 << 3)  /* 4KB */
#define MAX_REGION_SIZE (1 << 21) /* 1GB */

#define MIN_HYDRATIONS 256 /* Size of hydration mempool */
#define DEFAULT_HYDRATION_THRESHOLD 1 /* 1 region */
#define DEFAULT_HYDRATION_BATCH_SIZE 1 /* Hydrate in batches of 1 region */

#define COMMIT_PERIOD HZ /* 1 sec */

/*
 * Hydration hash table size: 1 << HASH_TABLE_BITS
 */
#define HASH_TABLE_BITS 15

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(clone_hydration_throttle,
	"A percentage of time allocated for hydrating regions");

/* Slab cache for struct dm_clone_region_hydration */
static struct kmem_cache *_hydration_cache;

/* dm-clone metadata modes */
enum clone_metadata_mode {
	CM_WRITE,	/* metadata may be changed */
	CM_READ_ONLY,	/* metadata may not be changed */
	CM_FAIL,	/* all metadata I/O fails */
};

struct hash_table_bucket;

struct clone {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;
	struct dm_dev *dest_dev;
	struct dm_dev *source_dev;

	unsigned long nr_regions;
	sector_t region_size;
	unsigned int region_shift;

	/*
	 * A metadata commit and the actions taken in case it fails should run
	 * as a single atomic step.
	 */
	struct mutex commit_lock;

	struct dm_clone_metadata *cmd;

	/* Region hydration hash table */
	struct hash_table_bucket *ht;

	atomic_t ios_in_flight;

	wait_queue_head_t hydration_stopped;

	mempool_t hydration_pool;

	unsigned long last_commit_jiffies;

	/*
	 * We defer incoming WRITE bios for regions that are not hydrated,
	 * until after these regions have been hydrated.
	 *
	 * Also, we defer REQ_FUA and REQ_PREFLUSH bios, until after the
	 * metadata have been committed.
	 */
	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_discard_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_flush_completions;

	/* Maximum number of regions being copied during background hydration. */
	unsigned int hydration_threshold;

	/* Number of regions batched together during background hydration. */
	unsigned int hydration_batch_size;

	/* Which region to hydrate next */
	unsigned long hydration_offset;

	atomic_t hydrations_in_flight;

	/*
	 * Save a copy of the table line rather than reconstructing it for the
	 * status.
	 */
	unsigned int nr_ctr_args;
	const char **ctr_args;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	struct dm_kcopyd_client *kcopyd_client;

	enum clone_metadata_mode mode;
	unsigned long flags;
};

/*
 * dm-clone flags
 */
#define DM_CLONE_DISCARD_PASSDOWN 0
#define DM_CLONE_HYDRATION_ENABLED 1
#define DM_CLONE_HYDRATION_SUSPENDED 2

/*---------------------------------------------------------------------------*/

/*
 * Metadata failure handling.
 */
static enum clone_metadata_mode get_clone_mode(struct clone *clone)
{
	return READ_ONCE(clone->mode);
}

static const char *clone_device_name(struct clone *clone)
{
	return dm_table_device_name(clone->ti->table);
}

static void __set_clone_mode(struct clone *clone, enum clone_metadata_mode new_mode)
{
	static const char * const descs[] = {
		"read-write",
		"read-only",
		"fail"
	};

	enum clone_metadata_mode old_mode = get_clone_mode(clone);

	/* Never move out of fail mode */
	if (old_mode == CM_FAIL)
		new_mode = CM_FAIL;

	switch (new_mode) {
	case CM_FAIL:
	case CM_READ_ONLY:
		dm_clone_metadata_set_read_only(clone->cmd);
		break;

	case CM_WRITE:
		dm_clone_metadata_set_read_write(clone->cmd);
		break;
	}

	WRITE_ONCE(clone->mode, new_mode);

	if (new_mode != old_mode) {
		dm_table_event(clone->ti->table);
		DMINFO("%s: Switching to %s mode", clone_device_name(clone),
		       descs[(int)new_mode]);
	}
}

static void __abort_transaction(struct clone *clone)
{
	const char *dev_name = clone_device_name(clone);

	if (get_clone_mode(clone) >= CM_READ_ONLY)
		return;

	DMERR("%s: Aborting current metadata transaction", dev_name);
	if (dm_clone_metadata_abort(clone->cmd)) {
		DMERR("%s: Failed to abort metadata transaction", dev_name);
		__set_clone_mode(clone, CM_FAIL);
	}
}

static void __reload_in_core_bitset(struct clone *clone)
{
	const char *dev_name = clone_device_name(clone);

	if (get_clone_mode(clone) == CM_FAIL)
		return;

	/* Reload the on-disk bitset */
	DMINFO("%s: Reloading on-disk bitmap", dev_name);
	if (dm_clone_reload_in_core_bitset(clone->cmd)) {
		DMERR("%s: Failed to reload on-disk bitmap", dev_name);
		__set_clone_mode(clone, CM_FAIL);
	}
}

static void __metadata_operation_failed(struct clone *clone, const char *op, int r)
{
	DMERR("%s: Metadata operation `%s' failed: error = %d",
	      clone_device_name(clone), op, r);

	__abort_transaction(clone);
	__set_clone_mode(clone, CM_READ_ONLY);

	/*
	 * The in-core bitset might get out of sync with the on-disk state
	 * after a failed operation, so reload it to bring the two back in
	 * agreement. This is safe, since we have already switched to
	 * read-only mode.
	 */
	__reload_in_core_bitset(clone);
}

/*---------------------------------------------------------------------------*/

/* Wake up anyone waiting for region hydrations to stop */
static inline void wakeup_hydration_waiters(struct clone *clone)
{
	wake_up_all(&clone->hydration_stopped);
}

static inline void wake_worker(struct clone *clone)
{
	queue_work(clone->wq, &clone->worker);
}

/*---------------------------------------------------------------------------*/

/*
 * bio helper functions.
 */
static inline void remap_to_source(struct clone *clone, struct bio *bio)
{
	bio_set_dev(bio, clone->source_dev->bdev);
}

static inline void remap_to_dest(struct clone *clone, struct bio *bio)
{
	bio_set_dev(bio, clone->dest_dev->bdev);
}

static bool bio_triggers_commit(struct clone *clone, struct bio *bio)
{
	return op_is_flush(bio->bi_opf) &&
		dm_clone_changed_this_transaction(clone->cmd);
}

/* Get the address of the start of the given region, in sectors */
static inline sector_t region_to_sector(struct clone *clone, unsigned long region_nr)
{
	return ((sector_t)region_nr << clone->region_shift);
}

/* Get the region number of the bio */
static inline unsigned long bio_to_region(struct clone *clone, struct bio *bio)
{
	return (bio->bi_iter.bi_sector >> clone->region_shift);
}

/* Get the region range fully covered by the bio */
static void bio_region_range(struct clone *clone, struct bio *bio,
			     unsigned long *rs, unsigned long *nr_regions)
{
	unsigned long end;

	*rs = dm_sector_div_up(bio->bi_iter.bi_sector, clone->region_size);
	end = bio_end_sector(bio) >> clone->region_shift;

	if (*rs >= end)
		*nr_regions = 0;
	else
		*nr_regions = end - *rs;
}
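
/*
 * Worked example (illustrative, not part of the driver): with an 8-sector
 * region (region_shift == 3), a discard of sectors [10, 34) covers only the
 * regions it spans completely:
 *
 *	*rs = dm_sector_div_up(10, 8) = 2;	// first fully covered region
 *	end = 34 >> 3 = 4;			// first region past the range
 *	*nr_regions = 4 - 2 = 2;		// regions 2 and 3
 *
 * The partially covered regions 1 and 4 are excluded, so a discard never
 * touches sectors outside the regions it fully spans.
 */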

/* Check whether a bio overwrites a whole region */
static inline bool is_overwrite_bio(struct clone *clone, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE && bio_sectors(bio) == clone->region_size);
}

static void fail_bios(struct bio_list *bios, blk_status_t status)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios))) {
		bio->bi_status = status;
		bio_endio(bio);
	}
}

static void submit_bios(struct bio_list *bios)
{
	struct bio *bio;
	struct blk_plug plug;

	blk_start_plug(&plug);

	while ((bio = bio_list_pop(bios)))
		submit_bio_noacct(bio);

	blk_finish_plug(&plug);
}

/*
 * Submit bio to the underlying device.
 *
 * If the bio triggers a commit, delay it, until after the metadata have been
 * committed.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void issue_bio(struct clone *clone, struct bio *bio)
{
	if (!bio_triggers_commit(clone, bio)) {
		submit_bio_noacct(bio);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to commit the
	 * metadata, so we complete the bio with an error.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_flush_bios().
	 */
	spin_lock_irq(&clone->lock);
	bio_list_add(&clone->deferred_flush_bios, bio);
	spin_unlock_irq(&clone->lock);

	wake_worker(clone);
}

/*
 * Remap bio to the destination device and submit it.
 *
 * If the bio triggers a commit, delay it, until after the metadata have been
 * committed.
 */
static void remap_and_issue(struct clone *clone, struct bio *bio)
{
	remap_to_dest(clone, bio);
	issue_bio(clone, bio);
}

/*
 * Issue bios that were deferred until after their region had finished
 * hydrating.
 *
 * We delegate the bio submission to the worker thread, so this is safe to
 * call from interrupt context.
 */
static void issue_deferred_bios(struct clone *clone, struct bio_list *bios)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list flush_bios = BIO_EMPTY_LIST;
	struct bio_list normal_bios = BIO_EMPTY_LIST;

	if (bio_list_empty(bios))
		return;

	while ((bio = bio_list_pop(bios))) {
		if (bio_triggers_commit(clone, bio))
			bio_list_add(&flush_bios, bio);
		else
			bio_list_add(&normal_bios, bio);
	}

	spin_lock_irqsave(&clone->lock, flags);
	bio_list_merge(&clone->deferred_bios, &normal_bios);
	bio_list_merge(&clone->deferred_flush_bios, &flush_bios);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}

static void complete_overwrite_bio(struct clone *clone, struct bio *bio)
{
	unsigned long flags;

	/*
	 * If the bio doesn't have the REQ_FUA flag set we can signal its
	 * completion immediately. Otherwise we must commit the metadata
	 * first.
	 */
	if (!(bio->bi_opf & REQ_FUA)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to commit the
	 * metadata, so we complete the bio with an error.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_flush_bios().
	 */
	spin_lock_irqsave(&clone->lock, flags);
	bio_list_add(&clone->deferred_flush_completions, bio);
	spin_unlock_irqrestore(&clone->lock, flags);

	wake_worker(clone);
}

static void trim_bio(struct bio *bio, sector_t sector, unsigned int len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

static void complete_discard_bio(struct clone *clone, struct bio *bio, bool success)
{
	unsigned long rs, nr_regions;

	/*
	 * If the destination device supports discards and discard passdown is
	 * enabled, remap and trim the discard bio and pass it down. Otherwise
	 * complete the bio immediately.
	 */
	if (test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags) && success) {
		remap_to_dest(clone, bio);
		bio_region_range(clone, bio, &rs, &nr_regions);
		trim_bio(bio, region_to_sector(clone, rs),
			 nr_regions << clone->region_shift);
		submit_bio_noacct(bio);
	} else
		bio_endio(bio);
}

static void process_discard_bio(struct clone *clone, struct bio *bio)
{
	unsigned long rs, nr_regions;

	bio_region_range(clone, bio, &rs, &nr_regions);
	if (!nr_regions) {
		bio_endio(bio);
		return;
	}

	if (WARN_ON(rs >= clone->nr_regions || (rs + nr_regions) < rs ||
		    (rs + nr_regions) > clone->nr_regions)) {
		DMERR("%s: Invalid range (%lu + %lu, total regions %lu) for discard (%llu + %u)",
		      clone_device_name(clone), rs, nr_regions,
		      clone->nr_regions,
		      (unsigned long long)bio->bi_iter.bi_sector,
		      bio_sectors(bio));
		bio_endio(bio);
		return;
	}

	/*
	 * The covered regions are already hydrated, so we can complete the
	 * discard.
	 */
	if (dm_clone_is_range_hydrated(clone->cmd, rs, nr_regions)) {
		complete_discard_bio(clone, bio, true);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL we won't be able to update the
	 * metadata for the regions covered by the discard, so we just ignore
	 * it.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		bio_endio(bio);
		return;
	}

	/*
	 * Defer discard processing.
	 */
	spin_lock_irq(&clone->lock);
	bio_list_add(&clone->deferred_discard_bios, bio);
	spin_unlock_irq(&clone->lock);

	wake_worker(clone);
}

/*---------------------------------------------------------------------------*/

/*
 * dm-clone region hydrations.
 */
struct dm_clone_region_hydration {
	struct clone *clone;
	unsigned long region_nr;

	struct bio *overwrite_bio;
	bio_end_io_t *overwrite_bio_end_io;

	struct bio_list deferred_bios;

	blk_status_t status;

	/* Used by hydration batching */
	struct list_head list;

	/* Used by hydration hash table */
	struct hlist_node h;
};

/*
 * Hydration hash table implementation.
 *
 * Ideally we would like to use list_bl, which uses bit spin locks and employs
 * the least significant bit of the list head to lock the corresponding bucket,
 * reducing the memory overhead for the locks. But, currently, list_bl doesn't
 * offer IRQ-safe locking, and we have to take the bucket lock from both
 * process and interrupt context, so we fall back to an array of regular
 * spinlocks, one per hash table bucket.
 */
struct hash_table_bucket {
	struct hlist_head head;

	/* Spinlock protecting the bucket */
	spinlock_t lock;
};

#define bucket_lock_irqsave(bucket, flags) \
	spin_lock_irqsave(&(bucket)->lock, flags)

#define bucket_unlock_irqrestore(bucket, flags) \
	spin_unlock_irqrestore(&(bucket)->lock, flags)

#define bucket_lock_irq(bucket) \
	spin_lock_irq(&(bucket)->lock)

#define bucket_unlock_irq(bucket) \
	spin_unlock_irq(&(bucket)->lock)

static int hash_table_init(struct clone *clone)
{
	unsigned int i, sz;
	struct hash_table_bucket *bucket;

	sz = 1 << HASH_TABLE_BITS;

	clone->ht = kvmalloc(sz * sizeof(struct hash_table_bucket), GFP_KERNEL);
	if (!clone->ht)
		return -ENOMEM;

	for (i = 0; i < sz; i++) {
		bucket = clone->ht + i;

		INIT_HLIST_HEAD(&bucket->head);
		spin_lock_init(&bucket->lock);
	}

	return 0;
}

static void hash_table_exit(struct clone *clone)
{
	kvfree(clone->ht);
}

static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
							unsigned long region_nr)
{
	return &clone->ht[hash_long(region_nr, HASH_TABLE_BITS)];
}
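
/*
 * Illustrative note: the table has 1 << HASH_TABLE_BITS == 32768 buckets,
 * each with its own spinlock, so lookups and insertions for regions whose
 * numbers hash to different buckets proceed concurrently, without contending
 * on a single global lock; only operations on the same bucket serialize.
 */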

/*
 * Search hash table for a hydration with hd->region_nr == region_nr
 *
 * NOTE: Must be called with the bucket lock held
 */
static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
						     unsigned long region_nr)
{
	struct dm_clone_region_hydration *hd;

	hlist_for_each_entry(hd, &bucket->head, h) {
		if (hd->region_nr == region_nr)
			return hd;
	}

	return NULL;
}

/*
 * Insert a hydration into the hash table.
 *
 * NOTE: Must be called with the bucket lock held.
 */
static inline void __insert_region_hydration(struct hash_table_bucket *bucket,
					     struct dm_clone_region_hydration *hd)
{
	hlist_add_head(&hd->h, &bucket->head);
}

/*
 * This function inserts a hydration into the hash table, unless someone else
 * managed to insert a hydration for the same region first. In the latter case
 * it returns the existing hydration descriptor for this region.
 *
 * NOTE: Must be called with the bucket lock held.
 */
static struct dm_clone_region_hydration *
__find_or_insert_region_hydration(struct hash_table_bucket *bucket,
				  struct dm_clone_region_hydration *hd)
{
	struct dm_clone_region_hydration *hd2;

	hd2 = __hash_find(bucket, hd->region_nr);
	if (hd2)
		return hd2;

	__insert_region_hydration(bucket, hd);

	return hd;
}

/*---------------------------------------------------------------------------*/

/* Allocate a hydration */
static struct dm_clone_region_hydration *alloc_hydration(struct clone *clone)
{
	struct dm_clone_region_hydration *hd;

	/*
	 * Allocate a hydration from the hydration mempool.
	 * This might block but it can't fail.
	 */
	hd = mempool_alloc(&clone->hydration_pool, GFP_NOIO);
	hd->clone = clone;

	return hd;
}

static inline void free_hydration(struct dm_clone_region_hydration *hd)
{
	mempool_free(hd, &hd->clone->hydration_pool);
}

/* Initialize a hydration */
static void hydration_init(struct dm_clone_region_hydration *hd, unsigned long region_nr)
{
	hd->region_nr = region_nr;
	hd->overwrite_bio = NULL;
	bio_list_init(&hd->deferred_bios);
	hd->status = 0;

	INIT_LIST_HEAD(&hd->list);
	INIT_HLIST_NODE(&hd->h);
}

/*---------------------------------------------------------------------------*/

/*
 * Update dm-clone's metadata after a region has finished hydrating and remove
 * the hydration from the hash table.
 */
static int hydration_update_metadata(struct dm_clone_region_hydration *hd)
{
	int r = 0;
	unsigned long flags;
	struct hash_table_bucket *bucket;
	struct clone *clone = hd->clone;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		r = -EPERM;

	/* Update the metadata */
	if (likely(!r) && hd->status == BLK_STS_OK)
		r = dm_clone_set_region_hydrated(clone->cmd, hd->region_nr);

	bucket = get_hash_table_bucket(clone, hd->region_nr);

	/* Remove hydration from hash table */
	bucket_lock_irqsave(bucket, flags);
	hlist_del(&hd->h);
	bucket_unlock_irqrestore(bucket, flags);

	return r;
}

/*
 * Complete a region's hydration:
 *
 *	1. Update dm-clone's metadata.
 *	2. Remove hydration from hash table.
 *	3. Complete overwrite bio.
 *	4. Issue deferred bios.
 *	5. If this was the last hydration, wake up anyone waiting for
 *	   hydrations to finish.
 */
static void hydration_complete(struct dm_clone_region_hydration *hd)
{
	int r;
	blk_status_t status;
	struct clone *clone = hd->clone;

	r = hydration_update_metadata(hd);

	if (hd->status == BLK_STS_OK && likely(!r)) {
		if (hd->overwrite_bio)
			complete_overwrite_bio(clone, hd->overwrite_bio);

		issue_deferred_bios(clone, &hd->deferred_bios);
	} else {
		status = r ? BLK_STS_IOERR : hd->status;

		if (hd->overwrite_bio)
			bio_list_add(&hd->deferred_bios, hd->overwrite_bio);

		fail_bios(&hd->deferred_bios, status);
	}

	free_hydration(hd);

	if (atomic_dec_and_test(&clone->hydrations_in_flight))
		wakeup_hydration_waiters(clone);
}

static void hydration_kcopyd_callback(int read_err, unsigned long write_err, void *context)
{
	blk_status_t status;

	struct dm_clone_region_hydration *tmp, *hd = context;
	struct clone *clone = hd->clone;

	LIST_HEAD(batched_hydrations);

	if (read_err || write_err) {
		DMERR_LIMIT("%s: hydration failed", clone_device_name(clone));
		status = BLK_STS_IOERR;
	} else {
		status = BLK_STS_OK;
	}
	list_splice_tail(&hd->list, &batched_hydrations);

	hd->status = status;
	hydration_complete(hd);

	/* Complete batched hydrations */
	list_for_each_entry_safe(hd, tmp, &batched_hydrations, list) {
		hd->status = status;
		hydration_complete(hd);
	}

	/* Continue background hydration, if there is no I/O in-flight */
	if (test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
	    !atomic_read(&clone->ios_in_flight))
		wake_worker(clone);
}

static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr_regions)
{
	unsigned long region_start, region_end;
	sector_t tail_size, region_size, total_size;
	struct dm_io_region from, to;
	struct clone *clone = hd->clone;

	if (WARN_ON(!nr_regions))
		return;

	region_size = clone->region_size;
	region_start = hd->region_nr;
	region_end = region_start + nr_regions - 1;

	total_size = region_to_sector(clone, nr_regions - 1);

	if (region_end == clone->nr_regions - 1) {
		/*
		 * The last region of the target might be smaller than
		 * region_size.
		 */
		tail_size = clone->ti->len & (region_size - 1);
		if (!tail_size)
			tail_size = region_size;
	} else {
		tail_size = region_size;
	}

	total_size += tail_size;

	from.bdev = clone->source_dev->bdev;
	from.sector = region_to_sector(clone, region_start);
	from.count = total_size;

	to.bdev = clone->dest_dev->bdev;
	to.sector = from.sector;
	to.count = from.count;

	/* Issue copy */
	atomic_add(nr_regions, &clone->hydrations_in_flight);
	dm_kcopyd_copy(clone->kcopyd_client, &from, 1, &to, 0,
		       hydration_kcopyd_callback, hd);
}
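
/*
 * Worked example (illustrative): with region_size == 8 sectors and a target
 * of ti->len == 100 sectors there are dm_sector_div_up(100, 8) == 13
 * regions, and the last one covers only 100 & 7 == 4 sectors. Copying the
 * batch [11, 12] therefore transfers
 *
 *	total_size = region_to_sector(clone, 1) + tail_size
 *		   = 8 + 4 = 12 sectors,
 *
 * instead of 2 * 8 == 16, so kcopyd never reads or writes past the end of
 * the (possibly shorter) last region.
 */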

static void overwrite_endio(struct bio *bio)
{
	struct dm_clone_region_hydration *hd = bio->bi_private;

	bio->bi_end_io = hd->overwrite_bio_end_io;
	hd->status = bio->bi_status;

	hydration_complete(hd);
}

static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio *bio)
{
	/*
	 * We don't need to save and restore bio->bi_private because device
	 * mapper core generates a new bio for us to use, with clean
	 * bi_private.
	 */
	hd->overwrite_bio = bio;
	hd->overwrite_bio_end_io = bio->bi_end_io;

	bio->bi_end_io = overwrite_endio;
	bio->bi_private = hd;

	atomic_inc(&hd->clone->hydrations_in_flight);
	submit_bio_noacct(bio);
}

/*
 * Hydrate bio's region.
 *
 * This function starts the hydration of the bio's region and puts the bio in
 * the list of deferred bios for this region. In case, by the time this
 * function is called, the region has finished hydrating, the bio is submitted
 * to the destination device.
 *
 * NOTE: The bio remapping must be performed by the caller.
 */
static void hydrate_bio_region(struct clone *clone, struct bio *bio)
{
	unsigned long region_nr;
	struct hash_table_bucket *bucket;
	struct dm_clone_region_hydration *hd, *hd2;

	region_nr = bio_to_region(clone, bio);
	bucket = get_hash_table_bucket(clone, region_nr);

	bucket_lock_irq(bucket);

	hd = __hash_find(bucket, region_nr);
	if (hd) {
		/* Someone else is hydrating the region */
		bio_list_add(&hd->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		return;
	}

	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		/* The region has been hydrated */
		bucket_unlock_irq(bucket);
		issue_bio(clone, bio);
		return;
	}

	/*
	 * We must allocate a hydration descriptor and start the hydration of
	 * the corresponding region.
	 */
	bucket_unlock_irq(bucket);

	hd = alloc_hydration(clone);
	hydration_init(hd, region_nr);

	bucket_lock_irq(bucket);

	/* Check if the region has been hydrated in the meantime. */
	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		issue_bio(clone, bio);
		return;
	}

	hd2 = __find_or_insert_region_hydration(bucket, hd);
	if (hd2 != hd) {
		/* Someone else started the region's hydration. */
		bio_list_add(&hd2->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		return;
	}

	/*
	 * If the metadata mode is RO or FAIL then there is no point starting a
	 * hydration, since we won't be able to update the metadata when the
	 * hydration finishes.
	 */
	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		hlist_del(&hd->h);
		bucket_unlock_irq(bucket);
		free_hydration(hd);
		bio_io_error(bio);
		return;
	}

	/*
	 * Start region hydration.
	 *
	 * If a bio overwrites a region, i.e., its size is equal to the
	 * region's size, we don't need to copy the region from the source to
	 * the destination device.
	 */
	if (is_overwrite_bio(clone, bio)) {
		bucket_unlock_irq(bucket);
		hydration_overwrite(hd, bio);
	} else {
		bio_list_add(&hd->deferred_bios, bio);
		bucket_unlock_irq(bucket);
		hydration_copy(hd, 1);
	}
}

/*---------------------------------------------------------------------------*/

/*
 * Background hydrations.
 *
 * To better utilize device bandwidth we batch together the hydration of
 * adjacent regions. This allows us to use small region sizes, e.g., 4KB,
 * without hurting the performance of background hydration. A batch is copied
 * with a single kcopyd operation, and the hydration descriptors of the
 * batched regions are linked through the head descriptor's list field.
 */
struct batch_info {
	struct dm_clone_region_hydration *head;
	unsigned int nr_batched_regions;
};

static void __batch_hydration(struct batch_info *batch,
			      struct dm_clone_region_hydration *hd)
{
	struct clone *clone = hd->clone;
	unsigned int max_batch_size = READ_ONCE(clone->hydration_batch_size);

	if (batch->head) {
		/* Try to extend the current batch */
		if (batch->nr_batched_regions < max_batch_size &&
		    (batch->head->region_nr + batch->nr_batched_regions) == hd->region_nr) {
			list_add_tail(&hd->list, &batch->head->list);
			batch->nr_batched_regions++;
			hd = NULL;
		}

		/* Check if we should issue the current batch */
		if (batch->nr_batched_regions >= max_batch_size || hd) {
			hydration_copy(batch->head, batch->nr_batched_regions);
			batch->head = NULL;
			batch->nr_batched_regions = 0;
		}
	}

	if (!hd)
		return;

	/* We treat max batch sizes of zero and one equivalently */
	if (max_batch_size <= 1) {
		hydration_copy(hd, 1);
		return;
	}

	/* Start a new batch */
	BUG_ON(!list_empty(&hd->list));
	batch->head = hd;
	batch->nr_batched_regions = 1;
}
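
/*
 * Worked example (illustrative): with hydration_batch_size == 4, feeding
 * __batch_hydration() the unhydrated regions 7, 8, 9 and then 20 behaves as
 * follows: regions 7-9 are contiguous, so they accumulate in one batch;
 * region 20 is not adjacent to 9, so the batch [7, 9] is flushed with a
 * single hydration_copy(head, 3), and region 20 starts a new batch, issued
 * later when the batch fills up or the hydration scan ends.
 */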

static unsigned long __start_next_hydration(struct clone *clone,
					    unsigned long offset,
					    struct batch_info *batch)
{
	struct hash_table_bucket *bucket;
	struct dm_clone_region_hydration *hd;
	unsigned long nr_regions = clone->nr_regions;

	hd = alloc_hydration(clone);

	/* Try to find a region to hydrate. */
	do {
		offset = dm_clone_find_next_unhydrated_region(clone->cmd, offset);
		if (offset == nr_regions)
			break;

		bucket = get_hash_table_bucket(clone, offset);
		bucket_lock_irq(bucket);

		if (!dm_clone_is_region_hydrated(clone->cmd, offset) &&
		    !__hash_find(bucket, offset)) {
			hydration_init(hd, offset);
			__insert_region_hydration(bucket, hd);
			bucket_unlock_irq(bucket);

			/* Batch hydration */
			__batch_hydration(batch, hd);

			return (offset + 1);
		}

		bucket_unlock_irq(bucket);

	} while (++offset < nr_regions);

	if (hd)
		free_hydration(hd);

	return offset;
}

/*
 * This function searches for regions that still reside in the source device
 * and starts their hydration.
 */
static void do_hydration(struct clone *clone)
{
	unsigned int current_volume;
	unsigned long offset, nr_regions = clone->nr_regions;

	struct batch_info batch = {
		.head = NULL,
		.nr_batched_regions = 0,
	};

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		return;

	if (dm_clone_is_hydration_done(clone->cmd))
		return;

	/*
	 * Avoid race with device suspension.
	 */
	atomic_inc(&clone->hydrations_in_flight);

	/*
	 * Make sure atomic_inc() is ordered before the subsequent test of
	 * DM_CLONE_HYDRATION_SUSPENDED, otherwise we might start a new region
	 * hydration after the target has been suspended.
	 *
	 * This is paired with the smp_mb__after_atomic() in
	 * clone_postsuspend().
	 */
	smp_mb__after_atomic();

	offset = clone->hydration_offset;
	while (likely(!test_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags)) &&
	       !atomic_read(&clone->ios_in_flight) &&
	       test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags) &&
	       offset < nr_regions) {
		current_volume = atomic_read(&clone->hydrations_in_flight);
		current_volume += batch.nr_batched_regions;

		if (current_volume > READ_ONCE(clone->hydration_threshold))
			break;

		offset = __start_next_hydration(clone, offset, &batch);
	}

	if (batch.head)
		hydration_copy(batch.head, batch.nr_batched_regions);

	if (offset >= nr_regions)
		offset = 0;

	clone->hydration_offset = offset;

	if (atomic_dec_and_test(&clone->hydrations_in_flight))
		wakeup_hydration_waiters(clone);
}

/*---------------------------------------------------------------------------*/

static bool need_commit_due_to_time(struct clone *clone)
{
	return !time_in_range(jiffies, clone->last_commit_jiffies,
			      clone->last_commit_jiffies + COMMIT_PERIOD);
}
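
/*
 * Illustrative note: time_in_range() uses wraparound-safe jiffies
 * comparisons, so this returns true once more than COMMIT_PERIOD (1 sec) has
 * passed since the last commit. E.g., with last_commit_jiffies == j, any
 * jiffies value outside [j, j + HZ] forces a commit on the next
 * flush-processing pass.
 */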

/*
 * A non-zero return indicates read-only or fail mode.
 */
static int commit_metadata(struct clone *clone, bool *dest_dev_flushed)
{
	int r = 0;

	if (dest_dev_flushed)
		*dest_dev_flushed = false;

	mutex_lock(&clone->commit_lock);

	if (!dm_clone_changed_this_transaction(clone->cmd))
		goto out;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY)) {
		r = -EPERM;
		goto out;
	}

	r = dm_clone_metadata_pre_commit(clone->cmd);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "dm_clone_metadata_pre_commit", r);
		goto out;
	}

	r = blkdev_issue_flush(clone->dest_dev->bdev);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "flush destination device", r);
		goto out;
	}

	if (dest_dev_flushed)
		*dest_dev_flushed = true;

	r = dm_clone_metadata_commit(clone->cmd);
	if (unlikely(r)) {
		__metadata_operation_failed(clone, "dm_clone_metadata_commit", r);
		goto out;
	}

	if (dm_clone_is_hydration_done(clone->cmd))
		dm_table_event(clone->ti->table);
out:
	mutex_unlock(&clone->commit_lock);

	return r;
}

static void process_deferred_discards(struct clone *clone)
{
	int r = -EPERM;
	struct bio *bio;
	struct blk_plug plug;
	unsigned long rs, nr_regions;
	struct bio_list discards = BIO_EMPTY_LIST;

	spin_lock_irq(&clone->lock);
	bio_list_merge(&discards, &clone->deferred_discard_bios);
	bio_list_init(&clone->deferred_discard_bios);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&discards))
		return;

	if (unlikely(get_clone_mode(clone) >= CM_READ_ONLY))
		goto out;

	/* Update the metadata */
	bio_list_for_each(bio, &discards) {
		bio_region_range(clone, bio, &rs, &nr_regions);
		/*
		 * A discard request towards the destination device signals
		 * that the covered regions are no longer needed, so we mark
		 * them as hydrated and skip copying them from the source.
		 */
		r = dm_clone_cond_set_range(clone->cmd, rs, nr_regions);
		if (unlikely(r))
			break;
	}
out:
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&discards)))
		complete_discard_bio(clone, bio, r == 0);
	blk_finish_plug(&plug);
}

static void process_deferred_bios(struct clone *clone)
{
	struct bio_list bios = BIO_EMPTY_LIST;

	spin_lock_irq(&clone->lock);
	bio_list_merge(&bios, &clone->deferred_bios);
	bio_list_init(&clone->deferred_bios);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&bios))
		return;

	submit_bios(&bios);
}

static void process_deferred_flush_bios(struct clone *clone)
{
	struct bio *bio;
	bool dest_dev_flushed;
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio_list bio_completions = BIO_EMPTY_LIST;

	/*
	 * If there are any deferred flush bios, we must commit the metadata
	 * before issuing them or signaling their completion.
	 */
	spin_lock_irq(&clone->lock);
	bio_list_merge(&bios, &clone->deferred_flush_bios);
	bio_list_init(&clone->deferred_flush_bios);

	bio_list_merge(&bio_completions, &clone->deferred_flush_completions);
	bio_list_init(&clone->deferred_flush_completions);
	spin_unlock_irq(&clone->lock);

	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
	    !(dm_clone_changed_this_transaction(clone->cmd) && need_commit_due_to_time(clone)))
		return;

	if (commit_metadata(clone, &dest_dev_flushed)) {
		bio_list_merge(&bios, &bio_completions);

		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);

		return;
	}

	clone->last_commit_jiffies = jiffies;

	while ((bio = bio_list_pop(&bio_completions)))
		bio_endio(bio);

	while ((bio = bio_list_pop(&bios))) {
		if ((bio->bi_opf & REQ_PREFLUSH) && dest_dev_flushed) {
			/*
			 * We just flushed the destination device as part of
			 * the metadata commit, so there is no reason to send
			 * another flush.
			 */
			bio_endio(bio);
		} else {
			submit_bio_noacct(bio);
		}
	}
}

static void do_worker(struct work_struct *work)
{
	struct clone *clone = container_of(work, typeof(*clone), worker);

	process_deferred_bios(clone);
	process_deferred_discards(clone);

	/*
	 * process_deferred_flush_bios():
	 *
	 *   - Commit metadata
	 *
	 *   - Process deferred REQ_FUA completions
	 *
	 *   - Process deferred REQ_PREFLUSH bios
	 */
	process_deferred_flush_bios(clone);

	/* Background hydration */
	do_hydration(clone);
}

/*
 * Commit periodically so that not too much unwritten data builds up.
 *
 * Also, restart background hydration, if it has been stopped by in-flight I/O.
 */
static void do_waker(struct work_struct *work)
{
	struct clone *clone = container_of(to_delayed_work(work), struct clone, waker);

	wake_worker(clone);
	queue_delayed_work(clone->wq, &clone->waker, COMMIT_PERIOD);
}

/*---------------------------------------------------------------------------*/

/*
 * Target methods
 */
static int clone_map(struct dm_target *ti, struct bio *bio)
{
	struct clone *clone = ti->private;
	unsigned long region_nr;

	atomic_inc(&clone->ios_in_flight);

	if (unlikely(get_clone_mode(clone) == CM_FAIL))
		return DM_MAPIO_KILL;

	/*
	 * REQ_PREFLUSH bios carry no data:
	 *
	 * - Commit metadata, if changed
	 *
	 * - Pass down to destination device
	 */
	if (bio->bi_opf & REQ_PREFLUSH) {
		remap_and_issue(clone, bio);
		return DM_MAPIO_SUBMITTED;
	}

	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	/*
	 * dm-clone interprets discards and performs a fast hydration of the
	 * discarded regions, i.e., we skip the copy from the source device and
	 * just mark the regions as hydrated.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		process_discard_bio(clone, bio);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * If the bio's region is hydrated, redirect it to the destination
	 * device.
	 *
	 * If the region is not hydrated, but the bio is a READ, redirect it to
	 * the source device.
	 *
	 * Else, defer WRITE bio until after its region has been hydrated and
	 * start the region's hydration.
	 */
	region_nr = bio_to_region(clone, bio);
	if (dm_clone_is_region_hydrated(clone->cmd, region_nr)) {
		remap_and_issue(clone, bio);
		return DM_MAPIO_SUBMITTED;
	} else if (bio_data_dir(bio) == READ) {
		remap_to_source(clone, bio);
		return DM_MAPIO_REMAPPED;
	}

	remap_to_dest(clone, bio);
	hydrate_bio_region(clone, bio);

	return DM_MAPIO_SUBMITTED;
}

static int clone_endio(struct dm_target *ti, struct bio *bio, blk_status_t *error)
{
	struct clone *clone = ti->private;

	atomic_dec(&clone->ios_in_flight);

	return DM_ENDIO_DONE;
}

static void emit_flags(struct clone *clone, char *result, unsigned int maxlen,
		       ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	unsigned int count;

	count = !test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
	count += !test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);

	DMEMIT("%u ", count);

	if (!test_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
		DMEMIT("no_hydration ");

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
		DMEMIT("no_discard_passdown ");

	*sz_ptr = sz;
}

static void emit_core_args(struct clone *clone, char *result,
			   unsigned int maxlen, ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	unsigned int count = 4;

	DMEMIT("%u hydration_threshold %u hydration_batch_size %u ", count,
	       READ_ONCE(clone->hydration_threshold),
	       READ_ONCE(clone->hydration_batch_size));

	*sz_ptr = sz;
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <clone region size> <#hydrated regions>/<#total regions> <#hydrating regions>
 * <#feature args> [<feature arg>]* <#core args> [<core arg>]* <clone metadata mode>
 */
static void clone_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result,
			 unsigned int maxlen)
{
	int r;
	unsigned int i;
	ssize_t sz = 0;
	dm_block_t nr_free_metadata_blocks = 0;
	dm_block_t nr_metadata_blocks = 0;
	char buf[BDEVNAME_SIZE];
	struct clone *clone = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (get_clone_mode(clone) == CM_FAIL) {
			DMEMIT("Fail");
			break;
		}

		/* Commit to ensure statistics aren't out-of-date */
		if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
			(void) commit_metadata(clone, NULL);

		r = dm_clone_get_free_metadata_block_count(clone->cmd, &nr_free_metadata_blocks);

		if (r) {
			DMERR("%s: dm_clone_get_free_metadata_block_count returned %d",
			      clone_device_name(clone), r);
			goto error;
		}

		r = dm_clone_get_metadata_dev_size(clone->cmd, &nr_metadata_blocks);

		if (r) {
			DMERR("%s: dm_clone_get_metadata_dev_size returned %d",
			      clone_device_name(clone), r);
			goto error;
		}

		DMEMIT("%u %llu/%llu %llu %u/%lu %u ",
		       DM_CLONE_METADATA_BLOCK_SIZE,
		       (unsigned long long)(nr_metadata_blocks - nr_free_metadata_blocks),
		       (unsigned long long)nr_metadata_blocks,
		       (unsigned long long)clone->region_size,
		       dm_clone_nr_of_hydrated_regions(clone->cmd),
		       clone->nr_regions,
		       atomic_read(&clone->hydrations_in_flight));

		emit_flags(clone, result, maxlen, &sz);
		emit_core_args(clone, result, maxlen, &sz);

		switch (get_clone_mode(clone)) {
		case CM_WRITE:
			DMEMIT("rw");
			break;
		case CM_READ_ONLY:
			DMEMIT("ro");
			break;
		case CM_FAIL:
			DMEMIT("Fail");
		}

		break;

	case STATUSTYPE_TABLE:
		format_dev_t(buf, clone->metadata_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);

		format_dev_t(buf, clone->dest_dev->bdev->bd_dev);
		DMEMIT("%s ", buf);

		format_dev_t(buf, clone->source_dev->bdev->bd_dev);
		DMEMIT("%s", buf);

		for (i = 0; i < clone->nr_ctr_args; i++)
			DMEMIT(" %s", clone->ctr_args[i]);
		break;

	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}

	return;

error:
	DMEMIT("Error");
}
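
/*
 * Example STATUSTYPE_INFO line (illustrative, field values hypothetical):
 *
 *	<metadata block size> 72/4096 8 12288/1310720 1 0 4 \
 *	    hydration_threshold 1 hydration_batch_size 1 rw
 *
 * read as: 72 of 4096 metadata blocks used, 8-sector regions, 12288 of
 * 1310720 regions hydrated, 1 region currently hydrating, no feature args,
 * the 4 core args, and "rw" metadata mode.
 */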

static sector_t get_dev_size(struct dm_dev *dev)
{
	return bdev_nr_sectors(dev->bdev);
}

/*---------------------------------------------------------------------------*/

/*
 * Construct a clone device mapping:
 *
 * clone <metadata dev> <destination dev> <source dev> <region size>
 *	[<#feature args> [<feature arg>]* [<#core args> [key value]*]]
 *
 * metadata dev: Fast device holding the persistent metadata
 * destination dev: The destination device, which will become a clone of the
 *                  source device
 * source dev: The read-only source device that gets cloned
 * region size: dm-clone unit size in sectors
 *
 * #feature args: Number of feature arguments passed
 * feature args: E.g. no_hydration, no_discard_passdown
 *
 * #core args: An even number of core arguments
 * core args: Key/value pairs for tuning the core
 *	      E.g. 'hydration_threshold 256'
 */
static int parse_feature_args(struct dm_arg_set *as, struct clone *clone)
{
	int r;
	unsigned int argc;
	const char *arg_name;
	struct dm_target *ti = clone->ti;

	const struct dm_arg args = {
		.min = 0,
		.max = 2,
		.error = "Invalid number of feature arguments"
	};

	/* No feature arguments supplied */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(&args, as, &argc, &ti->error);
	if (r)
		return r;

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "no_hydration")) {
			__clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
		} else if (!strcasecmp(arg_name, "no_discard_passdown")) {
			__clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
		} else {
			ti->error = "Invalid feature argument";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_core_args(struct dm_arg_set *as, struct clone *clone)
{
	int r;
	unsigned int argc;
	unsigned int value;
	const char *arg_name;
	struct dm_target *ti = clone->ti;

	const struct dm_arg args = {
		.min = 0,
		.max = 4,
		.error = "Invalid number of core arguments"
	};

	/* Initialize core arguments */
	clone->hydration_batch_size = DEFAULT_HYDRATION_BATCH_SIZE;
	clone->hydration_threshold = DEFAULT_HYDRATION_THRESHOLD;

	/* No core arguments supplied */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(&args, as, &argc, &ti->error);
	if (r)
		return r;

	if (argc & 1) {
		ti->error = "Number of core arguments must be even";
		return -EINVAL;
	}

	while (argc) {
		arg_name = dm_shift_arg(as);
		argc -= 2;

		if (!strcasecmp(arg_name, "hydration_threshold")) {
			if (kstrtouint(dm_shift_arg(as), 10, &value)) {
				ti->error = "Invalid value for argument `hydration_threshold'";
				return -EINVAL;
			}
			clone->hydration_threshold = value;
		} else if (!strcasecmp(arg_name, "hydration_batch_size")) {
			if (kstrtouint(dm_shift_arg(as), 10, &value)) {
				ti->error = "Invalid value for argument `hydration_batch_size'";
				return -EINVAL;
			}
			clone->hydration_batch_size = value;
		} else {
			ti->error = "Invalid core argument";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_region_size(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	unsigned int region_size;
	struct dm_arg arg;

	arg.min = MIN_REGION_SIZE;
	arg.max = MAX_REGION_SIZE;
	arg.error = "Invalid region size";

	r = dm_read_arg(&arg, as, &region_size, error);
	if (r)
		return r;

	/* Check region size is a power of 2 */
	if (!is_power_of_2(region_size)) {
		*error = "Region size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the region size against the device logical block size */
	if (region_size % (bdev_logical_block_size(clone->source_dev->bdev) >> 9) ||
	    region_size % (bdev_logical_block_size(clone->dest_dev->bdev) >> 9)) {
		*error = "Region size is not a multiple of device logical block size";
		return -EINVAL;
	}

	clone->region_size = region_size;

	return 0;
}

static int validate_nr_regions(unsigned long n, char **error)
{
	/*
	 * dm_bitset restricts us to 2^32 regions. test_bit & co. restrict us
	 * further to 2^31 regions.
	 */
	if (n > (1UL << 31)) {
		*error = "Too many regions. Consider increasing the region size";
		return -EINVAL;
	}

	return 0;
}

static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t metadata_dev_size;

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &clone->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(clone->metadata_dev);
	if (metadata_dev_size > DM_CLONE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
		       clone->metadata_dev->bdev, DM_CLONE_METADATA_MAX_SECTORS);

	return 0;
}

static int parse_dest_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t dest_dev_size;

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &clone->dest_dev);
	if (r) {
		*error = "Error opening destination device";
		return r;
	}

	dest_dev_size = get_dev_size(clone->dest_dev);
	if (dest_dev_size < clone->ti->len) {
		dm_put_device(clone->ti, clone->dest_dev);
		*error = "Device size larger than destination device";
		return -EINVAL;
	}

	return 0;
}

static int parse_source_dev(struct clone *clone, struct dm_arg_set *as, char **error)
{
	int r;
	sector_t source_dev_size;

	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ,
			  &clone->source_dev);
	if (r) {
		*error = "Error opening source device";
		return r;
	}

	source_dev_size = get_dev_size(clone->source_dev);
	if (source_dev_size < clone->ti->len) {
		dm_put_device(clone->ti, clone->source_dev);
		*error = "Device size larger than source device";
		return -EINVAL;
	}

	return 0;
}

static int copy_ctr_args(struct clone *clone, int argc, const char **argv, char **error)
{
	unsigned int i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		goto error;

	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);

		if (!copy[i]) {
			while (i--)
				kfree(copy[i]);
			kfree(copy);
			goto error;
		}
	}

	clone->nr_ctr_args = argc;
	clone->ctr_args = copy;
	return 0;

error:
	*error = "Failed to allocate memory for table line";
	return -ENOMEM;
}

static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	sector_t nr_regions;
	struct clone *clone;
	struct dm_arg_set as;

	if (argc < 4) {
		ti->error = "Invalid number of arguments";
		return -EINVAL;
	}

	as.argc = argc;
	as.argv = argv;

	clone = kzalloc(sizeof(*clone), GFP_KERNEL);
	if (!clone) {
		ti->error = "Failed to allocate clone structure";
		return -ENOMEM;
	}

	clone->ti = ti;

	/* Initialize dm-clone flags */
	__set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
	__set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
	__set_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);

	r = parse_metadata_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_clone;

	r = parse_dest_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_meta_dev;

	r = parse_source_dev(clone, &as, &ti->error);
	if (r)
		goto out_with_dest_dev;

	r = parse_region_size(clone, &as, &ti->error);
	if (r)
		goto out_with_source_dev;

	clone->region_shift = __ffs(clone->region_size);
	nr_regions = dm_sector_div_up(ti->len, clone->region_size);

	/* Check for overflow */
	if (nr_regions != (unsigned long)nr_regions) {
		ti->error = "Too many regions. Consider increasing the region size";
		r = -EOVERFLOW;
		goto out_with_source_dev;
	}

	clone->nr_regions = nr_regions;

	r = validate_nr_regions(clone->nr_regions, &ti->error);
	if (r)
		goto out_with_source_dev;

	r = dm_set_target_max_io_len(ti, clone->region_size);
	if (r) {
		ti->error = "Failed to set max io len";
		goto out_with_source_dev;
	}

	r = parse_feature_args(&as, clone);
	if (r)
		goto out_with_source_dev;

	r = parse_core_args(&as, clone);
	if (r)
		goto out_with_source_dev;

	/* Load metadata */
	clone->cmd = dm_clone_metadata_open(clone->metadata_dev->bdev, ti->len,
					    clone->region_size);
	if (IS_ERR(clone->cmd)) {
		ti->error = "Failed to load metadata";
		r = PTR_ERR(clone->cmd);
		goto out_with_source_dev;
	}

	__set_clone_mode(clone, CM_WRITE);

	if (get_clone_mode(clone) != CM_WRITE) {
		ti->error = "Unable to get write access to metadata, please check/repair metadata";
		r = -EPERM;
		goto out_with_metadata;
	}

	clone->last_commit_jiffies = jiffies;

	/* Allocate hydration hash table */
	r = hash_table_init(clone);
	if (r) {
		ti->error = "Failed to allocate hydration hash table";
		goto out_with_metadata;
	}

	atomic_set(&clone->ios_in_flight, 0);
	init_waitqueue_head(&clone->hydration_stopped);
	spin_lock_init(&clone->lock);
	bio_list_init(&clone->deferred_bios);
	bio_list_init(&clone->deferred_discard_bios);
	bio_list_init(&clone->deferred_flush_bios);
	bio_list_init(&clone->deferred_flush_completions);
	clone->hydration_offset = 0;
	atomic_set(&clone->hydrations_in_flight, 0);

	clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
	if (!clone->wq) {
		ti->error = "Failed to allocate workqueue";
		r = -ENOMEM;
		goto out_with_ht;
	}

	INIT_WORK(&clone->worker, do_worker);
	INIT_DELAYED_WORK(&clone->waker, do_waker);

	clone->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(clone->kcopyd_client)) {
		r = PTR_ERR(clone->kcopyd_client);
		goto out_with_wq;
	}

	r = mempool_init_slab_pool(&clone->hydration_pool, MIN_HYDRATIONS,
				   _hydration_cache);
	if (r) {
		ti->error = "Failed to create dm_clone_region_hydration memory pool";
		goto out_with_kcopyd;
	}

	/* Save a copy of the table line */
	r = copy_ctr_args(clone, argc - 3, (const char **)argv + 3, &ti->error);
	if (r)
		goto out_with_mempool;

	mutex_init(&clone->commit_lock);

	/* Enable flushes */
	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	/* Enable discards */
	ti->discards_supported = true;
	ti->num_discard_bios = 1;

	ti->private = clone;

	return 0;

out_with_mempool:
	mempool_exit(&clone->hydration_pool);
out_with_kcopyd:
	dm_kcopyd_client_destroy(clone->kcopyd_client);
out_with_wq:
	destroy_workqueue(clone->wq);
out_with_ht:
	hash_table_exit(clone);
out_with_metadata:
	dm_clone_metadata_close(clone->cmd);
out_with_source_dev:
	dm_put_device(ti, clone->source_dev);
out_with_dest_dev:
	dm_put_device(ti, clone->dest_dev);
out_with_meta_dev:
	dm_put_device(ti, clone->metadata_dev);
out_with_clone:
	kfree(clone);

	return r;
}
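
/*
 * Example usage (illustrative; device names and sizes hypothetical): clone a
 * 1GiB (2097152-sector) source to a destination device using 8-sector
 * regions:
 *
 *	dmsetup create clone --table \
 *	    "0 2097152 clone /dev/sdb /dev/sdc /dev/sda 8 1 no_hydration"
 *
 * Here /dev/sdb holds the metadata, /dev/sdc is the destination, /dev/sda is
 * the read-only source, and background hydration starts disabled; it can be
 * enabled later via the "enable_hydration" message (see clone_message()).
 */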

static void clone_dtr(struct dm_target *ti)
{
	unsigned int i;
	struct clone *clone = ti->private;

	mutex_destroy(&clone->commit_lock);

	for (i = 0; i < clone->nr_ctr_args; i++)
		kfree(clone->ctr_args[i]);
	kfree(clone->ctr_args);

	mempool_exit(&clone->hydration_pool);
	dm_kcopyd_client_destroy(clone->kcopyd_client);
	destroy_workqueue(clone->wq);
	hash_table_exit(clone);
	dm_clone_metadata_close(clone->cmd);
	dm_put_device(ti, clone->source_dev);
	dm_put_device(ti, clone->dest_dev);
	dm_put_device(ti, clone->metadata_dev);

	kfree(clone);
}

/*---------------------------------------------------------------------------*/

static void clone_postsuspend(struct dm_target *ti)
{
	struct clone *clone = ti->private;

	/*
	 * To successfully suspend the device:
	 *
	 *	- We cancel the delayed work for periodic commits and wait for
	 *	  it to finish.
	 *
	 *	- We stop the background hydration, i.e. we prevent new region
	 *	  hydrations from starting.
	 *
	 *	- We wait for any in-flight hydrations to finish.
	 *
	 *	- We flush the workqueue.
	 *
	 *	- We commit the metadata.
	 */
	cancel_delayed_work_sync(&clone->waker);

	set_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);

	/*
	 * Make sure set_bit() is ordered before the subsequent atomic_read(),
	 * otherwise we might race with do_hydration() and miss a hydration
	 * started after the target has been suspended.
	 *
	 * This is paired with the smp_mb__after_atomic() in do_hydration().
	 */
	smp_mb__after_atomic();

	wait_event(clone->hydration_stopped, !atomic_read(&clone->hydrations_in_flight));
	flush_workqueue(clone->wq);

	(void) commit_metadata(clone, NULL);
}

static void clone_resume(struct dm_target *ti)
{
	struct clone *clone = ti->private;

	clear_bit(DM_CLONE_HYDRATION_SUSPENDED, &clone->flags);
	do_waker(&clone->waker.work);
}

/*---------------------------------------------------------------------------*/

/*
 * If discard_passdown was enabled verify that the destination device supports
 * discards. Disable discard_passdown if not.
 */
static void disable_passdown_if_not_supported(struct clone *clone)
{
	struct block_device *dest_dev = clone->dest_dev->bdev;
	struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;
	const char *reason = NULL;

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
		return;

	if (!bdev_max_discard_sectors(dest_dev))
		reason = "discard unsupported";
	else if (dest_limits->max_discard_sectors < clone->region_size)
		reason = "max discard sectors smaller than a region";

	if (reason) {
		DMWARN("Destination device (%pg) %s: Disabling discard passdown.",
		       dest_dev, reason);
		clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
	}
}

static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
{
	struct block_device *dest_bdev = clone->dest_dev->bdev;
	struct queue_limits *dest_limits = &bdev_get_queue(dest_bdev)->limits;

	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
		/* No passdown is done so we set our own virtual limits */
		limits->discard_granularity = clone->region_size << SECTOR_SHIFT;
		limits->max_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT, clone->region_size);
		return;
	}

	/*
	 * clone_iterate_devices() stacks both the source and destination
	 * device limits, but discards aren't passed to the source device, so
	 * inherit the destination's limits.
	 */
	limits->max_discard_sectors = dest_limits->max_discard_sectors;
	limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
	limits->discard_granularity = dest_limits->discard_granularity;
	limits->discard_alignment = dest_limits->discard_alignment;
	limits->discard_misaligned = dest_limits->discard_misaligned;
	limits->max_discard_segments = dest_limits->max_discard_segments;
}

static void clone_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct clone *clone = ti->private;
	u64 io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If the system-determined stacked limits are compatible with
	 * dm-clone's region size (io_opt is a factor) then use it.
	 *
	 * Otherwise, use the region size as the io hint.
	 */
	if (io_opt_sectors < clone->region_size ||
	    do_div(io_opt_sectors, clone->region_size)) {
		blk_limits_io_min(limits, clone->region_size << SECTOR_SHIFT);
		blk_limits_io_opt(limits, clone->region_size << SECTOR_SHIFT);
	}

	disable_passdown_if_not_supported(clone);
	set_discard_limits(clone, limits);
}

static int clone_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	int ret;
	struct clone *clone = ti->private;
	struct dm_dev *dest_dev = clone->dest_dev;
	struct dm_dev *source_dev = clone->source_dev;

	ret = fn(ti, source_dev, 0, ti->len, data);
	if (!ret)
		ret = fn(ti, dest_dev, 0, ti->len, data);
	return ret;
}

/*
 * dm-clone message functions.
 */
static void set_hydration_threshold(struct clone *clone, unsigned int nr_regions)
{
	WRITE_ONCE(clone->hydration_threshold, nr_regions);

	/*
	 * If user space sets hydration_threshold to zero then the hydration
	 * will stop. If at a later time the hydration threshold is increased
	 * we must restart the hydration process by waking up the worker.
	 */
	wake_worker(clone);
}

static void set_hydration_batch_size(struct clone *clone, unsigned int nr_regions)
{
	WRITE_ONCE(clone->hydration_batch_size, nr_regions);
}

static void enable_hydration(struct clone *clone)
{
	if (!test_and_set_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags))
		wake_worker(clone);
}

static void disable_hydration(struct clone *clone)
{
	clear_bit(DM_CLONE_HYDRATION_ENABLED, &clone->flags);
}

static int clone_message(struct dm_target *ti, unsigned int argc, char **argv,
			 char *result, unsigned int maxlen)
{
	struct clone *clone = ti->private;
	unsigned int value;

	if (!argc)
		return -EINVAL;

	if (!strcasecmp(argv[0], "enable_hydration")) {
		enable_hydration(clone);
		return 0;
	}

	if (!strcasecmp(argv[0], "disable_hydration")) {
		disable_hydration(clone);
		return 0;
	}

	if (argc != 2)
		return -EINVAL;

	if (!strcasecmp(argv[0], "hydration_threshold")) {
		if (kstrtouint(argv[1], 10, &value))
			return -EINVAL;

		set_hydration_threshold(clone, value);

		return 0;
	}

	if (!strcasecmp(argv[0], "hydration_batch_size")) {
		if (kstrtouint(argv[1], 10, &value))
			return -EINVAL;

		set_hydration_batch_size(clone, value);

		return 0;
	}

	DMERR("%s: Unsupported message `%s'", clone_device_name(clone), argv[0]);
	return -EINVAL;
}
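
/*
 * Example usage (illustrative; device name hypothetical):
 *
 *	dmsetup message clone 0 hydration_threshold 256
 *	dmsetup message clone 0 hydration_batch_size 64
 *	dmsetup message clone 0 disable_hydration
 *
 * The new threshold and batch size are picked up by the next pass of
 * do_hydration(), which reads them with READ_ONCE().
 */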

static struct target_type clone_target = {
	.name = "clone",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr = clone_ctr,
	.dtr = clone_dtr,
	.map = clone_map,
	.end_io = clone_endio,
	.postsuspend = clone_postsuspend,
	.resume = clone_resume,
	.status = clone_status,
	.message = clone_message,
	.io_hints = clone_io_hints,
	.iterate_devices = clone_iterate_devices,
};

/*---------------------------------------------------------------------------*/

/* Module functions */
static int __init dm_clone_init(void)
{
	int r;

	_hydration_cache = KMEM_CACHE(dm_clone_region_hydration, 0);
	if (!_hydration_cache)
		return -ENOMEM;

	r = dm_register_target(&clone_target);
	if (r < 0) {
		DMERR("Failed to register clone target");
		/* Don't leak the slab cache if registration fails */
		kmem_cache_destroy(_hydration_cache);
		return r;
	}

	return 0;
}

static void __exit dm_clone_exit(void)
{
	dm_unregister_target(&clone_target);

	kmem_cache_destroy(_hydration_cache);
	_hydration_cache = NULL;
}

/* Module hooks */
module_init(dm_clone_init);
module_exit(dm_clone_exit);

MODULE_DESCRIPTION(DM_NAME " clone target");
MODULE_AUTHOR("Nikos Tsironis <ntsironis@arrikto.com>");
MODULE_LICENSE("GPL");