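/*
 * dm-raid1: the device-mapper mirror (RAID1) target.
 */
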
#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */

#define MAX_NR_MIRRORS	(DM_KCOPYD_MAX_REGIONS + 1)

#define DM_RAID1_HANDLE_ERRORS	0x01
#define DM_RAID1_KEEP_LOG	0x02
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
#define keep_log(p)		((p)->features & DM_RAID1_KEEP_LOG)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
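
/*
 * Mirror set structures.
 */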
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned int nr_mirrors;
	struct mirror mirror[];
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
		"A percentage of time allocated for raid resynchronization");
static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(struct timer_list *t)
{
	struct mirror_set *ms = from_timer(ms, t, timer);

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

struct dm_raid1_bio_record {
	struct mirror *m;
	/* if details->bi_bdev == NULL, details were not saved */
	struct dm_bio_details details;
	region_t write_region;
};
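
/*
 * Index of the mirror that reads and resync copies default to.
 */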
#define DEFAULT_MIRROR 0
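
/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */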
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}
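
/*
 * fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum's, DM_RAID1_*_ERROR
 *
 * Record the type of error on the mirror and count it.  If this is
 * the default mirror and another valid device remains, switch the
 * default over; either way, notify userspace via a table event.
 */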
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync && !keep_log(ms)) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: Reads may fail.",
		      m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[MAX_NR_MIRRORS];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}
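
/*
 *---------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all other mirrors.
 *---------------------------------------------------------------
 */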
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static void recover(struct mirror_set *ms, struct dm_region *reg)
{
	unsigned int i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		flags |= BIT(DM_KCOPYD_IGNORE_ERROR);

	dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
		       flags, recovery_complete, reg);
}

static void reset_ms_flags(struct mirror_set *ms)
{
	unsigned int m;

	ms->leg_failure = 0;
	for (m = 0; m < ms->nr_mirrors; m++) {
		atomic_set(&(ms->mirror[m].error_count), 0);
		ms->mirror[m].error_type = 0;
	}
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh)))
		recover(ms, reg);

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
		reset_ms_flags(ms);
	}
}
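
/*
 *---------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------
 */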
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;

	return 0;
}
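
/*
 * remap a buffer to a particular mirror.
 */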
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_iter.bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio_set_dev(bio, m->dev->bdev);
	bio->bi_iter.bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio_sectors(bio);
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio->bi_status = BLK_STS_DM_REQUEUE;
		else
			bio->bi_status = BLK_STS_IOERR;

		bio_endio(bio);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}
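
/*
 * Completion callback for reads issued through dm_io.
 */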
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s. Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_data_dir(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
		    m->dev->name);
	bio_io_error(bio);
}

static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_READ,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_iter.bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_io_error(bio);
	}
}
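
/*
 *---------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, write to all legs
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------
 */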
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the bio is discard, return an error, but do not
	 * degrade the array.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		bio->bi_status = BLK_STS_NOTSUPP;
		bio_endio(bio);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event.  Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
	struct mirror *m;
	blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);
	struct dm_io_request io_req = {
		.bi_opf = REQ_OP_WRITE | op_flags,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio_op(bio) == REQ_OP_DISCARD) {
		io_req.bi_opf = REQ_OP_DISCARD | op_flags;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_opf & REQ_PREFLUSH) ||
		    (bio_op(bio) == REQ_OP_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue.
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction when the log has been marked errored.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			submit_bio_noacct(bio);
		}
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If the device has failed and keep_log is enabled,
		 * fail the I/O.
		 *
		 * If we have been told to handle errors, and keep_log
		 * isn't enabled, hold the bio and wait for userspace to
		 * deal with the problem.
		 *
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got re-added, but that's extremely unlikely.  However the
		 * first remap done after the log is marked must succeed.)
		 */
		if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
			bio_io_error(bio);
		else if (errors_handled(ms) && !keep_log(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}
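
/*
 *---------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------
 */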
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}
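
/*
 *---------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------
 */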
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	struct mirror_set *ms =
		kzalloc(struct_size(ms, mirror, nr_mirrors), GFP_KERNEL);

	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;
	char dummy;
	int ret;

	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1 ||
	    offset != (sector_t)offset) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &ms->mirror[mirror].dev);
	if (ret) {
		ti->error = "Device lookup failure";
		return ret;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}
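
/*
 * Create dirty log: log_type #log_params <log_params>
 */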
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned int argc, char **argv,
					     unsigned int *args_used)
{
	unsigned int param_count;
	struct dm_dirty_log *dl;
	char dummy;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned int argc, char **argv,
			  unsigned int *args_used)
{
	unsigned int num_features;
	struct dm_target *ti = ms->ti;
	char dummy;
	int i;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	for (i = 0; i < num_features; i++) {
		if (!strcmp("handle_errors", argv[0]))
			ms->features |= DM_RAID1_HANDLE_ERRORS;
		else if (!strcmp("keep_log", argv[0]))
			ms->features |= DM_RAID1_KEEP_LOG;
		else {
			ti->error = "Unrecognised feature requested";
			return -EINVAL;
		}

		argc--;
		argv++;
		(*args_used)++;
	}
	if (!errors_handled(ms) && keep_log(ms)) {
		ti->error = "keep_log feature requires the handle_errors feature";
		return -EINVAL;
	}

	return 0;
}
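
/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, supported features are "handle_errors" and "keep_log".
 */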
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;
	char dummy;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;

	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
	if (r)
		goto err_free_context;

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);

	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	timer_setup(&ms->timer, delayed_wake_fn, 0);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and couldn't
	 * use the sync state described.
	 */
	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}
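
/*
 * Mirror mapping function
 */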
static int mirror_map(struct dm_target *ti, struct bio *bio)
{
	int r, rw = bio_data_dir(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	bio_record->details.bi_bdev = NULL;

	if (rw == WRITE) {
		/* Save region for mirror_end_io */
		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return DM_MAPIO_KILL;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (bio->bi_opf & REQ_RAHEAD)
			return DM_MAPIO_KILL;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_iter.bi_sector);
	if (unlikely(!m))
		return DM_MAPIO_KILL;

	dm_bio_record(&bio_record->details, bio);
	bio_record->m = m;

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	int rw = bio_data_dir(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_opf & REQ_PREFLUSH) &&
		    bio_op(bio) != REQ_OP_DISCARD)
			dm_rh_dec(ms->rh, bio_record->write_region);
		return DM_ENDIO_DONE;
	}

	if (*error == BLK_STS_NOTSUPP)
		goto out;

	if (bio->bi_opf & REQ_RAHEAD)
		goto out;

	if (unlikely(*error)) {
		if (!bio_record->details.bi_bdev) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return DM_ENDIO_DONE;
		}

		m = bio_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an
		 * intact mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &bio_record->details;

			dm_bio_restore(bd, bio);
			bio_record->details.bi_bdev = NULL;
			bio->bi_status = 0;

			queue_bio(ms, bio, rw);
			return DM_ENDIO_INCOMPLETE;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	bio_record->details.bi_bdev = NULL;

	return DM_ENDIO_DONE;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process bios in the hold list to start recovery waiting
	 * for bios in the hold list. After the process, no bio has
	 * a chance to be added in the hold list because ms->suspend
	 * is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}
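
/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *	A => Alive - No failures
 *	F => Flush failure occurred
 *	D => Dead - A write failure occurred leaving mirror out-of-sync
 *	S => Sync - A synchronization failure occurred, mirror out-of-sync
 *	R => Read - A read failure occurred, mirror data unaffected
 *	U => Unclassified failure
 *
 * Returns: <char>
 */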
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

static void mirror_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result,
			  unsigned int maxlen)
{
	unsigned int m, sz = 0;
	int num_feature_args = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[MAX_NR_MIRRORS + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		num_feature_args += !!errors_handled(ms);
		num_feature_args += !!keep_log(ms);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (errors_handled(ms))
				DMEMIT(" handle_errors");
			if (keep_log(ms))
				DMEMIT(" keep_log");
		}

		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",nr_mirrors=%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT(",mirror_device_%d=%s", m, ms->mirror[m].dev->name);
			DMEMIT(",mirror_device_%d_status=%c",
			       m, device_status_char(&(ms->mirror[m])));
		}

		DMEMIT(",handle_errors=%c", errors_handled(ms) ? 'y' : 'n');
		DMEMIT(",keep_log=%c", keep_log(ms) ? 'y' : 'n');

		DMEMIT(",log_type_status=");
		sz += log->type->status(log, type, result+sz, maxlen-sz);
		DMEMIT(";");
		break;
	}
}

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned int i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 14, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");