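/*
 * dm-raid: device-mapper target wrapping MD's raid0/1/10 and raid4/5/6
 * personalities.  The raid set is configured entirely through constructor
 * arguments; MD superblock autodetection is not used.
 */
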
#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "md-bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
#define MAX_RAID_DEVICES 253

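/*
 * Minimum sectors of free reshape space per raid device.
 */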
#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)

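/*
 * Minimum raid4/5/6 journal space: 4 MiB expressed in sectors.
 */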
#define MIN_RAID456_JOURNAL_SPACE (4*2048)

static bool devices_handle_discard_safely = false;

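/*
 * rdev flag bit private to this target: set in super_load() when a fresh
 * superblock had to be written for a device, i.e. the device is new to the set.
 */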
#define FirstUse 10

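/*
 * Per-device state: an optional metadata device, the data device, and the
 * MD rdev tying the pair into the MD core.  Metadata and data are kept on
 * separate dm devices for flexibility and so that ti->len covers data only.
 */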
struct raid_dev {
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};

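/*
 * Bit numbers of the constructor argument flags, used with
 * test/set_bit() on rs->ctr_flags.
 */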
#define __CTR_FLAG_SYNC 0
#define __CTR_FLAG_NOSYNC 1
#define __CTR_FLAG_REBUILD 2
#define __CTR_FLAG_DAEMON_SLEEP 3
#define __CTR_FLAG_MIN_RECOVERY_RATE 4
#define __CTR_FLAG_MAX_RECOVERY_RATE 5
#define __CTR_FLAG_MAX_WRITE_BEHIND 6
#define __CTR_FLAG_WRITE_MOSTLY 7
#define __CTR_FLAG_STRIPE_CACHE 8
#define __CTR_FLAG_REGION_SIZE 9
#define __CTR_FLAG_RAID10_COPIES 10
#define __CTR_FLAG_RAID10_FORMAT 11

#define __CTR_FLAG_DELTA_DISKS 12
#define __CTR_FLAG_DATA_OFFSET 13
#define __CTR_FLAG_RAID10_USE_NEAR_SETS 14

#define __CTR_FLAG_JOURNAL_DEV 15

#define __CTR_FLAG_JOURNAL_MODE 16

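/*
 * Single-bit masks derived from the bit numbers above.
 */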
#define CTR_FLAG_SYNC (1 << __CTR_FLAG_SYNC)
#define CTR_FLAG_NOSYNC (1 << __CTR_FLAG_NOSYNC)
#define CTR_FLAG_REBUILD (1 << __CTR_FLAG_REBUILD)
#define CTR_FLAG_DAEMON_SLEEP (1 << __CTR_FLAG_DAEMON_SLEEP)
#define CTR_FLAG_MIN_RECOVERY_RATE (1 << __CTR_FLAG_MIN_RECOVERY_RATE)
#define CTR_FLAG_MAX_RECOVERY_RATE (1 << __CTR_FLAG_MAX_RECOVERY_RATE)
#define CTR_FLAG_MAX_WRITE_BEHIND (1 << __CTR_FLAG_MAX_WRITE_BEHIND)
#define CTR_FLAG_WRITE_MOSTLY (1 << __CTR_FLAG_WRITE_MOSTLY)
#define CTR_FLAG_STRIPE_CACHE (1 << __CTR_FLAG_STRIPE_CACHE)
#define CTR_FLAG_REGION_SIZE (1 << __CTR_FLAG_REGION_SIZE)
#define CTR_FLAG_RAID10_COPIES (1 << __CTR_FLAG_RAID10_COPIES)
#define CTR_FLAG_RAID10_FORMAT (1 << __CTR_FLAG_RAID10_FORMAT)
#define CTR_FLAG_DELTA_DISKS (1 << __CTR_FLAG_DELTA_DISKS)
#define CTR_FLAG_DATA_OFFSET (1 << __CTR_FLAG_DATA_OFFSET)
#define CTR_FLAG_RAID10_USE_NEAR_SETS (1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
#define CTR_FLAG_JOURNAL_DEV (1 << __CTR_FLAG_JOURNAL_DEV)
#define CTR_FLAG_JOURNAL_MODE (1 << __CTR_FLAG_JOURNAL_MODE)

#define CTR_FLAGS_ANY_SYNC (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)

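/* Constructor options that take no argument. */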
#define CTR_FLAG_OPTIONS_NO_ARGS (CTR_FLAGS_ANY_SYNC | \
				  CTR_FLAG_RAID10_USE_NEAR_SETS)

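/* Constructor options that take exactly one argument. */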
#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
				  CTR_FLAG_WRITE_MOSTLY | \
				  CTR_FLAG_DAEMON_SLEEP | \
				  CTR_FLAG_MIN_RECOVERY_RATE | \
				  CTR_FLAG_MAX_RECOVERY_RATE | \
				  CTR_FLAG_MAX_WRITE_BEHIND | \
				  CTR_FLAG_STRIPE_CACHE | \
				  CTR_FLAG_REGION_SIZE | \
				  CTR_FLAG_RAID10_COPIES | \
				  CTR_FLAG_RAID10_FORMAT | \
				  CTR_FLAG_DELTA_DISKS | \
				  CTR_FLAG_DATA_OFFSET | \
				  CTR_FLAG_JOURNAL_DEV | \
				  CTR_FLAG_JOURNAL_MODE)

#define RAID0_VALID_FLAGS (CTR_FLAG_DATA_OFFSET)

#define RAID1_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
			   CTR_FLAG_REBUILD | \
			   CTR_FLAG_WRITE_MOSTLY | \
			   CTR_FLAG_DAEMON_SLEEP | \
			   CTR_FLAG_MIN_RECOVERY_RATE | \
			   CTR_FLAG_MAX_RECOVERY_RATE | \
			   CTR_FLAG_MAX_WRITE_BEHIND | \
			   CTR_FLAG_REGION_SIZE | \
			   CTR_FLAG_DELTA_DISKS | \
			   CTR_FLAG_DATA_OFFSET)

#define RAID10_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
			    CTR_FLAG_REBUILD | \
			    CTR_FLAG_DAEMON_SLEEP | \
			    CTR_FLAG_MIN_RECOVERY_RATE | \
			    CTR_FLAG_MAX_RECOVERY_RATE | \
			    CTR_FLAG_REGION_SIZE | \
			    CTR_FLAG_RAID10_COPIES | \
			    CTR_FLAG_RAID10_FORMAT | \
			    CTR_FLAG_DELTA_DISKS | \
			    CTR_FLAG_DATA_OFFSET | \
			    CTR_FLAG_RAID10_USE_NEAR_SETS)

#define RAID45_VALID_FLAGS (CTR_FLAGS_ANY_SYNC | \
			    CTR_FLAG_REBUILD | \
			    CTR_FLAG_DAEMON_SLEEP | \
			    CTR_FLAG_MIN_RECOVERY_RATE | \
			    CTR_FLAG_MAX_RECOVERY_RATE | \
			    CTR_FLAG_STRIPE_CACHE | \
			    CTR_FLAG_REGION_SIZE | \
			    CTR_FLAG_DELTA_DISKS | \
			    CTR_FLAG_DATA_OFFSET | \
			    CTR_FLAG_JOURNAL_DEV | \
			    CTR_FLAG_JOURNAL_MODE)

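/*
 * "raid6" does not accept "nosync": it cannot be guaranteed that both
 * parity and Q-syndrome are valid without an initial synchronization.
 */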
#define RAID6_VALID_FLAGS (CTR_FLAG_SYNC | \
			   CTR_FLAG_REBUILD | \
			   CTR_FLAG_DAEMON_SLEEP | \
			   CTR_FLAG_MIN_RECOVERY_RATE | \
			   CTR_FLAG_MAX_RECOVERY_RATE | \
			   CTR_FLAG_STRIPE_CACHE | \
			   CTR_FLAG_REGION_SIZE | \
			   CTR_FLAG_DELTA_DISKS | \
			   CTR_FLAG_DATA_OFFSET | \
			   CTR_FLAG_JOURNAL_DEV | \
			   CTR_FLAG_JOURNAL_MODE)

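/*
 * Runtime state bits kept in rs->runtime_flags (as opposed to the
 * constructor-supplied rs->ctr_flags).
 */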
#define RT_FLAG_RS_PRERESUMED 0
#define RT_FLAG_RS_RESUMED 1
#define RT_FLAG_RS_BITMAP_LOADED 2
#define RT_FLAG_UPDATE_SBS 3
#define RT_FLAG_RESHAPE_RS 4
#define RT_FLAG_RS_SUSPENDED 5
#define RT_FLAG_RS_IN_SYNC 6
#define RT_FLAG_RS_RESYNCING 7
#define RT_FLAG_RS_GROW 8

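/* Number of 64-bit words needed to hold one bit per possible raid device. */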
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)

struct rs_layout {
	int new_level;
	int new_layout;
	int new_chunk_sectors;
};

struct raid_set {
	struct dm_target *ti;

	uint32_t stripe_cache_entries;
	unsigned long ctr_flags;
	unsigned long runtime_flags;

	uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];

	int raid_disks;
	int delta_disks;
	int data_offset;
	int raid10_copies;
	int requested_bitmap_chunk_sectors;

	struct mddev md;
	struct raid_type *raid_type;

	sector_t array_sectors;
	sector_t dev_sectors;

	struct journal_dev {
		struct dm_dev *dev;
		struct md_rdev rdev;
		int mode;
	} journal_dev;

	struct raid_dev dev[];
};

static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	l->new_level = mddev->new_level;
	l->new_layout = mddev->new_layout;
	l->new_chunk_sectors = mddev->new_chunk_sectors;
}

static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = l->new_level;
	mddev->new_layout = l->new_layout;
	mddev->new_chunk_sectors = l->new_chunk_sectors;
}

#define ALGORITHM_RAID10_DEFAULT 0
#define ALGORITHM_RAID10_NEAR 1
#define ALGORITHM_RAID10_OFFSET 2
#define ALGORITHM_RAID10_FAR 3

static struct raid_type {
	const char *name;
	const char *descr;
	const unsigned int parity_devs;
	const unsigned int minimal_devs;
	const unsigned int level;
	const unsigned int algorithm;
} raid_types[] = {
	{"raid0", "raid0 (striping)", 0, 2, 0, 0},
	{"raid1", "raid1 (mirroring)", 0, 2, 1, 0},
	{"raid10_far", "raid10 far (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_FAR},
	{"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
	{"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
	{"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
	{"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0},
	{"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
	{"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
	{"raid5_la", "raid5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra", "raid5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid6_zr", "raid6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr", "raid6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc", "raid6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE},
	{"raid6_n_6", "raid6 (dedicated parity/Q n/6)", 2, 4, 6, ALGORITHM_PARITY_N_6},
	{"raid6_ls_6", "raid6 (left symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_SYMMETRIC_6},
	{"raid6_rs_6", "raid6 (right symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_SYMMETRIC_6},
	{"raid6_la_6", "raid6 (left asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_ASYMMETRIC_6},
	{"raid6_ra_6", "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_ASYMMETRIC_6}
};

static bool __within_range(long v, long min, long max)
{
	return v >= min && v <= max;
}

static struct arg_name_flag {
	const unsigned long flag;
	const char *name;
} __arg_name_flags[] = {
	{ CTR_FLAG_SYNC, "sync"},
	{ CTR_FLAG_NOSYNC, "nosync"},
	{ CTR_FLAG_REBUILD, "rebuild"},
	{ CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
	{ CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
	{ CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
	{ CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
	{ CTR_FLAG_WRITE_MOSTLY, "write_mostly"},
	{ CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
	{ CTR_FLAG_REGION_SIZE, "region_size"},
	{ CTR_FLAG_RAID10_COPIES, "raid10_copies"},
	{ CTR_FLAG_RAID10_FORMAT, "raid10_format"},
	{ CTR_FLAG_DATA_OFFSET, "data_offset"},
	{ CTR_FLAG_DELTA_DISKS, "delta_disks"},
	{ CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
	{ CTR_FLAG_JOURNAL_DEV, "journal_dev" },
	{ CTR_FLAG_JOURNAL_MODE, "journal_mode" },
};

static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
{
	if (hweight32(flag) == 1) {
		struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags);

		while (anf-- > __arg_name_flags)
			if (flag & anf->flag)
				return anf->name;

	} else
		DMERR("%s called with more than one flag!", __func__);

	return NULL;
}

static struct {
	const int mode;
	const char *param;
} _raid456_journal_mode[] = {
	{ R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
	{ R5C_JOURNAL_MODE_WRITE_BACK, "writeback" }
};

static int dm_raid_journal_mode_to_md(const char *mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (!strcasecmp(mode, _raid456_journal_mode[m].param))
			return _raid456_journal_mode[m].mode;

	return -EINVAL;
}

static const char *md_journal_mode_to_dm_raid(const int mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (mode == _raid456_journal_mode[m].mode)
			return _raid456_journal_mode[m].param;

	return "unknown";
}

static bool rs_is_raid0(struct raid_set *rs)
{
	return !rs->md.level;
}

static bool rs_is_raid1(struct raid_set *rs)
{
	return rs->md.level == 1;
}

static bool rs_is_raid10(struct raid_set *rs)
{
	return rs->md.level == 10;
}

static bool rs_is_raid6(struct raid_set *rs)
{
	return rs->md.level == 6;
}

static bool rs_is_raid456(struct raid_set *rs)
{
	return __within_range(rs->md.level, 4, 6);
}

static bool __is_raid10_far(int layout);
static bool rs_is_reshapable(struct raid_set *rs)
{
	return rs_is_raid456(rs) ||
	       (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
}

static bool rs_is_recovering(struct raid_set *rs)
{
	return rs->md.recovery_cp < rs->md.dev_sectors;
}

static bool rs_is_reshaping(struct raid_set *rs)
{
	return rs->md.reshape_position != MaxSector;
}

static bool rt_is_raid0(struct raid_type *rt)
{
	return !rt->level;
}

static bool rt_is_raid1(struct raid_type *rt)
{
	return rt->level == 1;
}

static bool rt_is_raid10(struct raid_type *rt)
{
	return rt->level == 10;
}

static bool rt_is_raid45(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 5);
}

static bool rt_is_raid6(struct raid_type *rt)
{
	return rt->level == 6;
}

static bool rt_is_raid456(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 6);
}

static unsigned long __valid_flags(struct raid_set *rs)
{
	if (rt_is_raid0(rs->raid_type))
		return RAID0_VALID_FLAGS;
	else if (rt_is_raid1(rs->raid_type))
		return RAID1_VALID_FLAGS;
	else if (rt_is_raid10(rs->raid_type))
		return RAID10_VALID_FLAGS;
	else if (rt_is_raid45(rs->raid_type))
		return RAID45_VALID_FLAGS;
	else if (rt_is_raid6(rs->raid_type))
		return RAID6_VALID_FLAGS;

	return 0;
}

static int rs_check_for_valid_flags(struct raid_set *rs)
{
	if (rs->ctr_flags & ~__valid_flags(rs)) {
		rs->ti->error = "Invalid flags combination";
		return -EINVAL;
	}

	return 0;
}

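/*
 * MD raid10 layout encoding: the low byte holds the number of near copies,
 * the next byte the number of far copies, bit 16 selects "offset" mode and
 * bits 17/18 select how far sets are laid out.
 */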
#define RAID10_OFFSET (1 << 16)
#define RAID10_BROCKEN_USE_FAR_SETS (1 << 17)
#define RAID10_USE_FAR_SETS (1 << 18)
#define RAID10_FAR_COPIES_SHIFT 8

static unsigned int __raid10_near_copies(int layout)
{
	return layout & 0xFF;
}

static unsigned int __raid10_far_copies(int layout)
{
	return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
}

static bool __is_raid10_offset(int layout)
{
	return !!(layout & RAID10_OFFSET);
}

static bool __is_raid10_near(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1;
}

static bool __is_raid10_far(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1;
}

static const char *raid10_md_layout_to_format(int layout)
{
	if (__is_raid10_offset(layout))
		return "offset";

	if (__raid10_near_copies(layout) > 1)
		return "near";

	if (__raid10_far_copies(layout) > 1)
		return "far";

	return "unknown";
}

static int raid10_name_to_format(const char *name)
{
	if (!strcasecmp(name, "near"))
		return ALGORITHM_RAID10_NEAR;
	else if (!strcasecmp(name, "offset"))
		return ALGORITHM_RAID10_OFFSET;
	else if (!strcasecmp(name, "far"))
		return ALGORITHM_RAID10_FAR;

	return -EINVAL;
}

static unsigned int raid10_md_layout_to_copies(int layout)
{
	return max(__raid10_near_copies(layout), __raid10_far_copies(layout));
}

static int raid10_format_to_md_layout(struct raid_set *rs,
				      unsigned int algorithm,
				      unsigned int copies)
{
	unsigned int n = 1, f = 1, r = 0;

	if (algorithm == ALGORITHM_RAID10_DEFAULT ||
	    algorithm == ALGORITHM_RAID10_NEAR)
		n = copies;

	else if (algorithm == ALGORITHM_RAID10_OFFSET) {
		f = copies;
		r = RAID10_OFFSET;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else if (algorithm == ALGORITHM_RAID10_FAR) {
		f = copies;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else
		return -EINVAL;

	return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
}

static bool __got_raid10(struct raid_type *rtp, const int layout)
{
	if (rtp->level == 10) {
		switch (rtp->algorithm) {
		case ALGORITHM_RAID10_DEFAULT:
		case ALGORITHM_RAID10_NEAR:
			return __is_raid10_near(layout);
		case ALGORITHM_RAID10_OFFSET:
			return __is_raid10_offset(layout);
		case ALGORITHM_RAID10_FAR:
			return __is_raid10_far(layout);
		default:
			break;
		}
	}

	return false;
}

static struct raid_type *get_raid_type(const char *name)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types)
		if (!strcasecmp(rtp->name, name))
			return rtp;

	return NULL;
}

static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types) {
		if (rtp->level == level &&
		    (__got_raid10(rtp, layout) || rtp->algorithm == layout))
			return rtp;
	}

	return NULL;
}

static void rs_set_rdev_sectors(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (!test_bit(Journal, &rdev->flags))
			rdev->sectors = mddev->dev_sectors;
}

static void rs_set_capacity(struct raid_set *rs)
{
	struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));

	set_capacity_and_notify(gendisk, rs->md.array_sectors);
}

static void rs_set_cur(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
}

static void rs_set_new(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->raid_disks = rs->raid_disks;
	mddev->delta_disks = 0;
}

static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
				       unsigned int raid_devs)
{
	unsigned int i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(struct_size(rs, dev, raid_devs), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	rs->raid_disks = raid_devs;
	rs->delta_disks = 0;

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->stripe_cache_entries = 256;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = MaxSector;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	return rs;
}

static void raid_set_free(struct raid_set *rs)
{
	int i;

	if (rs->journal_dev.dev) {
		md_rdev_clear(&rs->journal_dev.rdev);
		dm_put_device(rs->ti, rs->journal_dev.dev);
	}

	for (i = 0; i < rs->raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}

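/*
 * Parse the <meta_dev> <data_dev> pairs of the constructor's device list.
 * Either entry may be given as "-": a "-" metadata device means the pair
 * carries no dm-raid superblock, a "-" data device marks a missing (failed
 * and not yet replaced) member.
 */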
static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int r = 0;
	const char *arg;

	/* Skip over the number-of-devices argument to get to the dev pairs */
	arg = dm_shift_arg(as);
	if (!arg)
		return -EINVAL;

	for (i = 0; i < rs->raid_disks; i++) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.new_data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (strcmp(arg, "-")) {
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->dev[i].meta_dev);
			if (r) {
				rs->ti->error = "RAID metadata device lookup failure";
				return r;
			}

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page) {
				rs->ti->error = "Failed to allocate superblock page";
				return -ENOMEM;
			}
		}

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (!strcmp(arg, "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			if (rs->dev[i].meta_dev) {
				rs->ti->error = "No data device supplied with metadata device";
				return -EINVAL;
			}

			continue;
		}

		r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
				  &rs->dev[i].data_dev);
		if (r) {
			rs->ti->error = "RAID device lookup failure";
			return r;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (rs->journal_dev.dev)
		list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata we cannot tell whether the array is
		 * in-sync, so rebuilding individual drives is unsafe
		 * unless the whole set is known (or forced) to be in-sync.
		 */
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}

static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (rs_is_raid0(rs))
		return 0;

	if (!region_size) {
		/* Choose a default that keeps the region count at or below ~2^21 */
		if (min_region_size > (1 << 13)) {
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13;
		}
	} else {
		/* Validate the user-supplied value */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/* Convert sectors to bytes */
	rs->md.bitmap_info.chunksize = to_bytes(region_size);

	return 0;
}

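/*
 * Determine whether enough devices remain in-sync (i.e. are not being
 * rebuilt) for the array to be usable; returns -EINVAL otherwise.
 */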
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned int i, rebuild_cnt = 0;
	unsigned int rebuilds_per_group = 0, copies, raid_disks;
	unsigned int group_size, last_group_start;

	for (i = 0; i < rs->raid_disks; i++)
		if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
		    ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		      !rs->dev[i].rdev.sb_page)))
			rebuild_cnt++;

	switch (rs->md.level) {
	case 0:
		break;
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.new_layout);
		if (copies < 2) {
			DMERR("Bogus raid10 data copies < 2!");
			return -EINVAL;
		}

		if (rebuild_cnt < copies)
			break;

		/*
		 * raid10 near keeps each set of 'copies' consecutive devices
		 * mirrored; the set survives only while at least one member
		 * of every such group stays in-sync.
		 */
		raid_disks = min(rs->raid_disks, rs->md.raid_disks);
		if (__is_raid10_near(rs->md.new_layout)) {
			for (i = 0; i < raid_disks; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				if ((!rs->dev[i].rdev.sb_page ||
				     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

		/*
		 * For far/offset formats the devices are checked in
		 * consecutive groups as well; losing 'copies' members of
		 * any one group is fatal.  Devices past the last aligned
		 * group boundary are folded into the final group.
		 */
		group_size = (raid_disks / copies);
		last_group_start = (raid_disks / group_size) - 1;
		last_group_start *= group_size;
		for (i = 0; i < raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}

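/*
 * Parse the optional raid parameters following the mandatory chunk size:
 * [no]sync, rebuild <idx>, daemon_sleep <t>, min/max_recovery_rate <kB/s>,
 * write_mostly <idx>, max_write_behind <sectors>, stripe_cache <entries>,
 * region_size <sectors>, raid10_copies <n>, raid10_format <near|far|offset>,
 * delta_disks <n>, data_offset <sectors>, raid10_use_near_sets,
 * journal_dev <dev> and journal_mode <writethrough|writeback>.
 *
 * Illustrative dmsetup table line (hypothetical device paths) for a
 * three-device raid5 with a 64-sector chunk and 1024-sector regions:
 *
 *   0 41943040 raid raid5_ls 3 64 region_size 1024 3 - /dev/sda1 - /dev/sdb1 - /dev/sdc1
 */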
static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
			     unsigned int num_raid_params)
{
	int value, raid10_format = ALGORITHM_RAID10_DEFAULT;
	unsigned int raid10_copies = 2;
	unsigned int i, write_mostly = 0;
	unsigned int region_size = 0;
	sector_t max_io_len;
	const char *arg, *key;
	struct raid_dev *rd;
	struct raid_type *rt = rs->raid_type;

	arg = dm_shift_arg(as);
	num_raid_params--;

	if (kstrtoint(arg, 10, &value) < 0) {
		rs->ti->error = "Bad numerical argument given for chunk_size";
		return -EINVAL;
	}

	if (rt_is_raid1(rt)) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;

	/*
	 * Assume all devices are in-sync with a completed recovery;
	 * 'rebuild' arguments handled below clear this for specific devices.
	 */
	for (i = 0; i < rs->raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/* Second, parse the unordered optional arguments */
	for (i = 0; i < num_raid_params; i++) {
		key = dm_shift_arg(as);
		if (!key) {
			rs->ti->error = "Not enough raid parameters given";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) {
			if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'nosync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
			if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'sync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_use_near_sets' argument allowed";
				return -EINVAL;
			}
			continue;
		}

		arg = dm_shift_arg(as);
		i++;
		if (!arg) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		/* Parameters that take a string value are checked here */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_format' argument pair allowed";
				return -EINVAL;
			}
			if (!rt_is_raid10(rt)) {
				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			raid10_format = raid10_name_to_format(arg);
			if (raid10_format < 0) {
				rs->ti->error = "Invalid 'raid10_format' value given";
				return raid10_format;
			}
			continue;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV))) {
			int r;
			struct md_rdev *jdev;

			if (test_and_set_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 set journaling device allowed";
				return -EINVAL;
			}
			if (!rt_is_raid456(rt)) {
				rs->ti->error = "'journal_dev' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->journal_dev.dev);
			if (r) {
				rs->ti->error = "raid4/5/6 journal device lookup failure";
				return r;
			}
			jdev = &rs->journal_dev.rdev;
			md_rdev_init(jdev);
			jdev->mddev = &rs->md;
			jdev->bdev = rs->journal_dev.dev->bdev;
			jdev->sectors = bdev_nr_sectors(jdev->bdev);
			if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
				rs->ti->error = "No space for raid4/5/6 journal";
				return -ENOSPC;
			}
			rs->journal_dev.mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
			set_bit(Journal, &jdev->flags);
			continue;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE))) {
			int r;

			if (!test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "raid4/5/6 'journal_mode' is invalid without 'journal_dev'";
				return -EINVAL;
			}
			if (test_and_set_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 'journal_mode' argument allowed";
				return -EINVAL;
			}
			r = dm_raid_journal_mode_to_md(arg);
			if (r < 0) {
				rs->ti->error = "Invalid 'journal_mode' argument";
				return r;
			}
			rs->journal_dev.mode = r;
			continue;
		}

		/* Parameters with number values from here on */
		if (kstrtoint(arg, 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) {
			/* Mark the given device index for reconstruction */
			if (!__within_range(value, 0, rs->raid_disks - 1)) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}

			if (test_and_set_bit(value, (void *) rs->rebuild_disks)) {
				rs->ti->error = "rebuild for this index already given";
				return -EINVAL;
			}

			rd = rs->dev + value;
			clear_bit(In_sync, &rd->rdev.flags);
			clear_bit(Faulty, &rd->rdev.flags);
			rd->rdev.recovery_offset = 0;
			set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}

			if (!__within_range(value, 0, rs->md.raid_disks - 1)) {
				rs->ti->error = "Invalid write_mostly index given";
				return -EINVAL;
			}

			write_mostly++;
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
			set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}

			if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_write_behind argument pair allowed";
				return -EINVAL;
			}

			/* Userspace passes sectors; MD stores max_write_behind in kB */
			if (value < 0 || value / 2 > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}

			rs->md.bitmap_info.max_write_behind = value / 2;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
			if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
				rs->ti->error = "Only one daemon_sleep argument pair allowed";
				return -EINVAL;
			}
			if (value < 0) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
			if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
				rs->ti->error = "Only one data_offset argument pair allowed";
				return -EINVAL;
			}

			if (value < 0 ||
			    (value && (value < MIN_FREE_RESHAPE_SPACE || value % to_sector(PAGE_SIZE)))) {
				rs->ti->error = "Bogus data_offset value";
				return -EINVAL;
			}
			rs->data_offset = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) {
			if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
				rs->ti->error = "Only one delta_disks argument pair allowed";
				return -EINVAL;
			}

			if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
				rs->ti->error = "Too many delta_disks requested";
				return -EINVAL;
			}

			rs->delta_disks = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) {
			if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
				rs->ti->error = "Only one stripe_cache argument pair allowed";
				return -EINVAL;
			}

			if (!rt_is_raid456(rt)) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "Bogus stripe cache entries value";
				return -EINVAL;
			}
			rs->stripe_cache_entries = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one min_recovery_rate argument pair allowed";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_recovery_rate argument pair allowed";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) {
			if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
				rs->ti->error = "Only one region_size argument pair allowed";
				return -EINVAL;
			}

			region_size = value;
			rs->requested_bitmap_chunk_sectors = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid10_copies argument pair allowed";
				return -EINVAL;
			}

			if (!__within_range(value, 2, rs->md.raid_disks)) {
				rs->ti->error = "Bad value for 'raid10_copies'";
				return -EINVAL;
			}

			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameter";
			return -EINVAL;
		}
	}

	if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) &&
	    test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
		rs->ti->error = "sync and nosync are mutually exclusive";
		return -EINVAL;
	}

	if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) &&
	    (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ||
	     test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))) {
		rs->ti->error = "sync/nosync and rebuild are mutually exclusive";
		return -EINVAL;
	}

	if (write_mostly >= rs->md.raid_disks) {
		rs->ti->error = "Can't set all raid1 devices to write_mostly";
		return -EINVAL;
	}

	if (rs->md.sync_speed_max &&
	    rs->md.sync_speed_min > rs->md.sync_speed_max) {
		rs->ti->error = "Bogus recovery rates";
		return -EINVAL;
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rt_is_raid10(rt)) {
		if (raid10_copies > rs->md.raid_disks) {
			rs->ti->error = "Not enough devices to satisfy specification";
			return -EINVAL;
		}

		rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
		if (rs->md.new_layout < 0) {
			rs->ti->error = "Error getting raid10 format";
			return rs->md.new_layout;
		}

		rt = get_raid_type_by_ll(10, rs->md.new_layout);
		if (!rt) {
			rs->ti->error = "Failed to recognize new raid10 layout";
			return -EINVAL;
		}

		if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
		     rt->algorithm == ALGORITHM_RAID10_NEAR) &&
		    test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
			rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
			return -EINVAL;
		}
	}

	rs->raid10_copies = raid10_copies;

	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	/* Check, if any invalid ctr arguments have been passed in for the raid level */
	return rs_check_for_valid_flags(rs);
}

static int rs_set_raid456_stripe_cache(struct raid_set *rs)
{
	int r;
	struct r5conf *conf;
	struct mddev *mddev = &rs->md;
	uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
	uint32_t nr_stripes = rs->stripe_cache_entries;

	if (!rt_is_raid456(rs->raid_type)) {
		rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size";
		return -EINVAL;
	}

	if (nr_stripes < min_stripes) {
		DMINFO("Adjusting requested %u stripe cache entries to %u to suit stripe size",
		       nr_stripes, min_stripes);
		nr_stripes = min_stripes;
	}

	conf = mddev->private;
	if (!conf) {
		rs->ti->error = "Cannot change stripe_cache size on inactive RAID set";
		return -EINVAL;
	}

	/* Try setting the number of stripes in the raid456 stripe cache */
	if (conf->min_nr_stripes != nr_stripes) {
		r = raid5_set_cache_size(mddev, nr_stripes);
		if (r) {
			rs->ti->error = "Failed to set raid4/5/6 stripe cache size";
			return r;
		}

		DMINFO("%u stripe cache entries", nr_stripes);
	}

	return 0;
}

static unsigned int mddev_data_stripes(struct raid_set *rs)
{
	return rs->md.raid_disks - rs->raid_type->parity_devs;
}

static unsigned int rs_data_stripes(struct raid_set *rs)
{
	return rs->raid_disks - rs->raid_type->parity_devs;
}

static sector_t __rdev_sectors(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->raid_disks; i++) {
		struct md_rdev *rdev = &rs->dev[i].rdev;

		if (!test_bit(Journal, &rdev->flags) &&
		    rdev->bdev && rdev->sectors)
			return rdev->sectors;
	}

	return 0;
}

static int _check_data_dev_sectors(struct raid_set *rs)
{
	sector_t ds = ~0;
	struct md_rdev *rdev;

	rdev_for_each(rdev, &rs->md)
		if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
			ds = min(ds, bdev_nr_sectors(rdev->bdev));
			if (ds < rs->md.dev_sectors) {
				rs->ti->error = "Component device(s) too small";
				return -EINVAL;
			}
		}

	return 0;
}

static int rs_set_dev_and_array_sectors(struct raid_set *rs, sector_t sectors, bool use_mddev)
{
	int delta_disks;
	unsigned int data_stripes;
	sector_t array_sectors = sectors, dev_sectors = sectors;
	struct mddev *mddev = &rs->md;

	if (use_mddev) {
		delta_disks = mddev->delta_disks;
		data_stripes = mddev_data_stripes(rs);
	} else {
		delta_disks = rs->delta_disks;
		data_stripes = rs_data_stripes(rs);
	}

	/* Special raid1 case w/o delta_disks support: array size equals component size */
	if (rt_is_raid1(rs->raid_type))
		;
	else if (rt_is_raid10(rs->raid_type)) {
		if (rs->raid10_copies < 2 ||
		    delta_disks < 0) {
			rs->ti->error = "Bogus raid10 data copies or delta disks";
			return -EINVAL;
		}

		dev_sectors *= rs->raid10_copies;
		if (sector_div(dev_sectors, data_stripes))
			goto bad;

		array_sectors = (data_stripes + delta_disks) * dev_sectors;
		if (sector_div(array_sectors, rs->raid10_copies))
			goto bad;

	} else if (sector_div(dev_sectors, data_stripes))
		goto bad;

	else
		/* Striped layouts */
		array_sectors = (data_stripes + delta_disks) * dev_sectors;

	mddev->array_sectors = array_sectors;
	mddev->dev_sectors = dev_sectors;
	rs_set_rdev_sectors(rs);

	return _check_data_dev_sectors(rs);
bad:
	rs->ti->error = "Target length not divisible by number of data devices";
	return -EINVAL;
}

static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
	/* raid0 does not recover */
	if (rs_is_raid0(rs))
		rs->md.recovery_cp = MaxSector;
	/*
	 * A raid6 set has to be recovered either completely or for the
	 * grown part to ensure proper parity and Q-Syndrome.
	 */
	else if (rs_is_raid6(rs))
		rs->md.recovery_cp = dev_sectors;
	/* Other raid set types may skip the initial sync when 'nosync' was requested */
	else
		rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
				     ? MaxSector : dev_sectors;
}

static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	smp_rmb(); /* Make sure we access most recent mddev properties */
	if (!rs_is_reshaping(rs)) {
		if (rs_is_raid10(rs))
			rs_set_rdev_sectors(rs);
		rs_set_capacity(rs);
	}
	dm_table_event(rs->ti->table);
}

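/*
 * Check whether the requested takeover (i.e. raid level switch) from the
 * current to the new level is one of the supported conversions; adjust
 * mddev fields where the conversion requires it.
 */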
static int rs_check_takeover(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	unsigned int near_copies;

	if (rs->md.degraded) {
		rs->ti->error = "Can't takeover degraded raid set";
		return -EPERM;
	}

	if (rs_is_reshaping(rs)) {
		rs->ti->error = "Can't takeover reshaping raid set";
		return -EPERM;
	}

	switch (mddev->level) {
	case 0:
		/* raid0 -> raid1/5 with one disk */
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 1)
			return 0;

		/* raid0 -> raid10 */
		if (mddev->new_level == 10 &&
		    !(rs->raid_disks % mddev->raid_disks))
			return 0;

		/* raid0 with multiple disks -> raid4/5/6 */
		if (__within_range(mddev->new_level, 4, 6) &&
		    mddev->new_layout == ALGORITHM_PARITY_N &&
		    mddev->raid_disks > 1)
			return 0;

		break;

	case 10:
		/* Can't takeover raid10_offset! */
		if (__is_raid10_offset(mddev->layout))
			break;

		near_copies = __raid10_near_copies(mddev->layout);

		/* raid10* -> raid0 */
		if (mddev->new_level == 0) {
			/* Can takeover raid10_near with raid disks divisible by data copies */
			if (near_copies > 1 &&
			    !(mddev->raid_disks % near_copies)) {
				mddev->raid_disks /= near_copies;
				mddev->delta_disks = mddev->raid_disks;
				return 0;
			}

			/* Can takeover raid10_far */
			if (near_copies == 1 &&
			    __raid10_far_copies(mddev->layout) > 1)
				return 0;

			break;
		}

		/* raid10_{near,far} -> raid1 */
		if (mddev->new_level == 1 &&
		    max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
			return 0;

		/* raid10_{near,far} with 2 disks -> raid4/5 */
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2)
			return 0;
		break;

	case 1:
		/* raid1 with 2 disks -> raid4/5 */
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2) {
			mddev->degraded = 1;
			return 0;
		}

		/* raid1 -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->raid_disks == 1)
			return 0;

		/* raid1 -> raid10 */
		if (mddev->new_level == 10)
			return 0;
		break;

	case 4:
		/* raid4 -> raid0 */
		if (mddev->new_level == 0)
			return 0;

		/* raid4 -> raid1/5 with 2 disks */
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 2)
			return 0;

		/* raid4 -> raid5/6 with parity N */
		if (__within_range(mddev->new_level, 5, 6) &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;
		break;

	case 5:
		/* raid5 with parity N -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid5 with parity N -> raid4 */
		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid5 with 2 disks -> raid1/4/10 */
		if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
		    mddev->raid_disks == 2)
			return 0;

		/* raid5_* -> raid6_*_6 with Q-Syndrome N (e.g. raid5_ra -> raid6_ra_6) */
		if (mddev->new_level == 6 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
			return 0;
		break;

	case 6:
		/* raid6 with parity N -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid6 with parity N -> raid4 */
		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid6_*_n with Q-Syndrome N -> raid5_* */
		if (mddev->new_level == 5 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
			return 0;
		break;

	default:
		break;
	}

	rs->ti->error = "takeover not possible";
	return -EINVAL;
}

static bool rs_takeover_requested(struct raid_set *rs)
{
	return rs->md.new_level != rs->md.level;
}

static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
{
	return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
	       rs->md.new_layout != rs->md.layout ||
	       rs->md.new_chunk_sectors != rs->md.chunk_sectors;
}

static bool rs_reshape_requested(struct raid_set *rs)
{
	bool change;
	struct mddev *mddev = &rs->md;

	if (rs_takeover_requested(rs))
		return false;

	if (rs_is_raid0(rs))
		return false;

	change = rs_is_layout_change(rs, false);

	if (rs_is_raid1(rs)) {
		if (rs->delta_disks)
			return !!rs->delta_disks;

		return !change &&
		       mddev->raid_disks != rs->raid_disks;
	}

	if (rs_is_raid10(rs))
		return change &&
		       !__is_raid10_far(mddev->new_layout) &&
		       rs->delta_disks >= 0;

	return change;
}

#define FEATURE_FLAG_SUPPORTS_V190 0x1

#define SB_FLAG_RESHAPE_ACTIVE 0x1
#define SB_FLAG_RESHAPE_BACKWARDS 0x2

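/*
 * On-disk dm-raid superblock: all fields little-endian; the layout must
 * remain backwards compatible.
 */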
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
	__le32 magic;			/* "DmRd" */
	__le32 compat_features;		/* Indicates compatible features (like 1.9.0 metadata extensions) */

	__le32 num_devices;		/* Number of devices in this raid set */
	__le32 array_position;		/* The position of this drive in the raid set */

	__le64 events;			/* Incremented by md when superblock updated */
	__le64 failed_devices;		/* Pre-1.9.0 bit field of devices to indicate failures */

	/* Progress of the repair or replacement of an individual drive */
	__le64 disk_recovery_offset;

	/* Progress of the initial raid set synchronization/parity calculation */
	__le64 array_resync_offset;

	/* raid characteristics */
	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	/*
	 * Fields below are v1.9.0 extensions; they are only valid when
	 * FEATURE_FLAG_SUPPORTS_V190 is set in compat_features.
	 */
	__le32 flags;			/* Flags defining array states for reshaping */

	/* Offset of an in-progress reshape, to allow restarting it */
	__le64 reshape_position;

	/* Array properties in case of an interrupted reshape */
	__le32 new_level;
	__le32 new_layout;
	__le32 new_stripe_sectors;
	__le32 delta_disks;

	__le64 array_sectors;		/* Array size in sectors */

	/*
	 * Sector offsets to data on devices (reshaping): needed to support
	 * out-of-place reshaping, i.e. not writing over any stripes while
	 * converting them from old to new layout.
	 */
	__le64 data_offset;
	__le64 new_data_offset;

	__le64 sectors;			/* Used device size in sectors */

	/* Additional bit field of failed devices to support larger sets */
	__le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];

	__le32 incompat_features;	/* Indicates any incompatible features */

	/* The rest of the superblock is zeroed out when writing */
} __packed;

/*
 * Reject a reshape request when the raid set cannot currently be reshaped
 * (no reshape support, degraded, recovering or already reshaping).
 */
static int rs_check_reshape(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	if (!mddev->pers || !mddev->pers->check_reshape)
		rs->ti->error = "Reshape not supported";
	else if (mddev->degraded)
		rs->ti->error = "Can't reshape degraded raid set";
	else if (rs_is_recovering(rs))
		rs->ti->error = "Convert request on recovering raid set prohibited";
	else if (rs_is_reshaping(rs))
		rs->ti->error = "raid set already reshaping!";
	else if (!(rs_is_raid1(rs) || rs_is_raid10(rs) || rs_is_raid456(rs)))
		rs->ti->error = "Reshaping only supported for raid1/4/5/6/10";
	else
		return 0;

	return -EPERM;
}

static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
{
	BUG_ON(!rdev->sb_page);

	if (rdev->sb_loaded && !force_reload)
		return 0;

	rdev->sb_loaded = 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) {
		DMERR("Failed to read superblock of device at position %d",
		      rdev->raid_disk);
		md_error(rdev->mddev, rdev);
		set_bit(Faulty, &rdev->flags);
		return -EIO;
	}

	rdev->sb_loaded = 1;

	return 0;
}

static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
	failed_devices[0] = le64_to_cpu(sb->failed_devices);
	memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices));

	if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
		int i = ARRAY_SIZE(sb->extended_failed_devices);

		while (i--)
			failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]);
	}
}

static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
{
	int i = ARRAY_SIZE(sb->extended_failed_devices);

	sb->failed_devices = cpu_to_le64(failed_devices[0]);
	while (i--)
		sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]);
}

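/*
 * Write the current array state into @rdev's in-core superblock page;
 * also merges freshly failed devices into the failed-devices bitmap.
 */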
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	bool update_failed_devices = false;
	unsigned int i;
	uint64_t failed_devices[DISKS_ARRAY_ELEMS];
	struct dm_raid_superblock *sb;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);

	/* No metadata device, no superblock */
	if (!rdev->meta_bdev)
		return;

	BUG_ON(!rdev->sb_page);

	sb = page_address(rdev->sb_page);

	sb_retrieve_failed_devices(sb, failed_devices);

	for (i = 0; i < rs->raid_disks; i++)
		if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
			update_failed_devices = true;
			set_bit(i, (void *) failed_devices);
		}

	if (update_failed_devices)
		sb_update_failed_devices(sb, failed_devices);

	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
	sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

	sb->num_devices = cpu_to_le32(mddev->raid_disks);
	sb->array_position = cpu_to_le32(rdev->raid_disk);

	sb->events = cpu_to_le64(mddev->events);

	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);

	/* v1.9.0 extension fields below */
	sb->new_level = cpu_to_le32(mddev->new_level);
	sb->new_layout = cpu_to_le32(mddev->new_layout);
	sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);

	sb->delta_disks = cpu_to_le32(mddev->delta_disks);

	smp_rmb(); /* Make sure we access most recent reshape position */
	sb->reshape_position = cpu_to_le64(mddev->reshape_position);
	if (le64_to_cpu(sb->reshape_position) != MaxSector) {
		/* Flag ongoing reshape */
		sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE);

		if (mddev->delta_disks < 0 || mddev->reshape_backwards)
			sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS);
	} else {
		/* Clear reshape flags */
		sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS));
	}

	sb->array_sectors = cpu_to_le64(mddev->array_sectors);
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
	sb->sectors = cpu_to_le64(rdev->sectors);
	sb->incompat_features = cpu_to_le32(0);

	/* Zero out the rest of the payload after sb */
	memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
}

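/*
 * Load and validate @rdev's superblock, creating a fresh one when none is
 * found or the device was requested for rebuild.  Returns 1 when @rdev's
 * superblock should be used as the reference (newer events, or no @refdev),
 * 0 when @refdev stays the reference, or a negative errno on read failure.
 */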
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
	int r;
	struct dm_raid_superblock *sb;
	struct dm_raid_superblock *refsb;
	uint64_t events_sb, events_refsb;

	r = read_disk_sb(rdev, rdev->sb_size, false);
	if (r)
		return r;

	sb = page_address(rdev->sb_page);

	/*
	 * Two cases in which we write a new superblock and rebuild:
	 * 1) New device (no matching magic number)
	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
	 */
	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
		super_sync(rdev->mddev, rdev);

		set_bit(FirstUse, &rdev->flags);
		sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

		/* Force writing of superblocks to disk */
		set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);

		/* Any superblock is better than none, choose that if given */
		return refdev ? 0 : 1;
	}

	if (!refdev)
		return 1;

	events_sb = le64_to_cpu(sb->events);

	refsb = page_address(refdev->sb_page);
	events_refsb = le64_to_cpu(refsb->events);

	return (events_sb > events_refsb) ? 1 : 0;
}

2208 static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
2209 {
2210 int role;
2211 unsigned int d;
2212 struct mddev *mddev = &rs->md;
2213 uint64_t events_sb;
2214 uint64_t failed_devices[DISKS_ARRAY_ELEMS];
2215 struct dm_raid_superblock *sb;
2216 uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0;
2217 struct md_rdev *r;
2218 struct dm_raid_superblock *sb2;
2219
2220 sb = page_address(rdev->sb_page);
2221 events_sb = le64_to_cpu(sb->events);
2222
2223
2224
2225
2226 mddev->events = events_sb ? : 1;
2227
2228 mddev->reshape_position = MaxSector;
2229
2230 mddev->raid_disks = le32_to_cpu(sb->num_devices);
2231 mddev->level = le32_to_cpu(sb->level);
2232 mddev->layout = le32_to_cpu(sb->layout);
2233 mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
2234
2235
2236
2237
2238
2239 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
2240
2241 mddev->new_level = le32_to_cpu(sb->new_level);
2242 mddev->new_layout = le32_to_cpu(sb->new_layout);
2243 mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
2244 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
2245 mddev->array_sectors = le64_to_cpu(sb->array_sectors);
2246
2247
2248 if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) {
2249 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
2250 DMERR("Reshape requested but raid set is still reshaping");
2251 return -EINVAL;
2252 }
2253
2254 if (mddev->delta_disks < 0 ||
2255 (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
2256 mddev->reshape_backwards = 1;
2257 else
2258 mddev->reshape_backwards = 0;
2259
2260 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
2261 rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
2262 }
2263
2264 } else {
2265
2266
2267
2268 struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
2269 struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
2270
2271 if (rs_takeover_requested(rs)) {
2272 if (rt_cur && rt_new)
2273 DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)",
2274 rt_cur->name, rt_new->name);
2275 else
2276 DMERR("Takeover raid sets not yet supported by metadata. (raid level change)");
2277 return -EINVAL;
2278 } else if (rs_reshape_requested(rs)) {
2279 DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)");
2280 if (mddev->layout != mddev->new_layout) {
2281 if (rt_cur && rt_new)
2282 DMERR(" current layout %s vs new layout %s",
2283 rt_cur->name, rt_new->name);
2284 else
2285 DMERR(" current layout 0x%X vs new layout 0x%X",
2286 le32_to_cpu(sb->layout), mddev->new_layout);
2287 }
2288 if (mddev->chunk_sectors != mddev->new_chunk_sectors)
2289 DMERR(" current stripe sectors %u vs new stripe sectors %u",
2290 mddev->chunk_sectors, mddev->new_chunk_sectors);
2291 if (rs->delta_disks)
2292 DMERR(" current %u disks vs new %u disks",
2293 mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
2294 if (rs_is_raid10(rs)) {
2295 DMERR(" Old layout: %s w/ %u copies",
2296 raid10_md_layout_to_format(mddev->layout),
2297 raid10_md_layout_to_copies(mddev->layout));
2298 DMERR(" New layout: %s w/ %u copies",
2299 raid10_md_layout_to_format(mddev->new_layout),
2300 raid10_md_layout_to_copies(mddev->new_layout));
2301 }
2302 return -EINVAL;
2303 }
2304
2305 DMINFO("Discovered old metadata format; upgrading to extended metadata format");
2306 }
2307
2308 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
2309 mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
2310
	/*
	 * Devices which lacked a valid superblock at load time were
	 * flagged FirstUse.  A device may have no superblock because:
	 *
	 * 1) The raid set is brand new - every member gets a fresh one.
	 * 2) It is a new device being added to an old raid set which
	 *    has to be rebuilt (In_sync is not set).
	 * 3) It is a device added during takeover/reshape to grow the set.
	 *
	 * Count new and to-be-rebuilt devices so that invalid
	 * combinations can be rejected below.
	 */
2326 d = 0;
2327 rdev_for_each(r, mddev) {
		if (test_bit(Journal, &r->flags))
2329 continue;
2330
2331 if (test_bit(FirstUse, &r->flags))
2332 new_devs++;
2333
2334 if (!test_bit(In_sync, &r->flags)) {
2335 DMINFO("Device %d specified for rebuild; clearing superblock",
2336 r->raid_disk);
2337 rebuilds++;
2338
2339 if (test_bit(FirstUse, &r->flags))
2340 rebuild_and_new++;
2341 }
2342
2343 d++;
2344 }
2345
2346 if (new_devs == rs->raid_disks || !rebuilds) {
2347
2348 if (new_devs == rs->raid_disks) {
2349 DMINFO("Superblocks created for new raid set");
2350 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2351 } else if (new_devs != rebuilds &&
2352 new_devs != rs->delta_disks) {
2353 DMERR("New device injected into existing raid set without "
2354 "'delta_disks' or 'rebuild' parameter specified");
2355 return -EINVAL;
2356 }
2357 } else if (new_devs && new_devs != rebuilds) {
2358 DMERR("%u 'rebuild' devices cannot be injected into"
2359 " a raid set with %u other first-time devices",
2360 rebuilds, new_devs);
2361 return -EINVAL;
2362 } else if (rebuilds) {
2363 if (rebuild_and_new && rebuilds != rebuild_and_new) {
2364 DMERR("new device%s provided without 'rebuild'",
2365 new_devs > 1 ? "s" : "");
2366 return -EINVAL;
2367 } else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
2368 DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
2369 (unsigned long long) mddev->recovery_cp);
2370 return -EINVAL;
2371 } else if (rs_is_reshaping(rs)) {
2372 DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
2373 (unsigned long long) mddev->reshape_position);
2374 return -EINVAL;
2375 }
2376 }
2377
2378
	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblock as failed.
	 */
2382 sb_retrieve_failed_devices(sb, failed_devices);
2383 rdev_for_each(r, mddev) {
		if (test_bit(Journal, &r->flags) ||
		    !r->sb_page)
2386 continue;
2387 sb2 = page_address(r->sb_page);
2388 sb2->failed_devices = 0;
2389 memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices));
2390
		/*
		 * Check for any device re-ordering relative to the
		 * positions recorded in the superblocks.
		 */
2394 if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
2395 role = le32_to_cpu(sb2->array_position);
2396 if (role < 0)
2397 continue;
2398
2399 if (role != r->raid_disk) {
2400 if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
2401 if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
2402 rs->raid_disks % rs->raid10_copies) {
2403 rs->ti->error =
2404 "Cannot change raid10 near set to odd # of devices!";
2405 return -EINVAL;
2406 }
2407
2408 sb2->array_position = cpu_to_le32(r->raid_disk);
2409
2410 } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) &&
2411 !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) &&
2412 !rt_is_raid1(rs->raid_type)) {
2413 rs->ti->error = "Cannot change device positions in raid set";
2414 return -EINVAL;
2415 }
2416
2417 DMINFO("raid device #%d now at position #%d", role, r->raid_disk);
2418 }
2419
			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */
2424 if (test_bit(role, (void *) failed_devices))
2425 set_bit(Faulty, &r->flags);
2426 }
2427 }
2428
2429 return 0;
2430 }
2431
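/*
 * Validate @rdev against the state derived from the freshest superblock
 * and set up its recovery and data offsets.
 */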
2432 static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
2433 {
2434 struct mddev *mddev = &rs->md;
2435 struct dm_raid_superblock *sb;
2436
2437 if (rs_is_raid0(rs) || !rdev->sb_page || rdev->raid_disk < 0)
2438 return 0;
2439
2440 sb = page_address(rdev->sb_page);
2441
	/*
	 * If mddev->events is not set, we know we have not yet initialised
	 * the array, so run the full initial validation.
	 */
2446 if (!mddev->events && super_init_validation(rs, rdev))
2447 return -EINVAL;
2448
2449 if (le32_to_cpu(sb->compat_features) &&
2450 le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
2451 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
2452 return -EINVAL;
2453 }
2454
2455 if (sb->incompat_features) {
2456 rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet";
2457 return -EINVAL;
2458 }
2459
2460
2461 mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
2462 mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
2463
2464 if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
		/*
		 * Retrieve the rdev size stored in the superblock to be
		 * prepared for shrink; only extended (v1.9.0) superblocks
		 * carry the member, hence the feature check.
		 */
2470 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
2471 rdev->sectors = le64_to_cpu(sb->sectors);
2472
2473 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
2474 if (rdev->recovery_offset == MaxSector)
2475 set_bit(In_sync, &rdev->flags);
		/*
		 * If no reshape is in progress, we are recovering single
		 * disk(s) and have to set the device(s) to out-of-sync.
		 */
2480 else if (!rs_is_reshaping(rs))
2481 clear_bit(In_sync, &rdev->flags);
2482 }
2483
	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */
2487 if (test_and_clear_bit(Faulty, &rdev->flags)) {
2488 rdev->recovery_offset = 0;
2489 clear_bit(In_sync, &rdev->flags);
2490 rdev->saved_raid_disk = rdev->raid_disk;
2491 }
2492
2493
2494 rdev->data_offset = le64_to_cpu(sb->data_offset);
2495 rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);
2496
2497 return 0;
2498 }
2499
/*
 * Analyse all superblocks and select the freshest one to validate against.
 */
2503 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
2504 {
2505 int r;
2506 struct md_rdev *rdev, *freshest;
2507 struct mddev *mddev = &rs->md;
2508
2509 freshest = NULL;
2510 rdev_for_each(rdev, mddev) {
2511 if (test_bit(Journal, &rdev->flags))
2512 continue;
2513
2514 if (!rdev->meta_bdev)
2515 continue;
2516
2517
2518 rdev->sb_start = 0;
2519 rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
2520 if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
2521 DMERR("superblock size of a logical block is no longer valid");
2522 return -EINVAL;
2523 }
2524
		/*
		 * Skipping super_load due to CTR_FLAG_SYNC will cause
		 * the array to undergo initialization again as
		 * though it were new.  This is the intended effect
		 * of the "sync" directive.
		 *
		 * With reshaping capability added, we must ensure that
		 * the "sync" directive is disallowed during the reshape.
		 */
2534 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
2535 continue;
2536
2537 r = super_load(rdev, freshest);
2538
2539 switch (r) {
2540 case 1:
2541 freshest = rdev;
2542 break;
2543 case 0:
2544 break;
2545 default:
			/*
			 * We have to keep any raid0 data/metadata device pairs or
			 * the MD raid0 personality will fail to start the array.
			 */
2551 if (rs_is_raid0(rs))
2552 continue;
2553
			/*
			 * We keep the dm_devs to be able to emit the device tuple
			 * properly on the table line in raid_status() (rather than
			 * mistakenly acting as if '- -' was passed into the constructor).
			 *
			 * The rdev has to stay on the same_set list to allow for
			 * the attempt to restore faulty devices on second resume.
			 */
2562 rdev->raid_disk = rdev->saved_raid_disk = -1;
2563 break;
2564 }
2565 }
2566
2567 if (!freshest)
2568 return 0;
2569
	/*
	 * Validation of the freshest device provides the source of
	 * validation for the remaining devices.
	 */
2574 rs->ti->error = "Unable to assemble array: Invalid superblocks";
2575 if (super_validate(rs, freshest))
2576 return -EINVAL;
2577
2578 if (validate_raid_redundancy(rs)) {
2579 rs->ti->error = "Insufficient redundancy to activate array";
2580 return -EINVAL;
2581 }
2582
2583 rdev_for_each(rdev, mddev)
2584 if (!test_bit(Journal, &rdev->flags) &&
2585 rdev != freshest &&
2586 super_validate(rs, rdev))
2587 return -EINVAL;
2588 return 0;
2589 }
2590
/*
 * Adjust data_offset and new_data_offset on all disk members of @rs
 * for out of place reshaping if requested by constructor.
 *
 * We need free space at the beginning of each raid disk for forward
 * and at the end for backward reshapes which userspace has to provide
 * via the data_offset argument.
 *
 * Be careful of changing the array size.
 */
2599 static int rs_adjust_data_offsets(struct raid_set *rs)
2600 {
2601 sector_t data_offset = 0, new_data_offset = 0;
2602 struct md_rdev *rdev;
2603
2604
2605 if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
2606 if (!rs_is_reshapable(rs))
2607 goto out;
2608
2609 return 0;
2610 }
2611
2612
2613 rdev = &rs->dev[0].rdev;
2614
2615 if (rs->delta_disks < 0) {
		/*
		 * Removing disks (reshaping backwards):
		 *
		 * - before reshape: data is at offset 0 and free space
		 *		     is at the end of each component LV
		 *
		 * - after reshape: data is at offset rs->data_offset != 0 on each component LV
		 */
2624 data_offset = 0;
2625 new_data_offset = rs->data_offset;
2626
2627 } else if (rs->delta_disks > 0) {
		/*
		 * Adding disks (reshaping forwards):
		 *
		 * - before reshape: data is at offset rs->data_offset != 0 and
		 *		     free space is at the beginning of each component LV
		 *
		 * - after reshape: data is at offset 0 on each component LV
		 */
2636 data_offset = rs->data_offset;
2637 new_data_offset = 0;
2638
2639 } else {
		/*
		 * User space passes in 0 for data offset after having removed reshape space.
		 *
		 * - or - (data offset != 0)
		 *
		 * Changing RAID layout or chunk size -> toggle offsets:
		 *
		 * - before reshape: data is at offset 0 and free space is at
		 *		     the end of each component LV
		 *		     -or-
		 *		     data is at offset rs->data_offset != 0 and free
		 *		     space is at the beginning of each component LV
		 *
		 * - after reshape: data is at offset 0 if it was at offset != 0
		 *		    or at offset != 0 if it was at offset 0
		 *		    on each component LV
		 */
2658 data_offset = rs->data_offset ? rdev->data_offset : 0;
2659 new_data_offset = data_offset ? 0 : rs->data_offset;
2660 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2661 }
2662
	/*
	 * Make sure we got a minimum amount of free sectors per device.
	 */
2666 if (rs->data_offset &&
2667 bdev_nr_sectors(rdev->bdev) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
2668 rs->ti->error = data_offset ? "No space for forward reshape" :
2669 "No space for backward reshape";
2670 return -ENOSPC;
2671 }
2672 out:
	/*
	 * Raise recovery_cp in case data_offset != 0 to
	 * avoid false recovery positives in the constructor.
	 */
2677 if (rs->md.recovery_cp < rs->md.dev_sectors)
2678 rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
2679
	/* Adjust data offsets on all rdevs but not on any raid4/5/6 journal device */
2681 rdev_for_each(rdev, &rs->md) {
2682 if (!test_bit(Journal, &rdev->flags)) {
2683 rdev->data_offset = data_offset;
2684 rdev->new_data_offset = new_data_offset;
2685 }
2686 }
2687
2688 return 0;
2689 }
2690
2691
2692 static void __reorder_raid_disk_indexes(struct raid_set *rs)
2693 {
2694 int i = 0;
2695 struct md_rdev *rdev;
2696
2697 rdev_for_each(rdev, &rs->md) {
2698 if (!test_bit(Journal, &rdev->flags)) {
2699 rdev->raid_disk = i++;
2700 rdev->saved_raid_disk = rdev->new_raid_disk = -1;
2701 }
2702 }
2703 }
2704
/*
 * Set up @rs for takeover from a different raid level.
 */
2708 static int rs_setup_takeover(struct raid_set *rs)
2709 {
2710 struct mddev *mddev = &rs->md;
2711 struct md_rdev *rdev;
2712 unsigned int d = mddev->raid_disks = rs->raid_disks;
2713 sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;
2714
2715 if (rt_is_raid10(rs->raid_type)) {
2716 if (rs_is_raid0(rs)) {
2717
2718 __reorder_raid_disk_indexes(rs);
2719
2720
2721 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
2722 rs->raid10_copies);
2723 } else if (rs_is_raid1(rs))
2724
2725 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2726 rs->raid_disks);
2727 else
2728 return -EINVAL;
2729
2730 }
2731
2732 clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2733 mddev->recovery_cp = MaxSector;
2734
2735 while (d--) {
2736 rdev = &rs->dev[d].rdev;
2737
2738 if (test_bit(d, (void *) rs->rebuild_disks)) {
2739 clear_bit(In_sync, &rdev->flags);
2740 clear_bit(Faulty, &rdev->flags);
2741 mddev->recovery_cp = rdev->recovery_offset = 0;
2742
2743 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2744 }
2745
2746 rdev->new_data_offset = new_data_offset;
2747 }
2748
2749 return 0;
2750 }
2751
2752
2753 static int rs_prepare_reshape(struct raid_set *rs)
2754 {
2755 bool reshape;
2756 struct mddev *mddev = &rs->md;
2757
2758 if (rs_is_raid10(rs)) {
2759 if (rs->raid_disks != mddev->raid_disks &&
2760 __is_raid10_near(mddev->layout) &&
2761 rs->raid10_copies &&
2762 rs->raid10_copies != __raid10_near_copies(mddev->layout)) {
			/*
			 * Changing the number of raid10_near copies is only
			 * possible by reordering the mirror groups, which
			 * requires the disk count to be a whole multiple of
			 * the requested copies.
			 */
2769 if (rs->raid_disks % rs->raid10_copies) {
2770 rs->ti->error = "Can't reshape raid10 mirror groups";
2771 return -EINVAL;
2772 }
2773
2774
2775 __reorder_raid_disk_indexes(rs);
2776 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2777 rs->raid10_copies);
2778 mddev->new_layout = mddev->layout;
2779 reshape = false;
2780 } else
2781 reshape = true;
2782
2783 } else if (rs_is_raid456(rs))
2784 reshape = true;
2785
2786 else if (rs_is_raid1(rs)) {
2787 if (rs->delta_disks) {
2788
2789 mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks;
2790 reshape = true;
2791 } else {
2792
2793 mddev->raid_disks = rs->raid_disks;
2794 reshape = false;
2795 }
2796 } else {
2797 rs->ti->error = "Called with bogus raid type";
2798 return -EINVAL;
2799 }
2800
2801 if (reshape) {
2802 set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
2803 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2804 } else if (mddev->raid_disks < rs->raid_disks)
2805
2806 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2807
2808 return 0;
2809 }
2810
2811
2812 static sector_t _get_reshape_sectors(struct raid_set *rs)
2813 {
2814 struct md_rdev *rdev;
2815 sector_t reshape_sectors = 0;
2816
2817 rdev_for_each(rdev, &rs->md)
2818 if (!test_bit(Journal, &rdev->flags)) {
2819 reshape_sectors = (rdev->data_offset > rdev->new_data_offset) ?
2820 rdev->data_offset - rdev->new_data_offset :
2821 rdev->new_data_offset - rdev->data_offset;
2822 break;
2823 }
2824
2825 return max(reshape_sectors, (sector_t) rs->data_offset);
2826 }
2827
/*
 * Set up @rs for reshape:
 * - change raid layout
 * - change chunk size
 * - add disks
 * - remove disks
 */
2835 static int rs_setup_reshape(struct raid_set *rs)
2836 {
2837 int r = 0;
2838 unsigned int cur_raid_devs, d;
2839 sector_t reshape_sectors = _get_reshape_sectors(rs);
2840 struct mddev *mddev = &rs->md;
2841 struct md_rdev *rdev;
2842
2843 mddev->delta_disks = rs->delta_disks;
2844 cur_raid_devs = mddev->raid_disks;
2845
2846
2847 if (mddev->delta_disks &&
2848 mddev->layout != mddev->new_layout) {
2849 DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks);
2850 mddev->new_layout = mddev->layout;
2851 }
2852
	/*
	 * Adjust array size:
	 *
	 * - in case of adding disk(s), array size has
	 *   to grow after the disk adding reshape,
	 *   which'll happen in the event handler;
	 *   reshape will happen forward, so space has to
	 *   be available at the beginning of each disk
	 *
	 * - in case of removing disk(s), array size
	 *   has to shrink before starting the reshape,
	 *   which'll happen here;
	 *   reshape will happen backward, so space has to
	 *   be available at the end of each disk
	 *
	 * - data_offset and new_data_offset are
	 *   adjusted for aforementioned out of place
	 *   reshaping based on userspace passing in
	 *   the "data_offset <sectors>" key/value
	 *   pair via the constructor
	 */
2876 if (rs->delta_disks > 0) {
2877
2878 for (d = cur_raid_devs; d < rs->raid_disks; d++) {
2879 rdev = &rs->dev[d].rdev;
2880 clear_bit(In_sync, &rdev->flags);
			/*
			 * A new disk has no former position to return to,
			 * so make sure saved_raid_disk stays unset (-1).
			 */
2886 rdev->saved_raid_disk = -1;
2887 rdev->raid_disk = d;
2888
2889 rdev->sectors = mddev->dev_sectors;
2890 rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
2891 }
2892
2893 mddev->reshape_backwards = 0;
2894
2895
2896 } else if (rs->delta_disks < 0) {
2897 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, true);
2898 mddev->reshape_backwards = 1;
2899
2900
2901 } else {
		/*
		 * Reshape layout (e.g. raid5_ls -> raid5_n) and/or chunk size
		 * whilst keeping the number of disks:
		 *
		 * Toggle reshape_backwards depending on data_offset:
		 *
		 * - free space upfront -> reshape forward
		 *
		 * - free space at the end -> reshape backward
		 *
		 * This utilizes free reshape space avoiding the need
		 * for userspace to move (parts of) LV data in case of
		 * disk removal as this requires moving anyway.
		 */
2923 mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
2924 }
2925
	/*
	 * Adjust device sizes for forward reshape so that the
	 * additional free space at the beginning of each member
	 * is accounted for.
	 */
2930 if (!mddev->reshape_backwards)
2931 rdev_for_each(rdev, &rs->md)
2932 if (!test_bit(Journal, &rdev->flags))
2933 rdev->sectors += reshape_sectors;
2934
2935 return r;
2936 }
2937
/*
 * If the md resync thread has updated superblock with max reshape position
 * at the end of a reshape but not (yet) reset the layout configuration
 * changes -> reset the latter.
 */
2943 static void rs_reset_inconclusive_reshape(struct raid_set *rs)
2944 {
2945 if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
2946 rs_set_cur(rs);
2947 rs->md.delta_disks = 0;
2948 rs->md.reshape_backwards = 0;
2949 }
2950 }
2951
/*
 * Enable/disable discard support on raid set depending on
 * RAID level and discard properties of underlying RAID members.
 */
2956 static void configure_discard_support(struct raid_set *rs)
2957 {
2958 int i;
2959 bool raid456;
2960 struct dm_target *ti = rs->ti;
2961
	/*
	 * RAID levels 4, 5 and 6 require zeroing of discarded regions for safety.
	 */
2965 raid456 = rs_is_raid456(rs);
2966
2967 for (i = 0; i < rs->raid_disks; i++) {
2968 if (!rs->dev[i].rdev.bdev ||
2969 !bdev_max_discard_sectors(rs->dev[i].rdev.bdev))
2970 return;
2971
2972 if (raid456) {
2973 if (!devices_handle_discard_safely) {
2974 DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
2975 DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
2976 return;
2977 }
2978 }
2979 }
2980
2981 ti->num_discard_bios = 1;
2982 }
2983
2984
/*
 * Construct a RAID0/1/10/4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params>{0,}	\
 *	<#raid_devs> [<meta_dev1> <dev1>]{1,}
 *
 * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
 * details on possible <raid_params>.
 *
 * Userspace is free to initialize the metadata devices, hence the
 * superblocks, to enforce recreation based on the passed in table
 * parameters.
 */
2996
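/*
 * Illustrative table line (device numbers hypothetical): a raid4 set of
 * five devices without metadata devices, using a 1 MiB (2048 sector)
 * chunk size:
 *
 *   0 1960893648 raid raid4 1 2048 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
 */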
2997 static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2998 {
2999 int r;
3000 bool resize = false;
3001 struct raid_type *rt;
3002 unsigned int num_raid_params, num_raid_devs;
3003 sector_t sb_array_sectors, rdev_sectors, reshape_sectors;
3004 struct raid_set *rs = NULL;
3005 const char *arg;
3006 struct rs_layout rs_layout;
3007 struct dm_arg_set as = { argc, argv }, as_nrd;
3008 struct dm_arg _args[] = {
3009 { 0, as.argc, "Cannot understand number of raid parameters" },
3010 { 1, 254, "Cannot understand number of raid devices parameters" }
3011 };
3012
3013 arg = dm_shift_arg(&as);
3014 if (!arg) {
3015 ti->error = "No arguments";
3016 return -EINVAL;
3017 }
3018
3019 rt = get_raid_type(arg);
3020 if (!rt) {
3021 ti->error = "Unrecognised raid_type";
3022 return -EINVAL;
3023 }
3024
3025
3026 if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
3027 return -EINVAL;
3028
3029
3030 as_nrd = as;
3031 dm_consume_args(&as_nrd, num_raid_params);
3032 _args[1].max = (as_nrd.argc - 1) / 2;
3033 if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
3034 return -EINVAL;
3035
3036 if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) {
3037 ti->error = "Invalid number of supplied raid devices";
3038 return -EINVAL;
3039 }
3040
3041 rs = raid_set_alloc(ti, rt, num_raid_devs);
3042 if (IS_ERR(rs))
3043 return PTR_ERR(rs);
3044
3045 r = parse_raid_params(rs, &as, num_raid_params);
3046 if (r)
3047 goto bad;
3048
3049 r = parse_dev_params(rs, &as);
3050 if (r)
3051 goto bad;
3052
3053 rs->md.sync_super = super_sync;
3054
	/*
	 * Calculate ctr requested array and device sizes to allow
	 * for superblock analysis needing device sizes defined.
	 *
	 * Any existing superblock will overwrite the array and device sizes.
	 */
3061 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
3062 if (r)
3063 goto bad;
3064
3065
3066 rs->array_sectors = rs->md.array_sectors;
3067 rs->dev_sectors = rs->md.dev_sectors;
3068
	/*
	 * Backup any new raid set level, layout, ...
	 * requested to be able to compare to superblock
	 * members for conversion decisions.
	 */
3074 rs_config_backup(rs, &rs_layout);
3075
3076 r = analyse_superblocks(ti, rs);
3077 if (r)
3078 goto bad;
3079
3080
3081 sb_array_sectors = rs->md.array_sectors;
3082 rdev_sectors = __rdev_sectors(rs);
3083 if (!rdev_sectors) {
3084 ti->error = "Invalid rdev size";
3085 r = -EINVAL;
3086 goto bad;
3087 }
3088
3089
3090 reshape_sectors = _get_reshape_sectors(rs);
3091 if (rs->dev_sectors != rdev_sectors) {
3092 resize = (rs->dev_sectors != rdev_sectors - reshape_sectors);
3093 if (rs->dev_sectors > rdev_sectors - reshape_sectors)
3094 set_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
3095 }
3096
3097 INIT_WORK(&rs->md.event_work, do_table_event);
3098 ti->private = rs;
3099 ti->num_flush_bios = 1;
3100 ti->needs_bio_set_dev = true;
3101
3102
3103 rs_config_restore(rs, &rs_layout);
3104
	/*
	 * Now that we have any superblock metadata available,
	 * check for new, recovering, reshaping, to be taken over,
	 * to be reshaped or an existing, unchanged raid set to
	 * run in sequence.
	 */
3111 if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) {
3112
3113 if (rs_is_raid6(rs) &&
3114 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
3115 ti->error = "'nosync' not allowed for new raid6 set";
3116 r = -EINVAL;
3117 goto bad;
3118 }
3119 rs_setup_recovery(rs, 0);
3120 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3121 rs_set_new(rs);
3122 } else if (rs_is_recovering(rs)) {
3123
3124 goto size_check;
3125 } else if (rs_is_reshaping(rs)) {
3126
3127 if (resize) {
3128 ti->error = "Can't resize a reshaping raid set";
3129 r = -EPERM;
3130 goto bad;
3131 }
3132
3133 } else if (rs_takeover_requested(rs)) {
3134 if (rs_is_reshaping(rs)) {
3135 ti->error = "Can't takeover a reshaping raid set";
3136 r = -EPERM;
3137 goto bad;
3138 }
3139
3140
3141 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
3142 ti->error = "Can't takeover a journaled raid4/5/6 set";
3143 r = -EPERM;
3144 goto bad;
3145 }
3146
		/*
		 * If a takeover is needed, userspace sets any additional
		 * devices to rebuild and we can check for a valid request here.
		 *
		 * If acceptable, set the level to the new requested
		 * one, prohibit requesting recovery, allow the raid
		 * set to run and store superblocks during resume.
		 */
3155 r = rs_check_takeover(rs);
3156 if (r)
3157 goto bad;
3158
3159 r = rs_setup_takeover(rs);
3160 if (r)
3161 goto bad;
3162
3163 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3164
3165 rs_setup_recovery(rs, MaxSector);
3166 rs_set_new(rs);
3167 } else if (rs_reshape_requested(rs)) {
3168
3169 clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
3170
		/*
		 * No need to check for 'ongoing' takeover here, because takeover
		 * is an instant operation as opposed to an ongoing reshape.
		 */
3177 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
3178 ti->error = "Can't reshape a journaled raid4/5/6 set";
3179 r = -EPERM;
3180 goto bad;
3181 }
3182
3183
3184 if (reshape_sectors || rs_is_raid1(rs)) {
			/*
			 * We can only prepare for a reshape here, because the
			 * raid set needs to run to provide the respective reshape
			 * check functions via its MD personality instance.
			 *
			 * So do the reshape check after md_run() succeeded.
			 */
3192 r = rs_prepare_reshape(rs);
3193 if (r)
3194 goto bad;
3195
3196
3197 rs_setup_recovery(rs, MaxSector);
3198 }
3199 rs_set_cur(rs);
3200 } else {
3201 size_check:
3202
3203 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
3204 clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags);
3205 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3206 rs_setup_recovery(rs, MaxSector);
3207 } else if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
			/*
			 * Set raid set to current size, i.e. size as of
			 * the superblocks, to grow it to rs->ti->len in preresume.
			 */
3212 r = rs_set_dev_and_array_sectors(rs, sb_array_sectors, false);
3213 if (r)
3214 goto bad;
3215
3216 rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_sectors);
3217 } else {
3218
3219 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
3220 if (r)
3221 goto bad;
3222
3223 if (sb_array_sectors > rs->array_sectors)
3224 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3225 }
3226 rs_set_cur(rs);
3227 }
3228
3229
3230 r = rs_adjust_data_offsets(rs);
3231 if (r)
3232 goto bad;
3233
3234
3235 rs_reset_inconclusive_reshape(rs);
3236
3237
3238 rs->md.ro = 1;
3239 rs->md.in_sync = 1;
3240
3241
3242 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
3243
3244
3245 mddev_lock_nointr(&rs->md);
3246 r = md_run(&rs->md);
3247 rs->md.in_sync = 0;
3248 if (r) {
3249 ti->error = "Failed to run raid array";
3250 mddev_unlock(&rs->md);
3251 goto bad;
3252 }
3253
3254 r = md_start(&rs->md);
3255 if (r) {
3256 ti->error = "Failed to start raid array";
3257 mddev_unlock(&rs->md);
3258 goto bad_md_start;
3259 }
3260
3261
3262 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
3263 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
3264 if (r) {
3265 ti->error = "Failed to set raid4/5/6 journal mode";
3266 mddev_unlock(&rs->md);
3267 goto bad_journal_mode_set;
3268 }
3269 }
3270
3271 mddev_suspend(&rs->md);
3272 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
3273
3274
3275 if (rs_is_raid456(rs)) {
3276 r = rs_set_raid456_stripe_cache(rs);
3277 if (r)
3278 goto bad_stripe_cache;
3279 }
3280
3281
3282 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
3283 r = rs_check_reshape(rs);
3284 if (r)
3285 goto bad_check_reshape;
3286
3287
3288 rs_config_restore(rs, &rs_layout);
3289
3290 if (rs->md.pers->start_reshape) {
3291 r = rs->md.pers->check_reshape(&rs->md);
3292 if (r) {
3293 ti->error = "Reshape check failed";
3294 goto bad_check_reshape;
3295 }
3296 }
3297 }
3298
3299
3300 configure_discard_support(rs);
3301
3302 mddev_unlock(&rs->md);
3303 return 0;
3304
3305 bad_md_start:
3306 bad_journal_mode_set:
3307 bad_stripe_cache:
3308 bad_check_reshape:
3309 md_stop(&rs->md);
3310 bad:
3311 raid_set_free(rs);
3312
3313 return r;
3314 }
3315
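/* Destruct a raid mapping */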
3316 static void raid_dtr(struct dm_target *ti)
3317 {
3318 struct raid_set *rs = ti->private;
3319
3320 md_stop(&rs->md);
3321 raid_set_free(rs);
3322 }
3323
3324 static int raid_map(struct dm_target *ti, struct bio *bio)
3325 {
3326 struct raid_set *rs = ti->private;
3327 struct mddev *mddev = &rs->md;
3328
	/*
	 * If we're reshaping to add disk(s), ti->len and
	 * mddev->array_sectors will differ during the process
	 * (ti->len > mddev->array_sectors), so we have to requeue
	 * bios with addresses > mddev->array_sectors here or
	 * there will occur accesses past EOD of the component
	 * data images thus erroring the raid set.
	 */
3337 if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
3338 return DM_MAPIO_REQUEUE;
3339
3340 md_handle_request(mddev, bio);
3341
3342 return DM_MAPIO_SUBMITTED;
3343 }
3344
3345
3346 enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
3347 static const char *sync_str(enum sync_state state)
3348 {
3349
3350 static const char *sync_strs[] = {
3351 "frozen",
3352 "reshape",
3353 "resync",
3354 "check",
3355 "repair",
3356 "recover",
3357 "idle"
3358 };
3359
3360 return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
}
3362
3363
3364 static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
3365 {
3366 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
3367 return st_frozen;
3368
3369
3370 if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
3371 (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
3372 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
3373 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
3374 return st_reshape;
3375
3376 if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
3377 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
3378 return st_resync;
3379 if (test_bit(MD_RECOVERY_CHECK, &recovery))
3380 return st_check;
3381 return st_repair;
3382 }
3383
3384 if (test_bit(MD_RECOVERY_RECOVER, &recovery))
3385 return st_recover;
3386
3387 if (mddev->reshape_position != MaxSector)
3388 return st_reshape;
3389 }
3390
3391 return st_idle;
3392 }
3393
/*
 * Return status string for @rdev
 *
 * Status characters:
 *
 *  'D' = Dead/Failed raid set component or raid4/5/6 journal device
 *  'a' = Alive but not in-sync raid set component _or_ alive raid4/5/6 'write_back' journal device
 *  'A' = Alive and in-sync raid set component _or_ alive raid4/5/6 'write_through' journal device
 *  '-' = Non-existing device (i.e. userspace passed '- -' into the ctr)
 */
3404 static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
3405 {
3406 if (!rdev->bdev)
3407 return "-";
3408 else if (test_bit(Faulty, &rdev->flags))
3409 return "D";
3410 else if (test_bit(Journal, &rdev->flags))
3411 return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ? "A" : "a";
3412 else if (test_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags) ||
3413 (!test_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags) &&
3414 !test_bit(In_sync, &rdev->flags)))
3415 return "a";
3416 else
3417 return "A";
3418 }
3419
3420
3421 static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3422 enum sync_state state, sector_t resync_max_sectors)
3423 {
3424 sector_t r;
3425 struct mddev *mddev = &rs->md;
3426
3427 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3428 clear_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3429
3430 if (rs_is_raid0(rs)) {
3431 r = resync_max_sectors;
3432 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3433
3434 } else {
3435 if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
3436 r = mddev->recovery_cp;
3437 else
3438 r = mddev->curr_resync_completed;
3439
3440 if (state == st_idle && r >= resync_max_sectors) {
			/*
			 * Sync complete.  In case we have finished recovering,
			 * the array is in sync.
			 */
3445 if (test_bit(MD_RECOVERY_RECOVER, &recovery))
3446 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3447
3448 } else if (state == st_recover)
			/*
			 * In case we are recovering, the array is not in sync
			 * and the health chars should show the recovering legs.
			 *
			 * The recovery offset was already retrieved from
			 * curr_resync_completed above.
			 */
3455 ;
3456
3457 else if (state == st_resync || state == st_reshape)
			/*
			 * If "resync/reshape" is occurring, the raid set
			 * is or may be out of sync hence the health
			 * characters shall be 'a'.
			 */
3463 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3464
3465 else if (state == st_check || state == st_repair)
			/*
			 * If "check" or "repair" is occurring, the raid set has
			 * undergone an initial sync and the health characters
			 * should not be 'a' anymore.
			 */
3471 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3472
3473 else if (test_bit(MD_RECOVERY_NEEDED, &recovery))
			/*
			 * We are idle and recovery is needed, prevent 'A' chars race
			 * caused by components still set to in-sync by constructor.
			 */
3478 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3479
3480 else {
			/*
			 * We are idle and the raid set may be doing an initial
			 * sync, or it may be rebuilding individual components.
			 * If all the devices are In_sync, then it is the raid set
			 * that is being initialized.
			 */
3487 struct md_rdev *rdev;
3488
3489 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3490 rdev_for_each(rdev, mddev)
3491 if (!test_bit(Journal, &rdev->flags) &&
3492 !test_bit(In_sync, &rdev->flags)) {
3493 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3494 break;
3495 }
3496 }
3497 }
3498
3499 return min(r, resync_max_sectors);
3500 }
3501
3502
3503 static const char *__get_dev_name(struct dm_dev *dev)
3504 {
3505 return dev ? dev->name : "-";
3506 }
3507
3508 static void raid_status(struct dm_target *ti, status_type_t type,
3509 unsigned int status_flags, char *result, unsigned int maxlen)
3510 {
3511 struct raid_set *rs = ti->private;
3512 struct mddev *mddev = &rs->md;
3513 struct r5conf *conf = rs_is_raid456(rs) ? mddev->private : NULL;
3514 int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
3515 unsigned long recovery;
3516 unsigned int raid_param_cnt = 1;
3517 unsigned int sz = 0;
3518 unsigned int rebuild_writemostly_count = 0;
3519 sector_t progress, resync_max_sectors, resync_mismatches;
3520 enum sync_state state;
3521 struct raid_type *rt;
3522
3523 switch (type) {
3524 case STATUSTYPE_INFO:
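		/*
		 * Illustrative STATUSTYPE_INFO output for an in-sync raid4 set
		 * of five devices (values hypothetical):
		 *
		 *   raid4 5 AAAAA 490221568/490221568 idle 0 0 -
		 *
		 * i.e. <raid_type> <#devices> <health_chars> <sync_ratio>
		 * <sync_action> <mismatch_cnt> <data_offset> <journal_char>
		 */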
3525
3526 rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
3527 if (!rt)
3528 return;
3529
3530 DMEMIT("%s %d ", rt->name, mddev->raid_disks);
3531
		/* Access most recent mddev properties for status output */
		smp_rmb(); /* Makes sure the following states are valid */
3534
3535 resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
3536 mddev->resync_max_sectors : mddev->dev_sectors;
3537 recovery = rs->md.recovery;
3538 state = decipher_sync_action(mddev, recovery);
3539 progress = rs_get_progress(rs, recovery, state, resync_max_sectors);
3540 resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
3541 atomic64_read(&mddev->resync_mismatches) : 0;
3542
3543
3544 for (i = 0; i < rs->raid_disks; i++)
			DMEMIT("%s", __raid_dev_status(rs, &rs->dev[i].rdev));
3546
		/*
		 * In-sync/Reshape ratio:
		 *  The in-sync ratio shows the progress of:
		 *   - Initializing the raid set
		 *   - Rebuilding a subset of devices of the raid set
		 *  The user can distinguish between the two by referring
		 *  to the status characters.
		 *
		 *  The reshape ratio shows the progress of
		 *  changing the raid layout or the number of
		 *  disks of a raid set.
		 */
3559 DMEMIT(" %llu/%llu", (unsigned long long) progress,
3560 (unsigned long long) resync_max_sectors);
3561
		/*
		 * v1.5.0+:
		 *
		 * Sync action:
		 *   See Documentation/admin-guide/device-mapper/dm-raid.rst for
		 *   information on each of these states.
		 */
3569 DMEMIT(" %s", sync_str(state));
3570
		/*
		 * v1.5.0+:
		 *
		 * resync_mismatches/mismatch_cnt
		 *   This field shows the number of discrepancies found when
		 *   performing a "check" of the raid set.
		 */
3578 DMEMIT(" %llu", (unsigned long long) resync_mismatches);
3579
		/*
		 * v1.9.0+:
		 *
		 * data_offset (needed for out of place reshaping)
		 *   This field shows the data offset into the data
		 *   image LV where the first stripe's data starts.
		 *
		 * We keep data_offset equal on all raid disks of the set,
		 * so retrieving it from the first raid disk is sufficient.
		 */
3590 DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);
3591
3592
3593
3594
3595 DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ?
3596 __raid_dev_status(rs, &rs->journal_dev.rdev) : "-");
3597 break;
3598
3599 case STATUSTYPE_TABLE:
		/* Report the table line string you would use to construct this raid set */

		/*
		 * Count any rebuild or writemostly argument pairs and subtract the
		 * hweight count being added below of any rebuild and writemostly ctr flags.
		 */
3606 for (i = 0; i < rs->raid_disks; i++) {
3607 rebuild_writemostly_count += (test_bit(i, (void *) rs->rebuild_disks) ? 2 : 0) +
3608 (test_bit(WriteMostly, &rs->dev[i].rdev.flags) ? 2 : 0);
3609 }
3610 rebuild_writemostly_count -= (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) ? 2 : 0) +
3611 (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags) ? 2 : 0);
3612
3613 raid_param_cnt += rebuild_writemostly_count +
3614 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
3615 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
3616
3617
3618 DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
3619 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
3620 DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
3621 if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
3622 DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
3623 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags))
3624 for (i = 0; i < rs->raid_disks; i++)
3625 if (test_bit(i, (void *) rs->rebuild_disks))
3626 DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD), i);
3627 if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
3628 DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
3629 mddev->bitmap_info.daemon_sleep);
3630 if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
3631 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
3632 mddev->sync_speed_min);
3633 if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
3634 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
3635 mddev->sync_speed_max);
3636 if (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags))
3637 for (i = 0; i < rs->raid_disks; i++)
3638 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
3639 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
3640 rs->dev[i].rdev.raid_disk);
3641 if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
3642 DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND),
3643 mddev->bitmap_info.max_write_behind);
3644 if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
3645 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE),
3646 max_nr_stripes);
3647 if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
3648 DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE),
3649 (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
3650 if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
3651 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES),
3652 raid10_md_layout_to_copies(mddev->layout));
3653 if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
3654 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT),
3655 raid10_md_layout_to_format(mddev->layout));
3656 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
3657 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS),
3658 max(rs->delta_disks, mddev->delta_disks));
3659 if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
3660 DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET),
3661 (unsigned long long) rs->data_offset);
3662 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags))
3663 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV),
3664 __get_dev_name(rs->journal_dev.dev));
3665 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags))
3666 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE),
3667 md_journal_mode_to_dm_raid(rs->journal_dev.mode));
3668 DMEMIT(" %d", rs->raid_disks);
3669 for (i = 0; i < rs->raid_disks; i++)
3670 DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev),
3671 __get_dev_name(rs->dev[i].data_dev));
3672 break;
3673
3674 case STATUSTYPE_IMA:
3675 rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
3676 if (!rt)
3677 return;
3678
3679 DMEMIT_TARGET_NAME_VERSION(ti->type);
3680 DMEMIT(",raid_type=%s,raid_disks=%d", rt->name, mddev->raid_disks);
3681
3682
3683 smp_rmb();
3684 recovery = rs->md.recovery;
3685 state = decipher_sync_action(mddev, recovery);
3686 DMEMIT(",raid_state=%s", sync_str(state));
3687
3688 for (i = 0; i < rs->raid_disks; i++) {
3689 DMEMIT(",raid_device_%d_status=", i);
			DMEMIT("%s", __raid_dev_status(rs, &rs->dev[i].rdev));
3691 }
3692
3693 if (rt_is_raid456(rt)) {
3694 DMEMIT(",journal_dev_mode=");
3695 switch (rs->journal_dev.mode) {
3696 case R5C_JOURNAL_MODE_WRITE_THROUGH:
3697 DMEMIT("%s",
3698 _raid456_journal_mode[R5C_JOURNAL_MODE_WRITE_THROUGH].param);
3699 break;
3700 case R5C_JOURNAL_MODE_WRITE_BACK:
3701 DMEMIT("%s",
3702 _raid456_journal_mode[R5C_JOURNAL_MODE_WRITE_BACK].param);
3703 break;
3704 default:
3705 DMEMIT("invalid");
3706 break;
3707 }
3708 }
3709 DMEMIT(";");
3710 break;
3711 }
3712 }
3713
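/*
 * Handle sync/recovery control messages ("frozen", "idle", "resync",
 * "recover", "check", "repair") by steering the MD recovery flags.
 */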
3714 static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
3715 char *result, unsigned maxlen)
3716 {
3717 struct raid_set *rs = ti->private;
3718 struct mddev *mddev = &rs->md;
3719
3720 if (!mddev->pers || !mddev->pers->sync_request)
3721 return -EINVAL;
3722
3723 if (!strcasecmp(argv[0], "frozen"))
3724 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3725 else
3726 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3727
3728 if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
3729 if (mddev->sync_thread) {
3730 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3731 md_unregister_thread(&mddev->sync_thread);
3732 md_reap_sync_thread(mddev);
3733 }
3734 } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
3735 return -EBUSY;
3736 else if (!strcasecmp(argv[0], "resync"))
3737 ;
3738 else if (!strcasecmp(argv[0], "recover"))
3739 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3740 else {
3741 if (!strcasecmp(argv[0], "check")) {
3742 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3743 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3744 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3745 } else if (!strcasecmp(argv[0], "repair")) {
3746 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3747 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3748 } else
3749 return -EINVAL;
3750 }
3751 if (mddev->ro == 2) {
		/*
		 * A write to sync_action is enough to justify
		 * canceling read-auto mode.
		 */
3755 mddev->ro = 0;
3756 if (!mddev->suspended && mddev->sync_thread)
3757 md_wakeup_thread(mddev->sync_thread);
3758 }
3759 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3760 if (!mddev->suspended && mddev->thread)
3761 md_wakeup_thread(mddev->thread);
3762
3763 return 0;
3764 }
3765
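/* Iterate all data devices of the set, calling @fn on each with the in-use sector range */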
3766 static int raid_iterate_devices(struct dm_target *ti,
3767 iterate_devices_callout_fn fn, void *data)
3768 {
3769 struct raid_set *rs = ti->private;
3770 unsigned int i;
3771 int r = 0;
3772
3773 for (i = 0; !r && i < rs->raid_disks; i++) {
3774 if (rs->dev[i].data_dev) {
3775 r = fn(ti, rs->dev[i].data_dev,
3776 0,
3777 rs->md.dev_sectors, data);
3778 }
3779 }
3780
3781 return r;
3782 }
3783
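/* Advertise the chunk size as minimum I/O and a full data stripe as optimal I/O size */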
3784 static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
3785 {
3786 struct raid_set *rs = ti->private;
3787 unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
3788
3789 blk_limits_io_min(limits, chunk_size_bytes);
3790 blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
3791 }
3792
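/* Stop writes and suspend the MD device before the dm device gets suspended */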
3793 static void raid_postsuspend(struct dm_target *ti)
3794 {
3795 struct raid_set *rs = ti->private;
3796
3797 if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
		/* Writes have to be stopped before suspending to avoid deadlocks. */
3799 if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
3800 md_stop_writes(&rs->md);
3801
3802 mddev_lock_nointr(&rs->md);
3803 mddev_suspend(&rs->md);
3804 mddev_unlock(&rs->md);
3805 }
3806 }
3807
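/*
 * Try to bring devices which failed earlier but have a readable
 * superblock again back into the raid set and clear their failed
 * state in all superblocks.
 */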
3808 static void attempt_restore_of_faulty_devices(struct raid_set *rs)
3809 {
3810 int i;
3811 uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
3812 unsigned long flags;
3813 bool cleared = false;
3814 struct dm_raid_superblock *sb;
3815 struct mddev *mddev = &rs->md;
3816 struct md_rdev *r;
3817
	/* RAID personalities have to provide hot add/remove methods or we need to bail out. */
3819 if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
3820 return;
3821
3822 memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));
3823
3824 for (i = 0; i < rs->raid_disks; i++) {
3825 r = &rs->dev[i].rdev;
3826
3827 if (test_bit(Journal, &r->flags))
3828 continue;
3829
3830 if (test_bit(Faulty, &r->flags) &&
3831 r->meta_bdev && !read_disk_sb(r, r->sb_size, true)) {
3832 DMINFO("Faulty %s device #%d has readable super block."
3833 " Attempting to revive it.",
3834 rs->raid_type->name, i);
3835
			/*
			 * Faulty bit may be set, but sometimes the array can
			 * be suspended before the personalities can respond
			 * by removing the device from the array (i.e. calling
			 * 'hot_remove_disk').  If they haven't yet removed
			 * the failed device, its 'raid_disk' number will be
			 * '>= 0' - meaning we must try to add it back in
			 * the same position.
			 */
3845 flags = r->flags;
3846 clear_bit(In_sync, &r->flags);
3847 if (r->raid_disk >= 0) {
3848 if (mddev->pers->hot_remove_disk(mddev, r)) {
3849
3850 r->flags = flags;
3851 continue;
3852 }
3853 } else
3854 r->raid_disk = r->saved_raid_disk = i;
3855
3856 clear_bit(Faulty, &r->flags);
3857 clear_bit(WriteErrorSeen, &r->flags);
3858
3859 if (mddev->pers->hot_add_disk(mddev, r)) {
3860
3861 r->raid_disk = r->saved_raid_disk = -1;
3862 r->flags = flags;
3863 } else {
3864 clear_bit(In_sync, &r->flags);
3865 r->recovery_offset = 0;
3866 set_bit(i, (void *) cleared_failed_devices);
3867 cleared = true;
3868 }
3869 }
3870 }
3871
3872
3873 if (cleared) {
3874 uint64_t failed_devices[DISKS_ARRAY_ELEMS];
3875
3876 rdev_for_each(r, &rs->md) {
3877 if (test_bit(Journal, &r->flags))
3878 continue;
3879
3880 sb = page_address(r->sb_page);
3881 sb_retrieve_failed_devices(sb, failed_devices);
3882
3883 for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
3884 failed_devices[i] &= ~cleared_failed_devices[i];
3885
3886 sb_update_failed_devices(sb, failed_devices);
3887 }
3888 }
3889 }
3890
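/* Load the dirty region bitmap once unless the set is raid0, which has none */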
3891 static int __load_dirty_region_bitmap(struct raid_set *rs)
3892 {
3893 int r = 0;
3894
3895
3896 if (!rs_is_raid0(rs) &&
3897 !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
3898 r = md_bitmap_load(&rs->md);
3899 if (r)
3900 DMERR("Failed to load bitmap");
3901 }
3902
3903 return r;
3904 }
3905
/* Enforce updating all superblocks */
3907 static void rs_update_sbs(struct raid_set *rs)
3908 {
3909 struct mddev *mddev = &rs->md;
3910 int ro = mddev->ro;
3911
3912 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
3913 mddev->ro = 0;
3914 md_update_sb(mddev, 1);
3915 mddev->ro = ro;
3916 }
3917
/*
 * Reshape changes raid algorithm of @rs to a new one within its personality
 * (e.g. raid6_zr -> raid6_nc), changes stripe size, adds/removes
 * disks from a raid set thus growing/shrinking it or resizes the set.
 *
 * Call mddev_lock_nointr() before!
 */
3925 static int rs_start_reshape(struct raid_set *rs)
3926 {
3927 int r;
3928 struct mddev *mddev = &rs->md;
3929 struct md_personality *pers = mddev->pers;
3930
3931
3932 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
3933
3934 r = rs_setup_reshape(rs);
3935 if (r)
3936 return r;
3937
	/*
	 * Check any reshape constraints enforced by the personality.
	 *
	 * May as well already kick the reshape off so that
	 * pers->start_reshape() becomes optional.
	 */
3943 r = pers->check_reshape(mddev);
3944 if (r) {
3945 rs->ti->error = "pers->check_reshape() failed";
3946 return r;
3947 }
3948
	/*
	 * Personality may not provide a start reshape method, in which
	 * case check_reshape above has already covered everything.
	 */
3953 if (pers->start_reshape) {
3954 r = pers->start_reshape(mddev);
3955 if (r) {
3956 rs->ti->error = "pers->start_reshape() failed";
3957 return r;
3958 }
3959 }
3960
	/*
	 * Now reshape got set up, update superblocks to
	 * reflect the fact so that a table reload will
	 * access proper superblock content in the ctr.
	 */
3966 rs_update_sbs(rs);
3967
3968 return 0;
3969 }
3970
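/*
 * Update superblocks, load the bitmap and set up any requested
 * resize/reshape before the raid set gets resumed.
 */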
3971 static int raid_preresume(struct dm_target *ti)
3972 {
3973 int r;
3974 struct raid_set *rs = ti->private;
3975 struct mddev *mddev = &rs->md;
3976
3977
3978 if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
3979 return 0;
3980
	/*
	 * The superblocks need to be updated on disk if the
	 * array is new or new devices got added (thus zeroed
	 * out by userspace) or __load_dirty_region_bitmap
	 * will overwrite them in core with old data or fail.
	 */
3987 if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
3988 rs_update_sbs(rs);
3989
3990
3991 r = __load_dirty_region_bitmap(rs);
3992 if (r)
3993 return r;
3994
3995
3996 if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
3997 mddev->array_sectors = rs->array_sectors;
3998 mddev->dev_sectors = rs->dev_sectors;
3999 rs_set_rdev_sectors(rs);
4000 rs_set_capacity(rs);
4001 }
4002
4003
4004 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
4005 (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
4006 (rs->requested_bitmap_chunk_sectors &&
4007 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
4008 int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize;
4009
4010 r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors, chunksize, 0);
4011 if (r)
4012 DMERR("Failed to resize bitmap");
4013 }
4014
	/* Check for any resize/reshape on @rs and adjust/initiate */
	/* Be prepared for mddev_resume() in raid_resume() */
4017 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4018 if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
4019 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4020 mddev->resync_min = mddev->recovery_cp;
4021 if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags))
4022 mddev->resync_max_sectors = mddev->dev_sectors;
4023 }
4024
4025
4026 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
4027
4028 rs_set_rdev_sectors(rs);
4029 mddev_lock_nointr(mddev);
4030 r = rs_start_reshape(rs);
4031 mddev_unlock(mddev);
4032 if (r)
4033 DMWARN("Failed to check/start reshape, continuing without change");
4034 r = 0;
4035 }
4036
4037 return r;
4038 }
4039
4040 static void raid_resume(struct dm_target *ti)
4041 {
4042 struct raid_set *rs = ti->private;
4043 struct mddev *mddev = &rs->md;
4044
4045 if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
		/*
		 * A secondary resume while the device is active.
		 * Take this opportunity to check whether any failed
		 * devices are reachable again.
		 */
4051 attempt_restore_of_faulty_devices(rs);
4052 }
4053
4054 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
4055
4056 if (mddev->delta_disks < 0)
4057 rs_set_capacity(rs);
4058
4059 mddev_lock_nointr(mddev);
4060 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4061 mddev->ro = 0;
4062 mddev->in_sync = 0;
4063 mddev_resume(mddev);
4064 mddev_unlock(mddev);
4065 }
4066 }
4067
4068 static struct target_type raid_target = {
4069 .name = "raid",
4070 .version = {1, 15, 1},
4071 .module = THIS_MODULE,
4072 .ctr = raid_ctr,
4073 .dtr = raid_dtr,
4074 .map = raid_map,
4075 .status = raid_status,
4076 .message = raid_message,
4077 .iterate_devices = raid_iterate_devices,
4078 .io_hints = raid_io_hints,
4079 .postsuspend = raid_postsuspend,
4080 .preresume = raid_preresume,
4081 .resume = raid_resume,
4082 };
4083
4084 static int __init dm_raid_init(void)
4085 {
4086 DMINFO("Loading target version %u.%u.%u",
4087 raid_target.version[0],
4088 raid_target.version[1],
4089 raid_target.version[2]);
4090 return dm_register_target(&raid_target);
4091 }
4092
4093 static void __exit dm_raid_exit(void)
4094 {
4095 dm_unregister_target(&raid_target);
4096 }
4097
4098 module_init(dm_raid_init);
4099 module_exit(dm_raid_exit);
4100
4101 module_param(devices_handle_discard_safely, bool, 0644);
4102 MODULE_PARM_DESC(devices_handle_discard_safely,
4103 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
4104
4105 MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
4106 MODULE_ALIAS("dm-raid0");
4107 MODULE_ALIAS("dm-raid1");
4108 MODULE_ALIAS("dm-raid10");
4109 MODULE_ALIAS("dm-raid4");
4110 MODULE_ALIAS("dm-raid5");
4111 MODULE_ALIAS("dm-raid6");
4112 MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
4113 MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
4114 MODULE_LICENSE("GPL");