0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #include <linux/blkdev.h>
0014 #include <linux/seq_file.h>
0015 #include <linux/module.h>
0016 #include <linux/slab.h>
0017 #include <trace/events/block.h>
0018 #include "md.h"
0019 #include "raid0.h"
0020 #include "raid5.h"
0021
/*
 * Default layout for multi-zone arrays whose superblock does not record one
 * (RAID0_ORIG_LAYOUT or RAID0_ALT_MULTIZONE_LAYOUT); 0 means "unset", and
 * create_strip_zones() then refuses to assemble a multi-zone array.
 */
static int default_layout = 0;
module_param(default_layout, int, 0644);

/* mddev feature flags raid0 cannot honour; cleared on takeover from other levels. */
#define UNSUPPORTED_MDDEV_FLAGS		\
	((1L << MD_HAS_JOURNAL) |	\
	 (1L << MD_JOURNAL_CLEAN) |	\
	 (1L << MD_FAILFAST_SUPPORTED) |\
	 (1L << MD_HAS_PPL) |		\
	 (1L << MD_HAS_MULTIPLE_PPLS))
0031
0032
0033
0034
/*
 * dump_zones() - log (pr_debug) the zone layout of a raid0 array:
 * one line per zone listing its member devices, followed by the zone's
 * start offset, per-device start offset and size (all in KB).
 */
static void dump_zones(struct mddev *mddev)
{
	int j, k;
	sector_t zone_size = 0;
	sector_t zone_start = 0;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
		 mdname(mddev),
		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
	for (j = 0; j < conf->nr_strip_zones; j++) {
		char line[200];
		int len = 0;

		/* Build a "/"-separated list of this zone's devices. */
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			len += snprintf(line+len, 200-len, "%s%pg", k?"/":"",
					conf->devlist[j * raid_disks + k]->bdev);
		pr_debug("md: zone%d=[%s]\n", j, line);

		/* zone_end is cumulative over the array, so size is the delta
		 * to the previous zone's end; ">>1" converts 512B sectors to KB. */
		zone_size  = conf->strip_zone[j].zone_end - zone_start;
		pr_debug(" zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
			(unsigned long long)zone_start>>1,
			(unsigned long long)conf->strip_zone[j].dev_start>>1,
			(unsigned long long)zone_size>>1);
		zone_start = conf->strip_zone[j].zone_end;
	}
}
0062
/*
 * create_strip_zones() - build the raid0 zone table from the member rdevs.
 *
 * Devices of equal (chunk-rounded) size stripe together in one zone; each
 * additional distinct size adds another zone covering the remaining capacity
 * of the larger devices.  On success, *private_conf points at the new r0conf
 * and 0 is returned; on failure a negative errno is returned and
 * *private_conf holds the matching ERR_PTR().
 */
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
	int i, c, err;
	sector_t curr_zone_end, sectors;
	struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
	struct strip_zone *zone;
	int cnt;
	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	unsigned blksize = 512;

	*private_conf = ERR_PTR(-ENOMEM);
	if (!conf)
		return -ENOMEM;
	rdev_for_each(rdev1, mddev) {
		pr_debug("md/raid0:%s: looking at %pg\n",
			 mdname(mddev),
			 rdev1->bdev);
		c = 0;

		/* Round each device's size down to a whole number of chunks. */
		sectors = rdev1->sectors;
		sector_div(sectors, mddev->chunk_sectors);
		rdev1->sectors = sectors * mddev->chunk_sectors;

		/* Track the largest logical block size among all members. */
		blksize = max(blksize, queue_logical_block_size(
				      rdev1->bdev->bd_disk->queue));

		/*
		 * Count one zone per distinct device size: c stays 0 only if
		 * no earlier device (before rdev1 in iteration order) has the
		 * same size.
		 */
		rdev_for_each(rdev2, mddev) {
			pr_debug("md/raid0:%s: comparing %pg(%llu)"
				 " with %pg(%llu)\n",
				 mdname(mddev),
				 rdev1->bdev,
				 (unsigned long long)rdev1->sectors,
				 rdev2->bdev,
				 (unsigned long long)rdev2->sectors);
			if (rdev2 == rdev1) {
				pr_debug("md/raid0:%s: END\n",
					 mdname(mddev));
				break;
			}
			if (rdev2->sectors == rdev1->sectors) {
				/* An earlier device has the same size;
				 * rdev1 does not start a new zone. */
				pr_debug("md/raid0:%s: EQUAL\n",
					 mdname(mddev));
				c = 1;
				break;
			}
			pr_debug("md/raid0:%s: NOT EQUAL\n",
				 mdname(mddev));
		}
		if (!c) {
			pr_debug("md/raid0:%s: ==> UNIQUE\n",
				 mdname(mddev));
			conf->nr_strip_zones++;
			pr_debug("md/raid0:%s: %d zones\n",
				 mdname(mddev), conf->nr_strip_zones);
		}
	}
	pr_debug("md/raid0:%s: FINAL %d zones\n",
		 mdname(mddev), conf->nr_strip_zones);

	/* Chunk size must be a multiple of the largest logical block size. */
	if ((mddev->chunk_sectors << 9) % blksize) {
		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
			mdname(mddev),
			mddev->chunk_sectors << 9, blksize);
		err = -EINVAL;
		goto abort;
	}

	err = -ENOMEM;
	conf->strip_zone = kcalloc(conf->nr_strip_zones,
				   sizeof(struct strip_zone),
				   GFP_KERNEL);
	if (!conf->strip_zone)
		goto abort;
	/* devlist is a 2-D table: nr_strip_zones rows of raid_disks rdev
	 * pointers; row i holds the devices participating in zone i. */
	conf->devlist = kzalloc(array3_size(sizeof(struct md_rdev *),
					    conf->nr_strip_zones,
					    mddev->raid_disks),
				GFP_KERNEL);
	if (!conf->devlist)
		goto abort;

	/*
	 * Zone 0 spans all devices.  Place each rdev into its slot, remapping
	 * raid_disk indices for arrays being converted from other levels.
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	dev = conf->devlist;
	err = -EINVAL;
	rdev_for_each(rdev1, mddev) {
		int j = rdev1->raid_disk;

		if (mddev->level == 10) {
			/* taking over raid10: halve the disk count (one copy
			 * of each mirror pair survives) */
			j /= 2;
			rdev1->new_raid_disk = j;
		}

		if (mddev->level == 1) {
			/* taking over raid1: the single remaining mirror
			 * becomes disk 0 */
			j = 0;
			rdev1->new_raid_disk = j;
		}

		if (j < 0) {
			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
				mdname(mddev));
			goto abort;
		}
		if (j >= mddev->raid_disks) {
			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		if (dev[j]) {
			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
				mdname(mddev), j);
			goto abort;
		}
		dev[j] = rdev1;

		if (!smallest || (rdev1->sectors < smallest->sectors))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
			mdname(mddev), cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	/* Zone 0 ends where the smallest device is exhausted across all disks. */
	zone->zone_end = smallest->sectors * cnt;

	curr_zone_end = zone->zone_end;

	/* Each later zone starts at the previous smallest device's end and
	 * includes only devices that extend beyond it. */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		int j;

		zone = conf->strip_zone + i;
		dev = conf->devlist + i * mddev->raid_disks;

		pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
		zone->dev_start = smallest->sectors;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			rdev = conf->devlist[j];
			if (rdev->sectors <= zone->dev_start) {
				pr_debug("md/raid0:%s: checking %pg ... nope\n",
					 mdname(mddev),
					 rdev->bdev);
				continue;
			}
			pr_debug("md/raid0:%s: checking %pg ..."
				 " contained as device %d\n",
				 mdname(mddev),
				 rdev->bdev, c);
			dev[c] = rdev;
			c++;
			if (!smallest || rdev->sectors < smallest->sectors) {
				smallest = rdev;
				pr_debug("md/raid0:%s: (%llu) is smallest!.\n",
					 mdname(mddev),
					 (unsigned long long)rdev->sectors);
			}
		}

		zone->nb_dev = c;
		/* Capacity this zone adds: the slice between its start and the
		 * new smallest device's end, striped over c devices. */
		sectors = (smallest->sectors - zone->dev_start) * c;
		pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
			 mdname(mddev),
			 zone->nb_dev, (unsigned long long)sectors);

		curr_zone_end += sectors;
		zone->zone_end = curr_zone_end;

		pr_debug("md/raid0:%s: current zone start: %llu\n",
			 mdname(mddev),
			 (unsigned long long)smallest->sectors);
	}

	/* Multi-zone arrays need an explicit layout choice (array's own,
	 * or the module's default_layout); single-zone arrays do not care. */
	if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
		conf->layout = RAID0_ORIG_LAYOUT;
	} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
		   mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = mddev->layout;
	} else if (default_layout == RAID0_ORIG_LAYOUT ||
		   default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
		conf->layout = default_layout;
	} else {
		pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
		       mdname(mddev));
		pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
		err = -EOPNOTSUPP;
		goto abort;
	}

	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
	*private_conf = conf;

	return 0;
abort:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	*private_conf = ERR_PTR(err);
	return err;
}
0284
0285
0286
0287
0288 static struct strip_zone *find_zone(struct r0conf *conf,
0289 sector_t *sectorp)
0290 {
0291 int i;
0292 struct strip_zone *z = conf->strip_zone;
0293 sector_t sector = *sectorp;
0294
0295 for (i = 0; i < conf->nr_strip_zones; i++)
0296 if (sector < z[i].zone_end) {
0297 if (i)
0298 *sectorp = sector - z[i-1].zone_end;
0299 return z + i;
0300 }
0301 BUG();
0302 }
0303
0304
0305
0306
0307
/*
 * map_sector() - map a sector onto a member device of @zone.
 * @sector:	   sector used to pick which device in the stripe
 *		   (zone-relative or array-relative depending on the
 *		   layout chosen by the caller)
 * @sector_offset: in: zone-relative sector; out: offset within the
 *		   chosen device, relative to the zone's dev_start
 *
 * Returns the md_rdev holding the sector.  Uses shift/mask arithmetic
 * when chunk_sectors is a power of two, sector_div otherwise.
 */
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
				  sector_t sector, sector_t *sector_offset)
{
	unsigned int sect_in_chunk;
	sector_t chunk;
	struct r0conf *conf = mddev->private;
	int raid_disks = conf->strip_zone[0].nb_dev;
	unsigned int chunk_sects = mddev->chunk_sectors;

	if (is_power_of_2(chunk_sects)) {
		int chunksect_bits = ffz(~chunk_sects);

		/* Offset inside the chunk, then chunk number on the device. */
		sect_in_chunk  = sector & (chunk_sects - 1);
		sector >>= chunksect_bits;

		/* chunk = stripe number in this zone */
		chunk = *sector_offset;

		sector_div(chunk, zone->nb_dev << chunksect_bits);
	} else{
		sect_in_chunk = sector_div(sector, chunk_sects);
		chunk = *sector_offset;
		sector_div(chunk, chunk_sects * zone->nb_dev);
	}

	/* Device-relative offset: whole stripes worth of chunks on this
	 * device, plus the offset within the current chunk. */
	*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
	/* Pick the device: row (zone index) * raid_disks + chunk index
	 * modulo the number of devices in this zone. */
	return conf->devlist[(zone - conf->strip_zone)*raid_disks
			     + sector_div(sector, zone->nb_dev)];
}
0340
/*
 * raid0_size() - total usable array size in sectors: the sum of each
 * member's size masked down to a whole number of chunks.
 *
 * raid0 does not support generic reshape, so @sectors and @raid_disks
 * must both be zero.
 *
 * NOTE(review): the mask only rounds correctly if chunk_sectors is a
 * power of two; members were already chunk-rounded in
 * create_strip_zones(), so this is presumably just a re-truncation —
 * confirm.
 */
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t array_sectors = 0;
	struct md_rdev *rdev;

	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	rdev_for_each(rdev, mddev)
		array_sectors += (rdev->sectors &
				  ~(sector_t)(mddev->chunk_sectors-1));

	return array_sectors;
}
0355
0356 static void free_conf(struct mddev *mddev, struct r0conf *conf)
0357 {
0358 kfree(conf->strip_zone);
0359 kfree(conf->devlist);
0360 kfree(conf);
0361 }
0362
/* Personality ->free hook: drop the zone configuration and the
 * accounting bioset. */
static void raid0_free(struct mddev *mddev, void *priv)
{
	free_conf(mddev, priv);
	acct_bioset_exit(mddev);
}
0370
/*
 * raid0_run() - personality ->run hook: validate configuration, build the
 * zone table (unless a takeover already supplied one via mddev->private),
 * program the request queue limits and publish the array size.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is unwound.
 */
static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int ret;

	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	if (acct_bioset_init(mddev)) {
		pr_err("md/raid0:%s: alloc acct bioset failed.\n", mdname(mddev));
		return -ENOMEM;
	}

	/* If conf is not NULL, we are coming from a takeover which already
	 * built the zone table. */
	if (mddev->private == NULL) {
		ret = create_strip_zones(mddev, &conf);
		if (ret < 0)
			goto exit_acct_set;
		mddev->private = conf;
	}
	conf = mddev->private;
	if (mddev->queue) {
		struct md_rdev *rdev;

		/* Requests never cross a chunk; io_min is one chunk and
		 * io_opt a full stripe. */
		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
		blk_queue_max_discard_sectors(mddev->queue, UINT_MAX);

		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
		blk_queue_io_opt(mddev->queue,
				 (mddev->chunk_sectors << 9) * mddev->raid_disks);

		rdev_for_each(rdev, mddev) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);
		}
	}

	/* calculate array device size */
	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	dump_zones(mddev);

	ret = md_integrity_register(mddev);
	if (ret)
		goto free;

	return ret;

free:
	free_conf(mddev, conf);
exit_acct_set:
	acct_bioset_exit(mddev);
	return ret;
}
0434
/*
 * raid0_handle_discard() - handle a REQ_OP_DISCARD bio, which may span
 * many chunks (and even a zone boundary, in which case it is split and
 * the tail resubmitted).  For the zone-contained part, a per-device
 * discard range is computed and submitted to each member disk.
 */
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
{
	struct r0conf *conf = mddev->private;
	struct strip_zone *zone;
	sector_t start = bio->bi_iter.bi_sector;
	sector_t end;
	unsigned int stripe_size;
	sector_t first_stripe_index, last_stripe_index;
	sector_t start_disk_offset;
	unsigned int start_disk_index;
	sector_t end_disk_offset;
	unsigned int end_disk_index;
	unsigned int disk;

	/* find_zone() also rebases start to be zone-relative. */
	zone = find_zone(conf, &start);

	/* A discard crossing the zone end is split: the tail goes back
	 * through submit_bio_noacct() and we only handle the head here. */
	if (bio_end_sector(bio) > zone->zone_end) {
		struct bio *split = bio_split(bio,
			zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
			&mddev->bio_set);
		bio_chain(split, bio);
		submit_bio_noacct(bio);
		bio = split;
		end = zone->zone_end;
	} else
		end = bio_end_sector(bio);

	/* Rebase end to zone-relative too (start already is). */
	if (zone != conf->strip_zone)
		end = end - zone[-1].zone_end;

	/* Zone stripe: one chunk on each member device. */
	stripe_size = zone->nb_dev * mddev->chunk_sectors;

	first_stripe_index = start;
	sector_div(first_stripe_index, stripe_size);
	last_stripe_index = end;
	sector_div(last_stripe_index, stripe_size);

	/* Which disk/offset within the first and last stripes the range
	 * starts and ends at. */
	start_disk_index = (int)(start - first_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	start_disk_offset = ((int)(start - first_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		first_stripe_index * mddev->chunk_sectors;
	end_disk_index = (int)(end - last_stripe_index * stripe_size) /
		mddev->chunk_sectors;
	end_disk_offset = ((int)(end - last_stripe_index * stripe_size) %
		mddev->chunk_sectors) +
		last_stripe_index * mddev->chunk_sectors;

	/* For each member, compute the device-relative [dev_start, dev_end)
	 * covered by the discard and submit it. */
	for (disk = 0; disk < zone->nb_dev; disk++) {
		sector_t dev_start, dev_end;
		struct md_rdev *rdev;

		if (disk < start_disk_index)
			dev_start = (first_stripe_index + 1) *
				mddev->chunk_sectors;
		else if (disk > start_disk_index)
			dev_start = first_stripe_index * mddev->chunk_sectors;
		else
			dev_start = start_disk_offset;

		if (disk < end_disk_index)
			dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
		else if (disk > end_disk_index)
			dev_end = last_stripe_index * mddev->chunk_sectors;
		else
			dev_end = end_disk_offset;

		/* This disk holds no sectors of the range. */
		if (dev_end <= dev_start)
			continue;

		rdev = conf->devlist[(zone - conf->strip_zone) *
			conf->strip_zone[0].nb_dev + disk];
		md_submit_discard_bio(mddev, rdev, bio,
			dev_start + zone->dev_start + rdev->data_offset,
			dev_end - dev_start);
	}
	/* Complete the parent; the chained per-device bios keep it alive
	 * until they finish. */
	bio_endio(bio);
}
0514
0515 static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
0516 {
0517 struct r0conf *conf = mddev->private;
0518 struct strip_zone *zone;
0519 struct md_rdev *tmp_dev;
0520 sector_t bio_sector;
0521 sector_t sector;
0522 sector_t orig_sector;
0523 unsigned chunk_sects;
0524 unsigned sectors;
0525
0526 if (unlikely(bio->bi_opf & REQ_PREFLUSH)
0527 && md_flush_request(mddev, bio))
0528 return true;
0529
0530 if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
0531 raid0_handle_discard(mddev, bio);
0532 return true;
0533 }
0534
0535 bio_sector = bio->bi_iter.bi_sector;
0536 sector = bio_sector;
0537 chunk_sects = mddev->chunk_sectors;
0538
0539 sectors = chunk_sects -
0540 (likely(is_power_of_2(chunk_sects))
0541 ? (sector & (chunk_sects-1))
0542 : sector_div(sector, chunk_sects));
0543
0544
0545 sector = bio_sector;
0546
0547 if (sectors < bio_sectors(bio)) {
0548 struct bio *split = bio_split(bio, sectors, GFP_NOIO,
0549 &mddev->bio_set);
0550 bio_chain(split, bio);
0551 submit_bio_noacct(bio);
0552 bio = split;
0553 }
0554
0555 if (bio->bi_pool != &mddev->bio_set)
0556 md_account_bio(mddev, &bio);
0557
0558 orig_sector = sector;
0559 zone = find_zone(mddev->private, §or);
0560 switch (conf->layout) {
0561 case RAID0_ORIG_LAYOUT:
0562 tmp_dev = map_sector(mddev, zone, orig_sector, §or);
0563 break;
0564 case RAID0_ALT_MULTIZONE_LAYOUT:
0565 tmp_dev = map_sector(mddev, zone, sector, §or);
0566 break;
0567 default:
0568 WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
0569 bio_io_error(bio);
0570 return true;
0571 }
0572
0573 if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
0574 bio_io_error(bio);
0575 return true;
0576 }
0577
0578 bio_set_dev(bio, tmp_dev->bdev);
0579 bio->bi_iter.bi_sector = sector + zone->dev_start +
0580 tmp_dev->data_offset;
0581
0582 if (mddev->gendisk)
0583 trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
0584 bio_sector);
0585 mddev_check_write_zeroes(mddev, bio);
0586 submit_bio_noacct(bio);
0587 return true;
0588 }
0589
0590 static void raid0_status(struct seq_file *seq, struct mddev *mddev)
0591 {
0592 seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
0593 return;
0594 }
0595
/*
 * raid0_takeover_raid45() - convert a raid4/raid5 array to raid0.
 *
 * Only possible when exactly one disk is missing and that disk is the
 * parity disk (raid_disks-1), so the remaining members already hold a
 * plain raid0 stripe.  Returns the new r0conf or an ERR_PTR().
 */
static void *raid0_takeover_raid45(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r0conf *priv_conf;

	if (mddev->degraded != 1) {
		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
			mdname(mddev),
			mddev->degraded);
		return ERR_PTR(-EINVAL);
	}

	rdev_for_each(rdev, mddev) {
		/* check slot number for a disk: the parity slot must be
		 * the missing one */
		if (rdev->raid_disk == mddev->raid_disks-1) {
			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
				mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		rdev->sectors = mddev->dev_sectors;
	}

	/* Set new parameters: drop the parity disk. */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks--;
	mddev->delta_disks = -1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);

	return priv_conf;
}
0632
/*
 * raid0_takeover_raid10() - convert a raid10 array to raid0.
 *
 * Requires layout ((1 << 8) + 2) — presumably near_copies == 2 and
 * far_copies == 1 in raid10's layout encoding (TODO confirm) — an even
 * disk count, and exactly one surviving disk per mirror pair.  Returns
 * the new r0conf or an ERR_PTR().
 */
static void *raid0_takeover_raid10(struct mddev *mddev)
{
	struct r0conf *priv_conf;

	/* Check layout:
	 *  - far_copies must be 1
	 *  - near_copies must be 2
	 *  - disks number must be even
	 *  - all mirrors must be already degraded
	 */
	if (mddev->layout != ((1 << 8) + 2)) {
		pr_warn("md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
			mdname(mddev),
			mddev->layout);
		return ERR_PTR(-EINVAL);
	}
	if (mddev->raid_disks & 1) {
		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}
	if (mddev->degraded != (mddev->raid_disks>>1)) {
		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
			mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters: keep one disk of each mirror pair. */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = - mddev->raid_disks / 2;
	mddev->raid_disks += mddev->delta_disks;
	mddev->degraded = 0;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}
0674
/*
 * raid0_takeover_raid1() - convert a raid1 array to a single-disk raid0.
 *
 * Requires all mirrors but one to be faulty.  A chunk size is synthesized
 * as the largest power of two <= 64KiB (128 sectors) that divides the
 * array size, and must be at least PAGE_SIZE.  Returns the new r0conf or
 * an ERR_PTR().
 */
static void *raid0_takeover_raid1(struct mddev *mddev)
{
	struct r0conf *priv_conf;
	int chunksect;

	/* Check layout:
	 *  - (N - 1) mirror drives must be already faulty
	 */
	if ((mddev->raid_disks - 1) != mddev->degraded) {
		pr_err("md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/*
	 * a raid1 doesn't have the notion of chunk size, so
	 * synthesize one that divides the array size evenly.
	 */
	chunksect = 64 * 2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
		chunksect >>= 1;

	if ((chunksect << 9) < PAGE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	/* Set new parameters: one data disk, no mirrors. */
	mddev->new_level = 0;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = chunksect;
	mddev->chunk_sectors = chunksect;
	mddev->delta_disks = 1 - mddev->raid_disks;
	mddev->raid_disks = 1;
	/* make sure it will be not marked as dirty */
	mddev->recovery_cp = MaxSector;
	mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

	create_strip_zones(mddev, &priv_conf);
	return priv_conf;
}
0717
0718 static void *raid0_takeover(struct mddev *mddev)
0719 {
0720
0721
0722
0723
0724
0725
0726
0727 if (mddev->bitmap) {
0728 pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
0729 mdname(mddev));
0730 return ERR_PTR(-EBUSY);
0731 }
0732 if (mddev->level == 4)
0733 return raid0_takeover_raid45(mddev);
0734
0735 if (mddev->level == 5) {
0736 if (mddev->layout == ALGORITHM_PARITY_N)
0737 return raid0_takeover_raid45(mddev);
0738
0739 pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
0740 mdname(mddev), ALGORITHM_PARITY_N);
0741 }
0742
0743 if (mddev->level == 10)
0744 return raid0_takeover_raid10(mddev);
0745
0746 if (mddev->level == 1)
0747 return raid0_takeover_raid1(mddev);
0748
0749 pr_warn("Takeover from raid%i to raid0 not supported\n",
0750 mddev->level);
0751
0752 return ERR_PTR(-EINVAL);
0753 }
0754
/* Personality ->quiesce hook: raid0 keeps no internal state, so this is
 * intentionally a no-op. */
static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}
0758
/* md personality descriptor for level 0 (striping); registered at
 * module init. */
static struct md_personality raid0_personality=
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.free		= raid0_free,
	.status		= raid0_status,
	.size		= raid0_size,
	.takeover	= raid0_takeover,
	.quiesce	= raid0_quiesce,
};
0772
/* Module entry point: register the raid0 personality with the md core. */
static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}
0777
/* Module exit point: unregister the raid0 personality. */
static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}
0782
module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
/* Legacy aliases so old userspace/module loaders find this driver. */
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");